Merge branch 'master' of github.com:n8n-io/n8n into ado-2808-1

Mutasem Aldmour 2024-11-08 11:19:02 +01:00
commit 34f2cc557b
26 changed files with 378 additions and 57 deletions

View file

@@ -14,6 +14,7 @@ import {
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
const modelField: INodeProperties = {
displayName: 'Model',
@@ -214,6 +215,7 @@ export class LmChatAnthropic implements INodeType {
topK: options.topK,
topP: options.topP,
callbacks: [new N8nLlmTracing(this, { tokensUsageParser })],
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
});
return {
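
The one-line `onFailedAttempt` addition above (here in LmChatAnthropic) is the pattern this commit repeats in every model node below. LangChain's AsyncCaller invokes the hook after each failed call to the provider; throwing from it aborts the remaining retries, while returning normally lets the caller retry up to `maxRetries`. A generic sketch of that contract (not the commit's handler; shape assumed from `@langchain/core`):

```ts
// Hypothetical standalone handler illustrating the retry contract.
const onFailedAttempt = (error: any) => {
	if (error?.response?.status === 401) {
		throw error; // abort: retrying an auth failure cannot succeed
	}
	// Returning without throwing hands control back to AsyncCaller,
	// which retries until maxRetries is exhausted.
};
```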

View file

@@ -12,6 +12,7 @@ import { ChatOllama } from '@langchain/ollama';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { ollamaModel, ollamaOptions, ollamaDescription } from '../LMOllama/description';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
export class LmChatOllama implements INodeType {
description: INodeTypeDescription = {
@@ -64,6 +65,7 @@ export class LmChatOllama implements INodeType {
model: modelName,
format: options.format === 'default' ? undefined : options.format,
callbacks: [new N8nLlmTracing(this)],
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
});
return {

View file

@@ -1,19 +1,18 @@
/* eslint-disable n8n-nodes-base/node-dirname-against-convention */
import { ChatOpenAI, type ClientOptions } from '@langchain/openai';
import {
NodeConnectionType,
type INodeType,
type INodeTypeDescription,
type ISupplyDataFunctions,
type SupplyData,
type JsonObject,
NodeApiError,
} from 'n8n-workflow';
import { ChatOpenAI, type ClientOptions } from '@langchain/openai';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { RateLimitError } from 'openai';
import { getCustomErrorMessage } from '../../vendors/OpenAi/helpers/error-handling';
export class LmChatOpenAi implements INodeType {
description: INodeTypeDescription = {
@@ -276,25 +275,7 @@ export class LmChatOpenAi implements INodeType {
response_format: { type: options.responseFormat },
}
: undefined,
onFailedAttempt: (error: any) => {
// If the error is a rate limit error, we want to handle it differently
// because OpenAI has multiple different rate limit errors
if (error instanceof RateLimitError) {
const errorCode = error?.code;
if (errorCode) {
const customErrorMessage = getCustomErrorMessage(errorCode);
const apiError = new NodeApiError(this.getNode(), error as unknown as JsonObject);
if (customErrorMessage) {
apiError.message = customErrorMessage;
}
throw apiError;
}
}
throw error;
},
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this, openAiFailedAttemptHandler),
});
return {

View file

@@ -10,6 +10,7 @@ import {
import { Cohere } from '@langchain/cohere';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
export class LmCohere implements INodeType {
description: INodeTypeDescription = {
@@ -99,6 +100,7 @@ export class LmCohere implements INodeType {
apiKey: credentials.apiKey as string,
...options,
callbacks: [new N8nLlmTracing(this)],
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
});
return {

View file

@@ -11,6 +11,7 @@ import { Ollama } from '@langchain/community/llms/ollama';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { ollamaDescription, ollamaModel, ollamaOptions } from './description';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
export class LmOllama implements INodeType {
description: INodeTypeDescription = {
@@ -62,6 +63,7 @@ export class LmOllama implements INodeType {
model: modelName,
...options,
callbacks: [new N8nLlmTracing(this)],
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
});
return {

View file

@@ -10,6 +10,7 @@ import type {
import { OpenAI, type ClientOptions } from '@langchain/openai';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
type LmOpenAiOptions = {
baseURL?: string;
@@ -260,6 +261,7 @@ export class LmOpenAi implements INodeType {
timeout: options.timeout ?? 60000,
maxRetries: options.maxRetries ?? 2,
callbacks: [new N8nLlmTracing(this)],
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
});
return {

View file

@@ -10,6 +10,7 @@ import {
import { HuggingFaceInference } from '@langchain/community/llms/hf';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
export class LmOpenHuggingFaceInference implements INodeType {
description: INodeTypeDescription = {
@@ -143,6 +144,7 @@ export class LmOpenHuggingFaceInference implements INodeType {
apiKey: credentials.apiKey as string,
...options,
callbacks: [new N8nLlmTracing(this)],
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
});
return {

View file

@@ -10,6 +10,7 @@ import {
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
export class LmChatAwsBedrock implements INodeType {
description: INodeTypeDescription = {
@@ -151,6 +152,7 @@ export class LmChatAwsBedrock implements INodeType {
sessionToken: credentials.sessionToken as string,
},
callbacks: [new N8nLlmTracing(this)],
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
});
return {

View file

@@ -10,6 +10,7 @@ import {
import { ChatOpenAI } from '@langchain/openai';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
export class LmChatAzureOpenAi implements INodeType {
description: INodeTypeDescription = {
@@ -195,6 +196,7 @@ export class LmChatAzureOpenAi implements INodeType {
response_format: { type: options.responseFormat },
}
: undefined,
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
});
return {

View file

@@ -11,6 +11,7 @@ import type { SafetySetting } from '@google/generative-ai';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { additionalOptions } from '../gemini-common/additional-options';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
export class LmChatGoogleGemini implements INodeType {
description: INodeTypeDescription = {
@@ -144,6 +145,7 @@ export class LmChatGoogleGemini implements INodeType {
maxOutputTokens: options.maxOutputTokens,
safetySettings,
callbacks: [new N8nLlmTracing(this)],
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
});
return {

View file

@@ -17,6 +17,7 @@ import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { additionalOptions } from '../gemini-common/additional-options';
import { makeErrorFromStatus } from './error-handling';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
export class LmChatGoogleVertex implements INodeType {
description: INodeTypeDescription = {
@@ -170,7 +171,8 @@ export class LmChatGoogleVertex implements INodeType {
safetySettings,
callbacks: [new N8nLlmTracing(this)],
// Handle ChatVertexAI invocation errors to provide better error messages
onFailedAttempt: (error: any) => {
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this, (error: any) => {
// eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
const customError = makeErrorFromStatus(Number(error?.response?.status), {
modelName,
});
@@ -180,7 +182,7 @@
}
throw error;
},
}),
});
return {

View file

@@ -10,6 +10,7 @@ import {
import { ChatGroq } from '@langchain/groq';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
export class LmChatGroq implements INodeType {
description: INodeTypeDescription = {
@@ -144,6 +145,7 @@ export class LmChatGroq implements INodeType {
maxTokens: options.maxTokensToSample,
temperature: options.temperature,
callbacks: [new N8nLlmTracing(this)],
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
});
return {

View file

@@ -11,6 +11,7 @@ import type { ChatMistralAIInput } from '@langchain/mistralai';
import { ChatMistralAI } from '@langchain/mistralai';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
export class LmChatMistralCloud implements INodeType {
description: INodeTypeDescription = {
@@ -190,6 +191,7 @@ export class LmChatMistralCloud implements INodeType {
modelName,
...options,
callbacks: [new N8nLlmTracing(this)],
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
});
return {

View file

@@ -1,17 +1,18 @@
import { BaseCallbackHandler } from '@langchain/core/callbacks/base';
import type { SerializedFields } from '@langchain/core/dist/load/map_keys';
import { getModelNameForTiktoken } from '@langchain/core/language_models/base';
import { encodingForModel } from '@langchain/core/utils/tiktoken';
import type {
Serialized,
SerializedNotImplemented,
SerializedSecret,
} from '@langchain/core/load/serializable';
import type { LLMResult } from '@langchain/core/outputs';
import type { IDataObject, ISupplyDataFunctions } from 'n8n-workflow';
import { NodeConnectionType } from 'n8n-workflow';
import { pick } from 'lodash';
import type { BaseMessage } from '@langchain/core/messages';
import type { SerializedFields } from '@langchain/core/dist/load/map_keys';
import type { LLMResult } from '@langchain/core/outputs';
import { encodingForModel } from '@langchain/core/utils/tiktoken';
import type { IDataObject, ISupplyDataFunctions, JsonObject } from 'n8n-workflow';
import { pick } from 'lodash';
import { NodeConnectionType, NodeError, NodeOperationError } from 'n8n-workflow';
import { logAiEvent } from '../../utils/helpers';
type TokensUsageParser = (llmOutput: LLMResult['llmOutput']) => {
@@ -30,6 +31,10 @@ const TIKTOKEN_ESTIMATE_MODEL = 'gpt-4o';
export class N8nLlmTracing extends BaseCallbackHandler {
name = 'N8nLlmTracing';
// This flag makes sure that LangChain will wait for the handlers to finish before continuing
// This is crucial for the handleLLMError handler to work correctly (it should be called before the error is propagated to the root node)
awaitHandlers = true;
connectionType = NodeConnectionType.AiLanguageModel;
promptTokensEstimate = 0;
@@ -135,6 +140,7 @@ export class N8nLlmTracing extends BaseCallbackHandler {
this.executionFunctions.addOutputData(this.connectionType, runDetails.index, [
[{ json: { ...response } }],
]);
logAiEvent(this.executionFunctions, 'ai-llm-generated-output', {
messages: parsedMessages,
options: runDetails.options,
@@ -172,6 +178,8 @@
runId: string,
parentRunId?: string | undefined,
) {
const runDetails = this.runsMap[runId] ?? { index: Object.keys(this.runsMap).length };
// Filter out non-x- headers to avoid leaking sensitive information in logs
if (typeof error === 'object' && error?.hasOwnProperty('headers')) {
const errorWithHeaders = error as { headers: Record<string, unknown> };
@@ -183,6 +191,19 @@
});
}
if (error instanceof NodeError) {
this.executionFunctions.addOutputData(this.connectionType, runDetails.index, error);
} else {
// If the error is not a NodeError, we wrap it in a NodeOperationError
this.executionFunctions.addOutputData(
this.connectionType,
runDetails.index,
new NodeOperationError(this.executionFunctions.getNode(), error as JsonObject, {
functionality: 'configuration-node',
}),
);
}
logAiEvent(this.executionFunctions, 'ai-llm-errored', {
error: Object.keys(error).length === 0 ? error.toString() : error,
runId,
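
Worth pausing on the `awaitHandlers = true` flag added above: LangChain fires callback handlers without awaiting them unless this is set, so the new `handleLLMError` logic could otherwise still be writing output data while the error propagates to the root node. A simplified model of the dispatch (an assumption about LangChain's CallbackManager, not its real code):

```ts
// Simplified sketch: how awaitHandlers changes error dispatch.
async function dispatchLlmError(handlers: N8nLlmTracing[], error: Error, runId: string) {
	for (const handler of handlers) {
		const pending = handler.handleLLMError(error, runId);
		// With awaitHandlers = true the event is awaited, so addOutputData
		// has completed before the error reaches the root node.
		if (handler.awaitHandlers) await pending;
	}
	throw error;
}
```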

View file

@@ -0,0 +1,66 @@
import { n8nDefaultFailedAttemptHandler } from './n8nDefaultFailedAttemptHandler';
class MockHttpError extends Error {
response: { status: number };
constructor(message: string, code: number) {
super(message);
this.response = { status: code };
}
}
describe('n8nDefaultFailedAttemptHandler', () => {
it('should throw error if message starts with "Cancel"', () => {
const error = new Error('Cancel operation');
expect(() => n8nDefaultFailedAttemptHandler(error)).toThrow(error);
});
it('should throw error if message starts with "AbortError"', () => {
const error = new Error('AbortError occurred');
expect(() => n8nDefaultFailedAttemptHandler(error)).toThrow(error);
});
it('should throw error if name is "AbortError"', () => {
class MockAbortError extends Error {
constructor() {
super('Some error');
this.name = 'AbortError';
}
}
const error = new MockAbortError();
expect(() => n8nDefaultFailedAttemptHandler(error)).toThrow(error);
});
it('should throw error if code is "ECONNABORTED"', () => {
class MockAbortError extends Error {
code: string;
constructor() {
super('Some error');
this.code = 'ECONNABORTED';
}
}
const error = new MockAbortError();
expect(() => n8nDefaultFailedAttemptHandler(error)).toThrow(error);
});
it('should throw error if status is in STATUS_NO_RETRY', () => {
const error = new MockHttpError('Some error', 400);
expect(() => n8nDefaultFailedAttemptHandler(error)).toThrow(error);
});
it('should not throw error if status is not in STATUS_NO_RETRY', () => {
const error = new MockHttpError('Some error', 500);
error.response = { status: 500 };
expect(() => n8nDefaultFailedAttemptHandler(error)).not.toThrow();
});
it('should not throw error if no conditions are met', () => {
const error = new Error('Some random error');
expect(() => n8nDefaultFailedAttemptHandler(error)).not.toThrow();
});
});

View file

@@ -0,0 +1,41 @@
const STATUS_NO_RETRY = [
400, // Bad Request
401, // Unauthorized
402, // Payment Required
403, // Forbidden
404, // Not Found
405, // Method Not Allowed
406, // Not Acceptable
407, // Proxy Authentication Required
409, // Conflict
];
/**
* This function is used as a default handler for failed attempts in all LLMs.
* It is based on a default handler from the langchain core package.
* It throws an error when it encounters a known error that should not be retried.
* @param error
*/
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export const n8nDefaultFailedAttemptHandler = (error: any) => {
if (
// eslint-disable-next-line @typescript-eslint/no-unsafe-member-access,@typescript-eslint/no-unsafe-call
error?.message?.startsWith?.('Cancel') ||
error?.message?.startsWith?.('AbortError') ||
error?.name === 'AbortError'
) {
throw error;
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any,@typescript-eslint/no-unsafe-member-access
if (error?.code === 'ECONNABORTED') {
throw error;
}
const status =
// eslint-disable-next-line @typescript-eslint/no-explicit-any,@typescript-eslint/no-unsafe-member-access
error?.response?.status ?? error?.status;
if (status && STATUS_NO_RETRY.includes(+status)) {
throw error;
}
};
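
To make the decision table concrete, a few illustrative calls (hypothetical error values, not part of the commit):

```ts
// Aborts retries: cancellations, aborts, and non-retryable HTTP statuses.
n8nDefaultFailedAttemptHandler(Object.assign(new Error('x'), { name: 'AbortError' })); // throws
n8nDefaultFailedAttemptHandler({ message: 'y', response: { status: 401 } }); // throws: 401 is in STATUS_NO_RETRY

// Permits retries: transient failures fall through without throwing.
n8nDefaultFailedAttemptHandler({ message: 'z', response: { status: 500 } }); // returns
n8nDefaultFailedAttemptHandler(new Error('socket hang up')); // returns
```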

View file

@@ -0,0 +1,65 @@
import { mock } from 'jest-mock-extended';
import type { ISupplyDataFunctions } from 'n8n-workflow';
import { ApplicationError, NodeApiError } from 'n8n-workflow';
import { makeN8nLlmFailedAttemptHandler } from './n8nLlmFailedAttemptHandler';
describe('makeN8nLlmFailedAttemptHandler', () => {
const ctx = mock<ISupplyDataFunctions>({
getNode: jest.fn(),
});
it('should throw a wrapped error when NO custom handler is provided', () => {
const handler = makeN8nLlmFailedAttemptHandler(ctx);
expect(() => handler(new Error('Test error'))).toThrow(NodeApiError);
});
it('should throw wrapped error when custom handler is provided', () => {
const customHandler = jest.fn();
const handler = makeN8nLlmFailedAttemptHandler(ctx, customHandler);
expect(() => handler(new Error('Test error'))).toThrow(NodeApiError);
expect(customHandler).toHaveBeenCalled();
});
it('should throw wrapped exception from custom handler', () => {
const customHandler = jest.fn(() => {
throw new ApplicationError('Custom handler error');
});
const handler = makeN8nLlmFailedAttemptHandler(ctx, customHandler);
expect(() => handler(new Error('Test error'))).toThrow('Custom handler error');
expect(customHandler).toHaveBeenCalled();
});
it('should not throw if retries are left', () => {
const customHandler = jest.fn();
const handler = makeN8nLlmFailedAttemptHandler(ctx, customHandler);
const error = new Error('Test error');
(error as any).retriesLeft = 1;
expect(() => handler(error)).not.toThrow();
});
it('should throw NodeApiError if no retries are left', () => {
const handler = makeN8nLlmFailedAttemptHandler(ctx);
const error = new Error('Test error');
(error as any).retriesLeft = 0;
expect(() => handler(error)).toThrow(NodeApiError);
});
it('should throw NodeApiError if no retries are left with custom handler', () => {
const customHandler = jest.fn();
const handler = makeN8nLlmFailedAttemptHandler(ctx, customHandler);
const error = new Error('Test error');
(error as any).retriesLeft = 0;
expect(() => handler(error)).toThrow(NodeApiError);
expect(customHandler).toHaveBeenCalled();
});
});

View file

@@ -0,0 +1,46 @@
import type { FailedAttemptHandler } from '@langchain/core/dist/utils/async_caller';
import type { ISupplyDataFunctions, JsonObject } from 'n8n-workflow';
import { NodeApiError } from 'n8n-workflow';
import { n8nDefaultFailedAttemptHandler } from './n8nDefaultFailedAttemptHandler';
/**
* This function returns a failed attempt handler for use with LangChain models.
* It first runs the optional custom handler, then the default handler; if either throws,
* the thrown error is wrapped in a NodeApiError and rethrown immediately.
* Otherwise the original error is rethrown (also wrapped in a NodeApiError), but only when no retries are left.
*/
export const makeN8nLlmFailedAttemptHandler = (
ctx: ISupplyDataFunctions,
handler?: FailedAttemptHandler,
): FailedAttemptHandler => {
return (error: any) => {
try {
// Try custom error handler first
handler?.(error);
// If it didn't throw an error, use the default handler
n8nDefaultFailedAttemptHandler(error);
} catch (e) {
// Wrap the error in a NodeApiError
const apiError = new NodeApiError(ctx.getNode(), e as unknown as JsonObject, {
functionality: 'configuration-node',
});
throw apiError;
}
// If no error was thrown, check if it is the last retry
// eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
if (error?.retriesLeft > 0) {
return;
}
// If there are no retries left, throw the error wrapped in a NodeApiError
const apiError = new NodeApiError(ctx.getNode(), error as unknown as JsonObject, {
functionality: 'configuration-node',
});
throw apiError;
};
};
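
For orientation, this is how the factory composes with a vendor-specific handler, mirroring the LmChatOpenAi hunk earlier in the commit (the wrapper function and model choice here are hypothetical):

```ts
import { ChatOpenAI } from '@langchain/openai';
import type { ISupplyDataFunctions } from 'n8n-workflow';
import { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';
import { makeN8nLlmFailedAttemptHandler } from './n8nLlmFailedAttemptHandler';

// Hypothetical helper, not part of the commit.
function createChatModel(ctx: ISupplyDataFunctions, apiKey: string) {
	return new ChatOpenAI({
		apiKey,
		model: 'gpt-4o-mini', // assumed model name for the example
		maxRetries: 2,
		// The vendor handler runs first (rate-limit message mapping), then the
		// default handler; anything thrown is wrapped in a NodeApiError tagged
		// functionality: 'configuration-node'.
		onFailedAttempt: makeN8nLlmFailedAttemptHandler(ctx, openAiFailedAttemptHandler),
	});
}
```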

View file

@@ -1,4 +1,5 @@
import { OpenAIError } from 'openai/error';
import { RateLimitError } from 'openai';
const errorMap: Record<string, string> = {
insufficient_quota: 'OpenAI: Insufficient quota',
@@ -12,3 +13,20 @@ export function getCustomErrorMessage(errorCode: string): string | undefined {
export function isOpenAiError(error: any): error is OpenAIError {
return error instanceof OpenAIError;
}
export const openAiFailedAttemptHandler = (error: any) => {
if (error instanceof RateLimitError) {
// If the error is a rate limit error, we want to handle it differently
// because OpenAI has multiple different rate limit errors
const errorCode = error?.code;
if (errorCode) {
const customErrorMessage = getCustomErrorMessage(errorCode);
if (customErrorMessage) {
error.message = customErrorMessage;
}
}
throw error;
}
};

View file

@@ -17,13 +17,6 @@ import { logAiEvent, isToolsInstance, isBaseChatMemory, isBaseChatMessageHistory
import { N8nBinaryLoader } from './N8nBinaryLoader';
import { N8nJsonLoader } from './N8nJsonLoader';
const errorsMap: { [key: string]: { message: string; description: string } } = {
'You exceeded your current quota, please check your plan and billing details.': {
message: 'OpenAI quota exceeded',
description: 'You exceeded your current quota, please check your plan and billing details.',
},
};
export async function callMethodAsync<T>(
this: T,
parameters: {
@@ -37,30 +30,25 @@ export async function callMethodAsync<T>(
try {
return await parameters.method.call(this, ...parameters.arguments);
} catch (e) {
// Propagate errors from sub-nodes
if (e.functionality === 'configuration-node') throw e;
const connectedNode = parameters.executeFunctions.getNode();
const error = new NodeOperationError(connectedNode, e, {
functionality: 'configuration-node',
});
if (errorsMap[error.message]) {
error.description = errorsMap[error.message].description;
error.message = errorsMap[error.message].message;
}
parameters.executeFunctions.addOutputData(
parameters.connectionType,
parameters.currentNodeRunIndex,
error,
);
if (error.message) {
if (!error.description) {
error.description = error.message;
}
throw error;
}
throw new NodeOperationError(
connectedNode,
`Error on node "${connectedNode.name}" which is connected via input "${parameters.connectionType}"`,
@@ -82,8 +70,6 @@ export function callMethodSync<T>(
try {
return parameters.method.call(this, ...parameters.arguments);
} catch (e) {
// Propagate errors from sub-nodes
if (e.functionality === 'configuration-node') throw e;
const connectedNode = parameters.executeFunctions.getNode();
const error = new NodeOperationError(connectedNode, e);
parameters.executeFunctions.addOutputData(
@@ -91,6 +77,7 @@ export function callMethodSync<T>(
parameters.currentNodeRunIndex,
error,
);
throw new NodeOperationError(
connectedNode,
`Error on node "${connectedNode.name}" which is connected via input "${parameters.connectionType}"`,
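
The early `functionality === 'configuration-node'` rethrow added in this file is what lets the sub-node errors built elsewhere in the commit survive intact. A sketch of the interplay (the `node` constant is a stand-in for `executeFunctions.getNode()`):

```ts
import type { INode } from 'n8n-workflow';
import { NodeApiError } from 'n8n-workflow';

declare const node: INode; // stand-in for executeFunctions.getNode()

// Shaped like the errors makeN8nLlmFailedAttemptHandler now produces:
const subNodeError = new NodeApiError(node, { message: 'OpenAI: Insufficient quota' }, {
	functionality: 'configuration-node',
});

// callMethodAsync/callMethodSync rethrow such errors untouched instead of
// re-wrapping them, so the UI attributes the failure to the sub-node itself.
if (subNodeError.functionality === 'configuration-node') throw subNodeError;
```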

View file

@@ -0,0 +1,67 @@
import type { GlobalConfig } from '@n8n/config';
import { mock } from 'jest-mock-extended';
import { ServerResponse } from 'node:http';
import type WebSocket from 'ws';
import type { TaskRunnerAuthController } from '@/runners/auth/task-runner-auth.controller';
import { TaskRunnerServer } from '@/runners/task-runner-server';
import type { TaskRunnerServerInitRequest } from '../runner-types';
describe('TaskRunnerServer', () => {
describe('handleUpgradeRequest', () => {
it('should close WebSocket when response status code is > 200', () => {
const ws = mock<WebSocket>();
const request = mock<TaskRunnerServerInitRequest>({
url: '/runners/_ws',
ws,
});
const server = new TaskRunnerServer(
mock(),
mock<GlobalConfig>({ taskRunners: { path: '/runners' } }),
mock<TaskRunnerAuthController>(),
mock(),
);
// @ts-expect-error Private property
server.handleUpgradeRequest(request, mock(), Buffer.from(''));
const response = new ServerResponse(request);
response.writeHead = (statusCode) => {
if (statusCode > 200) ws.close();
return response;
};
response.writeHead(401);
expect(ws.close).toHaveBeenCalledWith(); // no args
});
it('should not close WebSocket when response status code is 200', () => {
const ws = mock<WebSocket>();
const request = mock<TaskRunnerServerInitRequest>({
url: '/runners/_ws',
ws,
});
const server = new TaskRunnerServer(
mock(),
mock<GlobalConfig>({ taskRunners: { path: '/runners' } }),
mock<TaskRunnerAuthController>(),
mock(),
);
// @ts-expect-error Private property
server.handleUpgradeRequest(request, mock(), Buffer.from(''));
const response = new ServerResponse(request);
response.writeHead = (statusCode) => {
if (statusCode > 200) ws.close();
return response;
};
response.writeHead(200);
expect(ws.close).not.toHaveBeenCalled();
});
});
});

View file

@@ -181,7 +181,7 @@ export class TaskRunnerServer {
const response = new ServerResponse(request);
response.writeHead = (statusCode) => {
if (statusCode > 200) ws.close(100);
if (statusCode > 200) ws.close();
return response;
};
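
The change from `ws.close(100)` to `ws.close()` fixes an invalid close code: RFC 6455 reserves codes below 1000, and the `ws` library validates outgoing codes (roughly the defined 1000-1014 range plus 3000-4999), so `close(100)` throws instead of closing the socket. A minimal illustration (validation behavior assumed from the `ws` package):

```ts
import WebSocket from 'ws';

declare const socket: WebSocket;

socket.close(100); // throws: 100 is not a sendable close code
socket.close(1008); // valid: 1008 = policy violation, if a reason code is wanted
socket.close(); // what the fix uses: close without an explicit status code
```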

View file

@@ -1,6 +1,7 @@
import { parse } from 'flatted';
import { h, ref } from 'vue';
import type { useRouter } from 'vue-router';
import { TelemetryHelpers } from 'n8n-workflow';
import type {
ExpressionError,
IDataObject,
@@ -12,8 +13,8 @@ import type {
IExecuteContextData,
NodeOperationError,
INodeTypeDescription,
NodeError,
} from 'n8n-workflow';
import { TelemetryHelpers } from 'n8n-workflow';
import type { PushMessage, PushPayload } from '@n8n/api-types';
import type { IExecutionResponse, IExecutionsCurrentSummaryExtended } from '@/Interface';
@@ -322,8 +323,7 @@ export function usePushConnection({ router }: { router: ReturnType<typeof useRou
if (
runDataExecuted.data.resultData.error?.name === 'ExpressionError' &&
(runDataExecuted.data.resultData.error as ExpressionError).context.functionality ===
'pairedItem'
(runDataExecuted.data.resultData.error as ExpressionError).functionality === 'pairedItem'
) {
const error = runDataExecuted.data.resultData.error as ExpressionError;
@@ -377,8 +377,9 @@ export function usePushConnection({ router }: { router: ReturnType<typeof useRou
duration: 0,
});
} else if (
runDataExecuted.data.resultData.error?.name === 'NodeOperationError' &&
(runDataExecuted.data.resultData.error as NodeOperationError).functionality ===
(runDataExecuted.data.resultData.error?.name === 'NodeOperationError' ||
runDataExecuted.data.resultData.error?.name === 'NodeApiError') &&
(runDataExecuted.data.resultData.error as NodeError).functionality ===
'configuration-node'
) {
// If the error is a configuration error, the node itself doesn't get executed, so we can't use lastNodeExecuted for the title

View file

@@ -60,7 +60,7 @@ export const isNoInputConnectionError = (error: unknown): error is ExpressionErr
};
export const isAnyPairedItemError = (error: unknown): error is ExpressionError => {
return error instanceof ExpressionError && error.context.functionality === 'pairedItem';
return error instanceof ExpressionError && error.functionality === 'pairedItem';
};
export const getResolvableState = (error: unknown, ignoreError = false): ResolvableState => {

View file

@@ -40,7 +40,6 @@ export class ExpressionError extends ExecutionBaseError {
'causeDetailed',
'descriptionTemplate',
'descriptionKey',
'functionality',
'itemIndex',
'messageTemplate',
'nodeCause',
@@ -48,7 +47,12 @@
'runIndex',
'type',
];
if (options !== undefined) {
if (options.functionality !== undefined) {
this.functionality = options.functionality;
}
Object.keys(options as IDataObject).forEach((key) => {
if (allowedKeys.includes(key)) {
this.context[key] = (options as IDataObject)[key];

View file

@@ -261,7 +261,7 @@ export class NodeApiError extends NodeError {
messageMapping,
);
if (functionality !== undefined) this.context.functionality = functionality;
if (functionality !== undefined) this.functionality = functionality;
if (runIndex !== undefined) this.context.runIndex = runIndex;
if (itemIndex !== undefined) this.context.itemIndex = itemIndex;
}