diff --git a/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/ChainLlm.node.ts b/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/ChainLlm.node.ts
index 5c84fa16b3..71022f84fe 100644
--- a/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/ChainLlm.node.ts
+++ b/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/ChainLlm.node.ts
@@ -1,4 +1,9 @@
-import { ApplicationError, NodeConnectionType, NodeOperationError } from 'n8n-workflow';
+import {
+	ApplicationError,
+	NodeApiError,
+	NodeConnectionType,
+	NodeOperationError,
+} from 'n8n-workflow';
 import type {
 	IBinaryData,
 	IDataObject,
@@ -30,6 +35,10 @@ import {
 	isChatInstance,
 } from '../../../utils/helpers';
 import { getTracingConfig } from '../../../utils/tracing';
+import {
+	getCustomErrorMessage as getCustomOpenAiErrorMessage,
+	isOpenAiError,
+} from '../../vendors/OpenAi/helpers/error-handling';
 
 interface MessagesTemplate {
 	type: string;
@@ -580,6 +589,18 @@ export class ChainLlm implements INodeType {
 				});
 			});
 		} catch (error) {
+			// If the error is an OpenAI rate limit error, handle it separately,
+			// because OpenAI returns several distinct rate limit error codes
+			if (error instanceof NodeApiError && isOpenAiError(error.cause)) {
+				const openAiErrorCode: string | undefined = (error.cause as any).error?.code;
+				if (openAiErrorCode) {
+					const customMessage = getCustomOpenAiErrorMessage(openAiErrorCode);
+					if (customMessage) {
+						error.message = customMessage;
+					}
+				}
+			}
+
 			if (this.continueOnFail()) {
 				returnData.push({ json: { error: error.message }, pairedItem: { item: itemIndex } });
 				continue;
diff --git a/packages/@n8n/nodes-langchain/nodes/llms/LMChatOpenAi/LmChatOpenAi.node.ts b/packages/@n8n/nodes-langchain/nodes/llms/LMChatOpenAi/LmChatOpenAi.node.ts
index 65e3145bd9..dcf483a751 100644
--- a/packages/@n8n/nodes-langchain/nodes/llms/LMChatOpenAi/LmChatOpenAi.node.ts
+++ b/packages/@n8n/nodes-langchain/nodes/llms/LMChatOpenAi/LmChatOpenAi.node.ts
@@ -5,11 +5,15 @@ import {
 	type INodeType,
 	type INodeTypeDescription,
 	type SupplyData,
+	type JsonObject,
+	NodeApiError,
 } from 'n8n-workflow';
 
 import { ChatOpenAI, type ClientOptions } from '@langchain/openai';
 import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
 import { N8nLlmTracing } from '../N8nLlmTracing';
+import { RateLimitError } from 'openai';
+import { getCustomErrorMessage } from '../../vendors/OpenAi/helpers/error-handling';
 
 export class LmChatOpenAi implements INodeType {
 	description: INodeTypeDescription = {
@@ -272,6 +276,25 @@ export class LmChatOpenAi implements INodeType {
 						response_format: { type: options.responseFormat },
 				  }
 				: undefined,
+			onFailedAttempt: (error: any) => {
+				// If the error is a rate limit error, handle it separately,
+				// because OpenAI returns several distinct rate limit error codes
+				if (error instanceof RateLimitError) {
+					const errorCode = error?.code;
+					if (errorCode) {
+						const customErrorMessage = getCustomErrorMessage(errorCode);
+
+						const apiError = new NodeApiError(this.getNode(), error as unknown as JsonObject);
+						if (customErrorMessage) {
+							apiError.message = customErrorMessage;
+						}
+
+						throw apiError;
+					}
+				}
+
+				throw error;
+			},
 		});
 
 		return {
diff --git a/packages/@n8n/nodes-langchain/nodes/vendors/OpenAi/actions/router.ts b/packages/@n8n/nodes-langchain/nodes/vendors/OpenAi/actions/router.ts
index b0ce414ce8..634923a5e6 100644
--- a/packages/@n8n/nodes-langchain/nodes/vendors/OpenAi/actions/router.ts
+++ b/packages/@n8n/nodes-langchain/nodes/vendors/OpenAi/actions/router.ts
@@ -12,6 +12,7 @@
 import * as image from './image';
 import * as text from './text';
 import type { OpenAiType } from './node.type';
+import { getCustomErrorMessage } from '../helpers/error-handling';
 
 export async function router(this: IExecuteFunctions) {
 	const returnData: INodeExecutionData[] = [];
@@ -61,6 +62,15 @@ export async function router(this: IExecuteFunctions) {
 			}
 
 			if (error instanceof NodeApiError) {
+				// If the error is a rate limit error, replace the generic message with a custom one
+				const errorCode: string | undefined = (error.cause as any)?.error?.error?.code;
+				if (errorCode) {
+					const customErrorMessage = getCustomErrorMessage(errorCode);
+					if (customErrorMessage) {
+						error.message = customErrorMessage;
+					}
+				}
+
 				error.context = {
 					itemIndex: i,
 				};
diff --git a/packages/@n8n/nodes-langchain/nodes/vendors/OpenAi/helpers/error-handling.ts b/packages/@n8n/nodes-langchain/nodes/vendors/OpenAi/helpers/error-handling.ts
new file mode 100644
index 0000000000..8db54a1b86
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/vendors/OpenAi/helpers/error-handling.ts
@@ -0,0 +1,14 @@
+import { OpenAIError } from 'openai/error';
+
+const errorMap: Record<string, string> = {
+	insufficient_quota: 'OpenAI: Insufficient quota',
+	rate_limit_exceeded: 'OpenAI: Rate limit reached',
+};
+
+export function getCustomErrorMessage(errorCode: string): string | undefined {
+	return errorMap[errorCode];
+}
+
+export function isOpenAiError(error: any): error is OpenAIError {
+	return error instanceof OpenAIError;
+}
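
A quick sanity check of the new helper's contract (a hypothetical Jest sketch for illustration, not part of this diff; the `openai/error` import mirrors the one in error-handling.ts, and the unmapped code used below is arbitrary):

// error-handling.test.ts — hypothetical test sketch, not included in this PR
import { OpenAIError } from 'openai/error';

import { getCustomErrorMessage, isOpenAiError } from './error-handling';

describe('OpenAI error-handling helpers', () => {
	it('maps known error codes to custom messages', () => {
		expect(getCustomErrorMessage('insufficient_quota')).toBe('OpenAI: Insufficient quota');
		expect(getCustomErrorMessage('rate_limit_exceeded')).toBe('OpenAI: Rate limit reached');
	});

	it('returns undefined for unmapped codes, so callers keep the original message', () => {
		expect(getCustomErrorMessage('some_other_code')).toBeUndefined();
	});

	it('narrows only real OpenAIError instances', () => {
		// OpenAIError extends Error, so a plain Error must not pass the guard
		expect(isOpenAiError(new OpenAIError('rate limited'))).toBe(true);
		expect(isOpenAiError(new Error('rate limited'))).toBe(false);
	});
});

Note the undefined fallback: each call site in the diff only overwrites error.message when getCustomErrorMessage returns a string, so unknown codes fall through to the default error text.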