fix(OpenAI Node, Basic LLM Chain Node, Tool Agent Node): Better OpenAI API rate limit errors (#10797)

This commit is contained in:
Eugene 2024-09-18 10:52:10 +02:00 committed by GitHub
parent df8b2c0694
commit ab83c4b416
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 69 additions and 1 deletions

View file

@ -1,4 +1,9 @@
import { ApplicationError, NodeConnectionType, NodeOperationError } from 'n8n-workflow';
import {
ApplicationError,
NodeApiError,
NodeConnectionType,
NodeOperationError,
} from 'n8n-workflow';
import type {
IBinaryData,
IDataObject,
@ -30,6 +35,10 @@ import {
isChatInstance,
} from '../../../utils/helpers';
import { getTracingConfig } from '../../../utils/tracing';
import {
getCustomErrorMessage as getCustomOpenAiErrorMessage,
isOpenAiError,
} from '../../vendors/OpenAi/helpers/error-handling';
interface MessagesTemplate {
type: string;
@ -580,6 +589,18 @@ export class ChainLlm implements INodeType {
});
});
} catch (error) {
// If the error is an OpenAI's rate limit error, we want to handle it differently
// because OpenAI has multiple different rate limit errors
if (error instanceof NodeApiError && isOpenAiError(error.cause)) {
const openAiErrorCode: string | undefined = (error.cause as any).error?.code;
if (openAiErrorCode) {
const customMessage = getCustomOpenAiErrorMessage(openAiErrorCode);
if (customMessage) {
error.message = customMessage;
}
}
}
if (this.continueOnFail()) {
returnData.push({ json: { error: error.message }, pairedItem: { item: itemIndex } });
continue;

View file

@ -5,11 +5,15 @@ import {
type INodeType,
type INodeTypeDescription,
type SupplyData,
type JsonObject,
NodeApiError,
} from 'n8n-workflow';
import { ChatOpenAI, type ClientOptions } from '@langchain/openai';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { RateLimitError } from 'openai';
import { getCustomErrorMessage } from '../../vendors/OpenAi/helpers/error-handling';
export class LmChatOpenAi implements INodeType {
description: INodeTypeDescription = {
@ -272,6 +276,25 @@ export class LmChatOpenAi implements INodeType {
response_format: { type: options.responseFormat },
}
: undefined,
onFailedAttempt: (error: any) => {
// If the error is a rate limit error, we want to handle it differently
// because OpenAI has multiple different rate limit errors
if (error instanceof RateLimitError) {
const errorCode = error?.code;
if (errorCode) {
const customErrorMessage = getCustomErrorMessage(errorCode);
const apiError = new NodeApiError(this.getNode(), error as unknown as JsonObject);
if (customErrorMessage) {
apiError.message = customErrorMessage;
}
throw apiError;
}
}
throw error;
},
});
return {

View file

@ -12,6 +12,7 @@ import * as image from './image';
import * as text from './text';
import type { OpenAiType } from './node.type';
import { getCustomErrorMessage } from '../helpers/error-handling';
export async function router(this: IExecuteFunctions) {
const returnData: INodeExecutionData[] = [];
@ -61,6 +62,15 @@ export async function router(this: IExecuteFunctions) {
}
if (error instanceof NodeApiError) {
// If the error is a rate limit error, we want to handle it differently
const errorCode: string | undefined = (error.cause as any).error?.error?.code;
if (errorCode) {
const customErrorMessage = getCustomErrorMessage(errorCode);
if (customErrorMessage) {
error.message = customErrorMessage;
}
}
error.context = {
itemIndex: i,
};

View file

@ -0,0 +1,14 @@
import { OpenAIError } from 'openai/error';
// Maps known OpenAI API error codes to friendlier messages surfaced in the n8n UI.
// A Map (rather than a plain object) is used so that inherited Object.prototype
// keys (e.g. 'toString', 'constructor') can never be returned for an unknown code.
const errorMap = new Map<string, string>([
	['insufficient_quota', 'OpenAI: Insufficient quota'],
	['rate_limit_exceeded', 'OpenAI: Rate limit reached'],
]);

/**
 * Looks up a custom, user-friendly message for a known OpenAI error code.
 *
 * @param errorCode - the `error.code` value reported by the OpenAI API
 * @returns the mapped message, or `undefined` when the code is not recognized
 */
export function getCustomErrorMessage(errorCode: string): string | undefined {
	return errorMap.get(errorCode);
}
/**
 * Type guard that checks whether a caught error originates from the OpenAI SDK.
 *
 * Takes `unknown` rather than `any` so callers keep full type checking on the
 * value until it is narrowed by this guard.
 *
 * @param error - the value caught in a catch clause (or an error's `cause`)
 * @returns `true` when the value is an instance of the SDK's `OpenAIError`
 */
export function isOpenAiError(error: unknown): error is OpenAIError {
	return error instanceof OpenAIError;
}