/* eslint-disable n8n-nodes-base/node-dirname-against-convention */
import {
	NodeConnectionType,
	type INodeType,
	type INodeTypeDescription,
	type ISupplyDataFunctions,
	type SupplyData,
	type JsonObject,
	NodeApiError,
} from 'n8n-workflow';

import { ChatOpenAI, type ClientOptions } from '@langchain/openai';
import { RateLimitError } from 'openai';

import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
import { getCustomErrorMessage } from '../../vendors/OpenAi/helpers/error-handling';
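/**
 * Sub-node that supplies a configured LangChain `ChatOpenAI` instance over the
 * `ai_languageModel` connection. It has no regular item inputs; chains and
 * agents connected downstream consume the model it provides.
 */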
export class LmChatOpenAi implements INodeType {
	description: INodeTypeDescription = {
		displayName: 'OpenAI Chat Model',
		// eslint-disable-next-line n8n-nodes-base/node-class-description-name-miscased
		name: 'lmChatOpenAi',
		icon: { light: 'file:openAiLight.svg', dark: 'file:openAiLight.dark.svg' },
		group: ['transform'],
		version: 1,
		description: 'For advanced usage with an AI chain',
		defaults: {
			name: 'OpenAI Chat Model',
		},
		codex: {
			categories: ['AI'],
			subcategories: {
				AI: ['Language Models', 'Root Nodes'],
				'Language Models': ['Chat Models (Recommended)'],
			},
			resources: {
				primaryDocumentation: [
					{
						url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatopenai/',
					},
				],
			},
		},
		// eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node
		inputs: [],
		// eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong
		outputs: [NodeConnectionType.AiLanguageModel],
		outputNames: ['Model'],
		credentials: [
			{
				name: 'openAiApi',
				required: true,
			},
		],
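		// The expression below strips the last path segment (e.g. "/v1") from a custom
		// base URL; the model-list request in the Model property re-appends that segment,
		// so both the default and overridden endpoints resolve to "<base>/<version>/models".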
		requestDefaults: {
			ignoreHttpStatusErrors: true,
			baseURL:
				'={{ $parameter.options?.baseURL?.split("/").slice(0,-1).join("/") || "https://api.openai.com" }}',
		},
		properties: [
			getConnectionHintNoticeField([NodeConnectionType.AiChain, NodeConnectionType.AiAgent]),
			{
				displayName:
					'If using JSON response format, you must include the word "json" in the prompt in your chain or agent. Also, make sure to select one of the latest models released after November 2023.',
				name: 'notice',
				type: 'notice',
				default: '',
				displayOptions: {
					show: {
						'/options.responseFormat': ['json_object'],
					},
				},
			},
			{
				displayName: 'Model',
				name: 'model',
				type: 'options',
				description:
					'The model which will generate the completion. <a href="https://beta.openai.com/docs/models/overview">Learn more</a>.',
				typeOptions: {
					loadOptions: {
						routing: {
							request: {
								method: 'GET',
								url: '={{ $parameter.options?.baseURL?.split("/").slice(-1).pop() || "v1" }}/models',
							},
							output: {
								postReceive: [
									{
										type: 'rootProperty',
										properties: {
											property: 'data',
										},
									},
									{
										type: 'filter',
										properties: {
											// If the baseURL is not set or points at api.openai.com, include only
											// chat-capable models (gpt-* except instruct, o1*, and fine-tuned ft:*);
											// custom endpoints list every model they return
											pass: `={{
												($parameter.options?.baseURL && !$parameter.options?.baseURL?.includes('api.openai.com')) ||
												$responseItem.id.startsWith('ft:') ||
												$responseItem.id.startsWith('o1') ||
												($responseItem.id.startsWith('gpt-') && !$responseItem.id.includes('instruct'))
											}}`,
										},
									},
									{
										type: 'setKeyValue',
										properties: {
											name: '={{$responseItem.id}}',
											value: '={{$responseItem.id}}',
										},
									},
									{
										type: 'sort',
										properties: {
											key: 'name',
										},
									},
								],
							},
						},
					},
				},
				routing: {
					send: {
						type: 'body',
						property: 'model',
					},
				},
				default: 'gpt-4o-mini',
			},
			{
				displayName:
					'When using non-OpenAI models via the "Base URL" override, not all models may be chat-compatible or support features such as tool calling or the JSON response format',
				name: 'notice',
				type: 'notice',
				default: '',
				displayOptions: {
					show: {
						'/options.baseURL': [{ _cnd: { exists: true } }],
					},
				},
			},
			{
				displayName: 'Options',
				name: 'options',
				placeholder: 'Add Option',
				description: 'Additional options to add',
				type: 'collection',
				default: {},
				options: [
					{
						displayName: 'Base URL',
						name: 'baseURL',
						default: 'https://api.openai.com/v1',
						description: 'Override the default base URL for the API',
						type: 'string',
					},
					{
						displayName: 'Frequency Penalty',
						name: 'frequencyPenalty',
						default: 0,
						typeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },
						description:
							"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim",
						type: 'number',
					},
					{
						displayName: 'Maximum Number of Tokens',
						name: 'maxTokens',
						default: -1,
						description:
							'The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).',
						type: 'number',
						typeOptions: {
							maxValue: 32768,
						},
					},
					{
						displayName: 'Response Format',
						name: 'responseFormat',
						default: 'text',
						type: 'options',
						options: [
							{
								name: 'Text',
								value: 'text',
								description: 'Regular text response',
							},
							{
								name: 'JSON',
								value: 'json_object',
								description:
									'Enables JSON mode, which should guarantee the message the model generates is valid JSON',
							},
						],
					},
					{
						displayName: 'Presence Penalty',
						name: 'presencePenalty',
						default: 0,
						typeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },
						description:
							"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics",
						type: 'number',
					},
					{
						displayName: 'Sampling Temperature',
						name: 'temperature',
						default: 0.7,
						typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
						description:
							'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
						type: 'number',
					},
					{
						displayName: 'Timeout',
						name: 'timeout',
						default: 60000,
						description: 'Maximum amount of time a request is allowed to take in milliseconds',
						type: 'number',
					},
					{
						displayName: 'Max Retries',
						name: 'maxRetries',
						default: 2,
						description: 'Maximum number of retries to attempt',
						type: 'number',
					},
					{
						displayName: 'Top P',
						name: 'topP',
						default: 1,
						typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
						description:
							'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.',
						type: 'number',
					},
				],
			},
		],
	};
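	// Called by the workflow engine when a connected root node needs the model:
	// builds a ChatOpenAI instance from the node's credentials and options and
	// hands it back as the connection's `response`.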
	async supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {
		const credentials = await this.getCredentials('openAiApi');

		const modelName = this.getNodeParameter('model', itemIndex) as string;
		const options = this.getNodeParameter('options', itemIndex, {}) as {
			baseURL?: string;
			frequencyPenalty?: number;
			maxTokens?: number;
			maxRetries: number;
			timeout: number;
			presencePenalty?: number;
			temperature?: number;
			topP?: number;
			responseFormat?: 'text' | 'json_object';
		};

		const configuration: ClientOptions = {};
		if (options.baseURL) {
			configuration.baseURL = options.baseURL;
		}
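		// Option keys are spread straight into the constructor (their names line up
		// with the ChatOpenAI fields); timeout and maxRetries get explicit fallbacks.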
		const model = new ChatOpenAI({
			openAIApiKey: credentials.apiKey as string,
			modelName,
			...options,
			timeout: options.timeout ?? 60000,
			maxRetries: options.maxRetries ?? 2,
			configuration,
			callbacks: [new N8nLlmTracing(this)],
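			// Forwarded verbatim to the OpenAI API; enables JSON mode when the
			// "Response Format" option is set to JSON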
			modelKwargs: options.responseFormat
				? {
						response_format: { type: options.responseFormat },
					}
				: undefined,
			onFailedAttempt: (error: any) => {
				// If the error is a rate limit error, we want to handle it differently
				// because OpenAI has multiple different rate limit errors
				if (error instanceof RateLimitError) {
					const errorCode = error?.code;
					if (errorCode) {
						const customErrorMessage = getCustomErrorMessage(errorCode);
						const apiError = new NodeApiError(this.getNode(), error as unknown as JsonObject);
						if (customErrorMessage) {
							apiError.message = customErrorMessage;
						}
						throw apiError;
					}
				}
				throw error;
			},
		});
		return {
			response: model,
		};
	}
}