/* eslint-disable n8n-nodes-base/node-dirname-against-convention */
import {
	NodeConnectionType,
	type INodeType,
	type INodeTypeDescription,
	type ISupplyDataFunctions,
	type SupplyData,
} from 'n8n-workflow';

import { ChatOpenAI } from '@langchain/openai';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
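
// Sub-node that supplies an Azure OpenAI chat model (a LangChain ChatOpenAI
// client configured for Azure) to AI chains and agents over the
// AiLanguageModel connection.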
export class LmChatAzureOpenAi implements INodeType {
	description: INodeTypeDescription = {
		displayName: 'Azure OpenAI Chat Model',
		// eslint-disable-next-line n8n-nodes-base/node-class-description-name-miscased
		name: 'lmChatAzureOpenAi',
		icon: 'file:azure.svg',
		group: ['transform'],
		version: 1,
		description: 'For advanced usage with an AI chain',
		defaults: {
			name: 'Azure OpenAI Chat Model',
		},
		codex: {
			categories: ['AI'],
			subcategories: {
				AI: ['Language Models', 'Root Nodes'],
				'Language Models': ['Chat Models (Recommended)'],
			},
			resources: {
				primaryDocumentation: [
					{
						url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatazureopenai/',
					},
				],
			},
		},
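		// Sub-nodes like this one take no main input; their only output is the
		// model itself, which root nodes (chains, agents) pull in on demand.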
		// eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node
		inputs: [],
		// eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong
		outputs: [NodeConnectionType.AiLanguageModel],
		outputNames: ['Model'],
		credentials: [
			{
				name: 'azureOpenAiApi',
				required: true,
			},
		],
		properties: [
			getConnectionHintNoticeField([NodeConnectionType.AiChain, NodeConnectionType.AiAgent]),
			{
				displayName:
					'If using JSON response format, you must include the word "json" in the prompt in your chain or agent. Also, make sure to select the latest models released after November 2023.',
				name: 'notice',
				type: 'notice',
				default: '',
				displayOptions: {
					show: {
						'/options.responseFormat': ['json_object'],
					},
				},
			},
			{
				displayName: 'Model (Deployment) Name',
				name: 'model',
				type: 'string',
				description: 'The name of the model (deployment) to use',
				default: '',
			},
			{
				displayName: 'Options',
				name: 'options',
				placeholder: 'Add Option',
				description: 'Additional options to add',
				type: 'collection',
				default: {},
				options: [
					{
						displayName: 'Frequency Penalty',
						name: 'frequencyPenalty',
						default: 0,
						typeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },
						description:
							"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim",
						type: 'number',
					},
					{
						displayName: 'Maximum Number of Tokens',
						name: 'maxTokens',
						default: -1,
						description:
							'The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).',
						type: 'number',
						typeOptions: {
							maxValue: 32768,
						},
					},
					{
						displayName: 'Response Format',
						name: 'responseFormat',
						default: 'text',
						type: 'options',
						options: [
							{
								name: 'Text',
								value: 'text',
								description: 'Regular text response',
							},
							{
								name: 'JSON',
								value: 'json_object',
								description:
									'Enables JSON mode, which should guarantee the message the model generates is valid JSON',
							},
						],
					},
					{
						displayName: 'Presence Penalty',
						name: 'presencePenalty',
						default: 0,
						typeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },
						description:
							"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics",
						type: 'number',
					},
					{
						displayName: 'Sampling Temperature',
						name: 'temperature',
						default: 0.7,
						typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
						description:
							'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
						type: 'number',
					},
					{
						displayName: 'Timeout',
						name: 'timeout',
						default: 60000,
						description: 'Maximum amount of time a request is allowed to take in milliseconds',
						type: 'number',
					},
					{
						displayName: 'Max Retries',
						name: 'maxRetries',
						default: 2,
						description: 'Maximum number of retries to attempt',
						type: 'number',
					},
					{
						displayName: 'Top P',
						name: 'topP',
						default: 1,
						typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
						description:
							'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.',
						type: 'number',
					},
				],
			},
		],
	};
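
	/**
	 * Builds an Azure-configured ChatOpenAI client from the node's
	 * credentials and options and hands it to the requesting root node.
	 */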
	async supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {
		const credentials = await this.getCredentials<{
			apiKey: string;
			resourceName: string;
			apiVersion: string;
		}>('azureOpenAiApi');

		const modelName = this.getNodeParameter('model', itemIndex) as string;
		const options = this.getNodeParameter('options', itemIndex, {}) as {
			frequencyPenalty?: number;
			maxTokens?: number;
			maxRetries: number;
			timeout: number;
			presencePenalty?: number;
			temperature?: number;
			topP?: number;
			responseFormat?: 'text' | 'json_object';
		};
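
		// User options are spread first, so the explicit timeout/maxRetries
		// fallbacks below win when those options are unset. JSON mode is
		// forwarded via modelKwargs as OpenAI's `response_format` parameter.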
		const model = new ChatOpenAI({
			azureOpenAIApiDeploymentName: modelName,
			azureOpenAIApiInstanceName: credentials.resourceName,
			azureOpenAIApiKey: credentials.apiKey,
			azureOpenAIApiVersion: credentials.apiVersion,
			...options,
			timeout: options.timeout ?? 60000,
			maxRetries: options.maxRetries ?? 2,
			callbacks: [new N8nLlmTracing(this)],
			modelKwargs: options.responseFormat
				? {
						response_format: { type: options.responseFormat },
					}
				: undefined,
		});

		return {
			response: model,
		};
	}
}
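
// A minimal usage sketch (hypothetical test harness, not part of this module):
// given a suitable ISupplyDataFunctions context `ctx`, the supplied model is a
// standard LangChain chat model and can be invoked directly.
//
//   const { response } = await new LmChatAzureOpenAi().supplyData.call(ctx, 0);
//   const reply = await (response as ChatOpenAI).invoke('Say hello');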