/* eslint-disable n8n-nodes-base/node-dirname-against-convention */
import {
	NodeConnectionType,
	type IExecuteFunctions,
	type INodeType,
	type INodeTypeDescription,
	type SupplyData,
} from 'n8n-workflow';
import { ChatOpenAI, type ClientOptions } from '@langchain/openai';
import { logWrapper } from '../../../utils/logWrapper';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
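
// Sub-node that exposes an Azure OpenAI chat deployment as a language model
// for use by AI chains and agents.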
export class LmChatAzureOpenAi implements INodeType {
	description: INodeTypeDescription = {
		displayName: 'Azure OpenAI Chat Model',
		// eslint-disable-next-line n8n-nodes-base/node-class-description-name-miscased
		name: 'lmChatAzureOpenAi',
		icon: 'file:azure.svg',
		group: ['transform'],
		version: 1,
		description: 'For advanced usage with an AI chain',
		defaults: {
			name: 'Azure OpenAI Chat Model',
		},
		codex: {
			categories: ['AI'],
			subcategories: {
				AI: ['Language Models'],
			},
			resources: {
				primaryDocumentation: [
					{
						url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatazureopenai/',
					},
				],
			},
		},
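		// This node takes no main input; it only supplies an AiLanguageModel connection.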
		// eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node
		inputs: [],
		// eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong
		outputs: [NodeConnectionType.AiLanguageModel],
		outputNames: ['Model'],
		credentials: [
			{
				name: 'azureOpenAiApi',
				required: true,
			},
		],
		properties: [
			getConnectionHintNoticeField([NodeConnectionType.AiChain, NodeConnectionType.AiAgent]),
			{
				displayName: 'Model (Deployment) Name',
				name: 'model',
				type: 'string',
				description: 'The name of the model (deployment) to use',
				default: '',
			},
			{
				displayName: 'Options',
				name: 'options',
				placeholder: 'Add Option',
				description: 'Additional options to add',
				type: 'collection',
				default: {},
				options: [
					{
						displayName: 'Frequency Penalty',
						name: 'frequencyPenalty',
						default: 0,
						typeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },
						description:
							"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim",
						type: 'number',
					},
					{
						displayName: 'Maximum Number of Tokens',
						name: 'maxTokens',
						default: -1,
						description:
							'The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).',
						type: 'number',
						typeOptions: {
							maxValue: 32768,
						},
					},
					{
						displayName: 'Presence Penalty',
						name: 'presencePenalty',
						default: 0,
						typeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },
						description:
							"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics",
						type: 'number',
					},
					{
						displayName: 'Sampling Temperature',
						name: 'temperature',
						default: 0.7,
						typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
						description:
							'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
						type: 'number',
					},
					{
						displayName: 'Timeout',
						name: 'timeout',
						default: 60000,
						description: 'Maximum amount of time a request is allowed to take in milliseconds',
						type: 'number',
					},
					{
						displayName: 'Max Retries',
						name: 'maxRetries',
						default: 2,
						description: 'Maximum number of retries to attempt',
						type: 'number',
					},
					{
						displayName: 'Top P',
						name: 'topP',
						default: 1,
						typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
						description:
							'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.',
						type: 'number',
					},
				],
			},
		],
	};
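
	// Builds a ChatOpenAI client pointed at the Azure deployment and supplies it,
	// wrapped for execution logging, to the consuming chain or agent.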
	async supplyData(this: IExecuteFunctions, itemIndex: number): Promise<SupplyData> {
		const credentials = (await this.getCredentials('azureOpenAiApi')) as {
			apiKey: string;
			resourceName: string;
			apiVersion: string;
		};

		const modelName = this.getNodeParameter('model', itemIndex) as string;
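
		// All options are user-supplied and may be absent; timeout and maxRetries get
		// explicit defaults when the client is constructed below.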
		const options = this.getNodeParameter('options', itemIndex, {}) as {
			frequencyPenalty?: number;
			maxTokens?: number;
			maxRetries?: number;
			timeout?: number;
			presencePenalty?: number;
			temperature?: number;
			topP?: number;
		};
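
		// No extra client options are set here: the Azure endpoint is derived from the
		// azureOpenAI* fields below rather than from a custom base URL.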
		const configuration: ClientOptions = {};

		const model = new ChatOpenAI({
			azureOpenAIApiDeploymentName: modelName,
			azureOpenAIApiInstanceName: credentials.resourceName,
			azureOpenAIApiKey: credentials.apiKey,
			azureOpenAIApiVersion: credentials.apiVersion,
			...options,
			timeout: options.timeout ?? 60000,
			maxRetries: options.maxRetries ?? 2,
			configuration,
		});
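
		// logWrapper proxies the model so its calls appear in the n8n execution log.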
		return {
			response: logWrapper(model, this),
		};
	}
}