fix(Azure OpenAI Chat Model Node): Add response format option (#10851)

This commit is contained in:
Author: Eugene — 2024-09-17 16:27:21 +02:00 (committed by GitHub)
Parent commit: d65ade4e92
Commit: 0b5299a248
Signature verification: no known key found for this signature in the database (GPG key ID: B5690EEEBB952194)

View file

@ -7,7 +7,6 @@ import {
type SupplyData,
} from 'n8n-workflow';
import type { ClientOptions } from '@langchain/openai';
import { ChatOpenAI } from '@langchain/openai';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { N8nLlmTracing } from '../N8nLlmTracing';
@ -51,6 +50,18 @@ export class LmChatAzureOpenAi implements INodeType {
],
properties: [
getConnectionHintNoticeField([NodeConnectionType.AiChain, NodeConnectionType.AiAgent]),
{
displayName:
'If using JSON response format, you must include word "json" in the prompt in your chain or agent. Also, make sure to select latest models released post November 2023.',
name: 'notice',
type: 'notice',
default: '',
displayOptions: {
show: {
'/options.responseFormat': ['json_object'],
},
},
},
{
displayName: 'Model (Deployment) Name',
name: 'model',
@ -86,6 +97,25 @@ export class LmChatAzureOpenAi implements INodeType {
maxValue: 32768,
},
},
{
displayName: 'Response Format',
name: 'responseFormat',
default: 'text',
type: 'options',
options: [
{
name: 'Text',
value: 'text',
description: 'Regular text response',
},
{
name: 'JSON',
value: 'json_object',
description:
'Enables JSON mode, which should guarantee the message the model generates is valid JSON',
},
],
},
{
displayName: 'Presence Penalty',
name: 'presencePenalty',
@ -148,10 +178,9 @@ export class LmChatAzureOpenAi implements INodeType {
presencePenalty?: number;
temperature?: number;
topP?: number;
responseFormat?: 'text' | 'json_object';
};
const configuration: ClientOptions = {};
const model = new ChatOpenAI({
azureOpenAIApiDeploymentName: modelName,
azureOpenAIApiInstanceName: credentials.resourceName,
@ -160,8 +189,12 @@ export class LmChatAzureOpenAi implements INodeType {
...options,
timeout: options.timeout ?? 60000,
maxRetries: options.maxRetries ?? 2,
configuration,
callbacks: [new N8nLlmTracing(this)],
modelKwargs: options.responseFormat
? {
response_format: { type: options.responseFormat },
}
: undefined,
});
return {