n8n/packages/nodes-base/nodes/OpenAi/ChatDescription.ts

import type { INodeExecutionData, INodeProperties } from 'n8n-workflow';
import { sendErrorPostReceive } from './GenericFunctions';
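
// Operation selector shown when the node resource is 'chat'. The single
// 'Complete' operation posts to /v1/chat/completions and passes the response
// through sendErrorPostReceive.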
export const chatOperations: INodeProperties[] = [
	{
		displayName: 'Operation',
		name: 'operation',
		type: 'options',
		noDataExpression: true,
		displayOptions: {
			show: {
				resource: ['chat'],
			},
		},
		options: [
			{
				name: 'Complete',
				value: 'complete',
				action: 'Create a Completion',
				description: 'Create one or more completions for a given text',
				routing: {
					request: {
						method: 'POST',
						url: '/v1/chat/completions',
					},
					output: { postReceive: [sendErrorPostReceive] },
				},
			},
		],
		default: 'complete',
	},
];
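
// Fields for the chat:complete operation. There are two near-identical
// 'Model' selectors: the first (parameter name 'model') is shown only for
// node '@version' 1, the second ('chatModel') for all later versions.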
const completeOperations: INodeProperties[] = [
	{
		displayName: 'Model',
		name: 'model',
		type: 'options',
		description:
			'The model which will generate the completion. <a href="https://beta.openai.com/docs/models/overview">Learn more</a>.',
		displayOptions: {
			show: {
				operation: ['complete'],
				resource: ['chat'],
				'@version': [1],
			},
		},
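		// Model options are loaded dynamically from GET /v1/models, filtered to
		// 'gpt-'-prefixed models (excluding 'gpt-4-vision'), and sorted by name.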
		typeOptions: {
			loadOptions: {
				routing: {
					request: {
						method: 'GET',
						url: '/v1/models',
					},
					output: {
						postReceive: [
							{
								type: 'rootProperty',
								properties: {
									property: 'data',
								},
							},
							{
								type: 'filter',
								properties: {
									pass: "={{ $responseItem.id.startsWith('gpt-') && !$responseItem.id.startsWith('gpt-4-vision') }}",
								},
							},
							{
								type: 'setKeyValue',
								properties: {
									name: '={{$responseItem.id}}',
									value: '={{$responseItem.id}}',
								},
							},
							{
								type: 'sort',
								properties: {
									key: 'name',
								},
							},
						],
					},
				},
			},
		},
		routing: {
			send: {
				type: 'body',
				property: 'model',
			},
		},
		default: 'gpt-3.5-turbo',
	},
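	// Same selector for node versions after 1 (hidden on '@version' 1); the value
	// is stored under 'chatModel' but is still sent as 'model' in the request body.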
	{
		displayName: 'Model',
		name: 'chatModel',
		type: 'options',
		description:
			'The model which will generate the completion. <a href="https://beta.openai.com/docs/models/overview">Learn more</a>.',
		displayOptions: {
			show: {
				operation: ['complete'],
				resource: ['chat'],
			},
			hide: {
				'@version': [1],
			},
		},
		typeOptions: {
			loadOptions: {
				routing: {
					request: {
						method: 'GET',
						url: '/v1/models',
					},
					output: {
						postReceive: [
							{
								type: 'rootProperty',
								properties: {
									property: 'data',
								},
							},
							{
								type: 'filter',
								properties: {
									pass: "={{ $responseItem.id.startsWith('gpt-') && !$responseItem.id.startsWith('gpt-4-vision') }}",
								},
							},
							{
								type: 'setKeyValue',
								properties: {
									name: '={{$responseItem.id}}',
									value: '={{$responseItem.id}}',
								},
							},
							{
								type: 'sort',
								properties: {
									key: 'name',
								},
							},
						],
					},
				},
			},
		},
		routing: {
			send: {
				type: 'body',
				property: 'model',
			},
		},
		default: 'gpt-3.5-turbo',
	},
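	// The prompt is a sortable, multi-value collection of chat messages
	// (role + content), sent as the 'messages' array of the request body.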
	{
		displayName: 'Prompt',
		name: 'prompt',
		type: 'fixedCollection',
		typeOptions: {
			sortable: true,
			multipleValues: true,
		},
		displayOptions: {
			show: {
				resource: ['chat'],
				operation: ['complete'],
			},
		},
		placeholder: 'Add Message',
		default: {},
		options: [
			{
				displayName: 'Messages',
				name: 'messages',
				values: [
					{
						displayName: 'Role',
						name: 'role',
						type: 'options',
						options: [
							{
								name: 'Assistant',
								value: 'assistant',
							},
							{
								name: 'System',
								value: 'system',
							},
							{
								name: 'User',
								value: 'user',
							},
						],
						default: 'user',
					},
					{
						displayName: 'Content',
						name: 'content',
						type: 'string',
						default: '',
					},
				],
			},
		],
		routing: {
			send: {
				type: 'body',
				property: 'messages',
				value: '={{ $value.messages }}',
			},
		},
	},
];
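
// Properties shared by chat operations: the 'Simplify' output toggle and the
// 'Options' collection of optional request-body parameters.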
const sharedOperations: INodeProperties[] = [
	{
		displayName: 'Simplify',
		name: 'simplifyOutput',
		type: 'boolean',
		default: true,
		displayOptions: {
			show: {
				operation: ['complete'],
				resource: ['chat'],
			},
		},
		routing: {
			output: {
				postReceive: [
					{
						type: 'set',
						enabled: '={{$value}}',
						properties: {
							value: '={{ { "data": $response.body.choices } }}',
						},
					},
					{
						type: 'rootProperty',
						enabled: '={{$value}}',
						properties: {
							property: 'data',
						},
					},
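					// When 'Simplify' is enabled, the 'set' and 'rootProperty' steps
					// above have already reduced the response to its choices array;
					// this function then returns each choice item's JSON (including
					// its 'message' object) unchanged.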
					async function (items: INodeExecutionData[]): Promise<INodeExecutionData[]> {
						if (this.getNode().parameters.simplifyOutput === false) {
							return items;
						}
						return items.map((item) => {
							return {
								json: {
									...item.json,
									message: item.json.message,
								},
							};
						});
					},
				],
			},
		},
		description: 'Whether to return a simplified version of the response instead of the raw data',
	},
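	// Optional parameters; each entry maps directly onto a field of the chat
	// completions request body.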
	{
		displayName: 'Options',
		name: 'options',
		placeholder: 'Add Option',
		description: 'Additional options to add',
		type: 'collection',
		default: {},
		displayOptions: {
			show: {
				operation: ['complete'],
				resource: ['chat'],
			},
		},
		options: [
			{
				displayName: 'Echo Prompt',
				name: 'echo',
				type: 'boolean',
				description: 'Whether the prompt should be echoed back in addition to the completion',
				default: false,
				displayOptions: {
					show: {
						'/operation': ['complete'],
					},
				},
				routing: {
					send: {
						type: 'body',
						property: 'echo',
					},
				},
			},
			{
				displayName: 'Frequency Penalty',
				name: 'frequency_penalty',
				default: 0,
				typeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },
				description:
					"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim",
				type: 'number',
				routing: {
					send: {
						type: 'body',
						property: 'frequency_penalty',
					},
				},
			},
			{
				displayName: 'Maximum Number of Tokens',
				name: 'maxTokens',
				default: 16,
				description:
					'The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).',
				type: 'number',
				displayOptions: {
					show: {
						'/operation': ['complete'],
					},
				},
				typeOptions: {
					maxValue: 32768,
				},
				routing: {
					send: {
						type: 'body',
						property: 'max_tokens',
					},
				},
			},
			{
				displayName: 'Number of Completions',
				name: 'n',
				default: 1,
				description:
					'How many completions to generate for each prompt. Note: Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for max_tokens and stop.',
				type: 'number',
				routing: {
					send: {
						type: 'body',
						property: 'n',
					},
				},
			},
			{
				displayName: 'Presence Penalty',
				name: 'presence_penalty',
				default: 0,
				typeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },
				description:
					"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics",
				type: 'number',
				routing: {
					send: {
						type: 'body',
						property: 'presence_penalty',
					},
				},
			},
			{
				displayName: 'Sampling Temperature',
				name: 'temperature',
				default: 1,
				typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
				description:
					'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
				type: 'number',
				routing: {
					send: {
						type: 'body',
						property: 'temperature',
					},
				},
			},
			{
				displayName: 'Top P',
				name: 'topP',
				default: 1,
				typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
				description:
					'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.',
				type: 'number',
				routing: {
					send: {
						type: 'body',
						property: 'top_p',
					},
				},
			},
		],
	},
];
export const chatFields: INodeProperties[] = [
	/* -------------------------------------------------------------------------- */
	/*                                chat:complete                               */
	/* -------------------------------------------------------------------------- */
	...completeOperations,

	/* -------------------------------------------------------------------------- */
	/*                                  chat:ALL                                  */
	/* -------------------------------------------------------------------------- */
	...sharedOperations,
];
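
// Note: a rough sketch of how these exports are typically consumed in the
// node description (illustrative only; the exact structure in OpenAi.node.ts
// may differ):
//
//   import { chatFields, chatOperations } from './ChatDescription';
//
//   properties: [
//     /* resource selector and other resources ... */
//     ...chatOperations,
//     ...chatFields,
//   ],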