feat(OpenAI Node): Overhaul (#8335)

This commit is contained in:
Michael Kret 2024-02-15 10:15:58 +02:00 committed by GitHub
parent 2b9391a975
commit 941278db68
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
49 changed files with 3542 additions and 20 deletions

View file

@ -0,0 +1,2 @@
/** @type {import('jest').Config} */
// Re-use the repository-level Jest configuration for this package.
module.exports = require('../../../jest.config');

View file

@ -16,6 +16,7 @@ export class OpenAiAssistant implements INodeType {
description: INodeTypeDescription = {
displayName: 'OpenAI Assistant',
name: 'openAiAssistant',
hidden: true,
icon: 'fa:robot',
group: ['transform'],
version: 1,

View file

@ -46,6 +46,13 @@ export class ToolCode implements INodeType {
outputNames: ['Tool'],
properties: [
getConnectionHintNoticeField([NodeConnectionType.AiAgent]),
{
displayName:
'See an example of a conversational agent with custom tool written in JavaScript <a href="/templates/1963" target="_blank">here</a>.',
name: 'noticeTemplateExample',
type: 'notice',
default: '',
},
{
displayName: 'Name',
name: 'name',
@ -95,11 +102,12 @@ export class ToolCode implements INodeType {
editor: 'codeNodeEditor',
editorLanguage: 'javaScript',
},
default: '',
default:
'// Example: convert the incoming query to uppercase and return it\nreturn query.toUpperCase()',
// TODO: Add proper text here later
hint: 'You can access the input the tool receives via the input property "query". The returned value should be a single string.',
description:
'JavaScript code to execute.<br><br>Tip: You can use luxon vars like <code>$today</code> for dates and <code>$jmespath</code> for querying JSON structures. <a href="https://docs.n8n.io/nodes/n8n-nodes-base.function">Learn more</a>.',
// eslint-disable-next-line n8n-nodes-base/node-param-description-missing-final-period
description: 'E.g. Converts any text to uppercase',
noDataExpression: true,
},
{
@ -115,11 +123,12 @@ export class ToolCode implements INodeType {
editor: 'codeNodeEditor',
editorLanguage: 'python',
},
default: '',
default:
'# Example: convert the incoming query to uppercase and return it\nreturn query.upper()',
// TODO: Add proper text here later
hint: 'You can access the input the tool receives via the input property "query". The returned value should be a single string.',
description:
'Python code to execute.<br><br>Tip: You can use built-in methods and variables like <code>_today</code> for dates and <code>_jmespath</code> for querying JSON structures. <a href="https://docs.n8n.io/code/builtin/">Learn more</a>.',
// eslint-disable-next-line n8n-nodes-base/node-param-description-missing-final-period
description: 'E.g. Converts any text to uppercase',
noDataExpression: true,
},
],

View file

@ -49,6 +49,13 @@ export class ToolWorkflow implements INodeType {
outputNames: ['Tool'],
properties: [
getConnectionHintNoticeField([NodeConnectionType.AiAgent]),
{
displayName:
'See an example of a workflow to suggest meeting slots using AI <a href="/templates/1953" target="_blank">here</a>.',
name: 'noticeTemplateExample',
type: 'notice',
default: '',
},
{
displayName: 'Name',
name: 'name',

View file

@ -0,0 +1,17 @@
import type { IExecuteFunctions, INodeType } from 'n8n-workflow';
import { router } from './actions/router';
import { versionDescription } from './actions/versionDescription';
import { listSearch, loadOptions } from './methods';
/**
 * OpenAI node: exposes the shared version description and the list-search /
 * load-options methods, and delegates all execution to the operation router.
 */
export class OpenAi implements INodeType {
	description = versionDescription;

	methods = {
		listSearch,
		loadOptions,
	};

	async execute(this: IExecuteFunctions) {
		const result = await router.call(this);
		return result;
	}
}

View file

@ -0,0 +1,252 @@
import type {
INodeProperties,
IExecuteFunctions,
INodeExecutionData,
IDataObject,
} from 'n8n-workflow';
import { NodeOperationError, updateDisplayOptions } from 'n8n-workflow';
import { apiRequest } from '../../transport';
import { modelRLC } from '../descriptions';
// UI properties for assistant -> create.
// Fix: "Whether to augments" -> "Whether to augment" in the Knowledge Retrieval description.
const properties: INodeProperties[] = [
	modelRLC,
	{
		displayName: 'Name',
		name: 'name',
		type: 'string',
		default: '',
		description: 'The name of the assistant. The maximum length is 256 characters.',
		placeholder: 'e.g. My Assistant',
		required: true,
	},
	{
		displayName: 'Description',
		name: 'description',
		type: 'string',
		default: '',
		description: 'The description of the assistant. The maximum length is 512 characters.',
		placeholder: 'e.g. My personal assistant',
	},
	{
		displayName: 'Instructions',
		name: 'instructions',
		type: 'string',
		description:
			'The system instructions that the assistant uses. The maximum length is 32768 characters.',
		default: '',
		typeOptions: {
			rows: 2,
		},
	},
	{
		displayName: 'Code Interpreter',
		name: 'codeInterpreter',
		type: 'boolean',
		default: false,
		description:
			'Whether to enable the code interpreter that allows the assistants to write and run Python code in a sandboxed execution environment, find more <a href="https://platform.openai.com/docs/assistants/tools/code-interpreter" target="_blank">here</a>',
	},
	{
		displayName: 'Knowledge Retrieval',
		name: 'knowledgeRetrieval',
		type: 'boolean',
		default: false,
		description:
			'Whether to augment the assistant with knowledge from outside its model, such as proprietary product information or documents, find more <a href="https://platform.openai.com/docs/assistants/tools/knowledge-retrieval" target="_blank">here</a>',
	},
	// We want to display the Files selector only when codeInterpreter is true, knowledgeRetrieval
	// is true, or both. displayOptions cannot express OR across fields, hence three copies.
	{
		// eslint-disable-next-line n8n-nodes-base/node-param-display-name-wrong-for-dynamic-multi-options
		displayName: 'Files',
		name: 'file_ids',
		type: 'multiOptions',
		// eslint-disable-next-line n8n-nodes-base/node-param-description-wrong-for-dynamic-multi-options
		description:
			'The files to be used by the assistant, there can be a maximum of 20 files attached to the assistant',
		typeOptions: {
			loadOptionsMethod: 'getFiles',
		},
		default: [],
		hint: "Add more files by using the 'Upload a File' operation",
		displayOptions: {
			show: {
				codeInterpreter: [true],
			},
			hide: {
				knowledgeRetrieval: [true],
			},
		},
	},
	{
		// eslint-disable-next-line n8n-nodes-base/node-param-display-name-wrong-for-dynamic-multi-options
		displayName: 'Files',
		name: 'file_ids',
		type: 'multiOptions',
		// eslint-disable-next-line n8n-nodes-base/node-param-description-wrong-for-dynamic-multi-options
		description:
			'The files to be used by the assistant, there can be a maximum of 20 files attached to the assistant',
		typeOptions: {
			loadOptionsMethod: 'getFiles',
		},
		default: [],
		hint: "Add more files by using the 'Upload a File' operation",
		displayOptions: {
			show: {
				knowledgeRetrieval: [true],
			},
			hide: {
				codeInterpreter: [true],
			},
		},
	},
	{
		// eslint-disable-next-line n8n-nodes-base/node-param-display-name-wrong-for-dynamic-multi-options
		displayName: 'Files',
		name: 'file_ids',
		type: 'multiOptions',
		// eslint-disable-next-line n8n-nodes-base/node-param-description-wrong-for-dynamic-multi-options
		description:
			'The files to be used by the assistant, there can be a maximum of 20 files attached to the assistant',
		typeOptions: {
			loadOptionsMethod: 'getFiles',
		},
		default: [],
		hint: "Add more files by using the 'Upload a File' operation",
		displayOptions: {
			show: {
				knowledgeRetrieval: [true],
				codeInterpreter: [true],
			},
		},
	},
	{
		displayName: "Add custom n8n tools when using the 'Message Assistant' operation",
		name: 'noticeTools',
		type: 'notice',
		default: '',
	},
	{
		displayName: 'Options',
		name: 'options',
		placeholder: 'Add Option',
		type: 'collection',
		default: {},
		options: [
			{
				displayName: 'Fail if Assistant Already Exists',
				name: 'failIfExists',
				type: 'boolean',
				default: false,
				description:
					'Whether to fail an operation if the assistant with the same name already exists',
			},
		],
	},
];

// Restrict these properties to the assistant -> create operation.
const displayOptions = {
	show: {
		operation: ['create'],
		resource: ['assistant'],
	},
};

export const description = updateDisplayOptions(displayOptions, properties);
/**
 * Creates a new OpenAI assistant (Assistants v1 beta API).
 *
 * Optionally fails early when an assistant with the same name already exists,
 * and validates the 20-file attachment limit before sending the request.
 */
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
	const model = this.getNodeParameter('modelId', i, '', { extractValue: true }) as string;
	const name = this.getNodeParameter('name', i) as string;
	const assistantDescription = this.getNodeParameter('description', i) as string;
	const instructions = this.getNodeParameter('instructions', i) as string;
	const codeInterpreter = this.getNodeParameter('codeInterpreter', i) as boolean;
	const knowledgeRetrieval = this.getNodeParameter('knowledgeRetrieval', i) as boolean;
	const file_ids = this.getNodeParameter('file_ids', i, []) as string[];
	const options = this.getNodeParameter('options', i, {}) as { failIfExists?: boolean };

	if (options.failIfExists) {
		const assistants: string[] = [];

		// Page through all existing assistants (100 per request) collecting names.
		let has_more = true;
		let after: string | undefined;

		do {
			const response = await apiRequest.call(this, 'GET', '/assistants', {
				headers: {
					'OpenAI-Beta': 'assistants=v1',
				},
				qs: {
					limit: 100,
					after,
				},
			});

			for (const assistant of response.data || []) {
				assistants.push(assistant.name);
			}

			has_more = response.has_more;

			// Stop when there are no more pages or the API omitted the cursor
			// (guards against an infinite loop on a malformed response).
			if (!has_more || !response.last_id) break;
			after = response.last_id as string;
		} while (has_more);

		if (assistants.includes(name)) {
			throw new NodeOperationError(
				this.getNode(),
				`An assistant with the same name '${name}' already exists`,
				{ itemIndex: i },
			);
		}
	}

	// OpenAI caps assistants at 20 attached files.
	if (file_ids.length > 20) {
		throw new NodeOperationError(
			this.getNode(),
			'The maximum number of files that can be attached to the assistant is 20',
			{ itemIndex: i },
		);
	}

	const body: IDataObject = {
		model,
		name,
		description: assistantDescription,
		instructions,
		file_ids,
	};

	// Native tools are only sent when enabled.
	const tools = [];
	if (codeInterpreter) {
		tools.push({
			type: 'code_interpreter',
		});
	}
	if (knowledgeRetrieval) {
		tools.push({
			type: 'retrieval',
		});
	}
	if (tools.length) {
		body.tools = tools;
	}

	const response = await apiRequest.call(this, 'POST', '/assistants', {
		body,
		headers: {
			'OpenAI-Beta': 'assistants=v1',
		},
	});

	return [
		{
			json: response,
			pairedItem: { item: i },
		},
	];
}

View file

@ -0,0 +1,32 @@
import type { INodeProperties, IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';
import { updateDisplayOptions } from 'n8n-workflow';
import { apiRequest } from '../../transport';
import { assistantRLC } from '../descriptions';
// The only input for this operation is the assistant resource locator.
const properties: INodeProperties[] = [assistantRLC];

// Restrict these properties to the assistant -> deleteAssistant operation.
const displayOptions = {
	show: {
		operation: ['deleteAssistant'],
		resource: ['assistant'],
	},
};

export const description = updateDisplayOptions(displayOptions, properties);
// Deletes the selected assistant via the OpenAI Assistants (v1 beta) API.
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
	const assistantId = this.getNodeParameter('assistantId', i, '', { extractValue: true }) as string;

	const requestOptions = {
		headers: {
			'OpenAI-Beta': 'assistants=v1',
		},
	};
	const response = await apiRequest.call(this, 'DELETE', `/assistants/${assistantId}`, requestOptions);

	return [{ json: response, pairedItem: { item: i } }];
}

View file

@ -0,0 +1,62 @@
import type { INodeProperties } from 'n8n-workflow';
import * as create from './create.operation';
import * as deleteAssistant from './deleteAssistant.operation';
import * as message from './message.operation';
import * as list from './list.operation';
import * as update from './update.operation';
export { create, deleteAssistant, message, list, update };
// Operation selector for the 'assistant' resource, followed by the
// operation-specific properties re-exported from each *.operation module.
export const description: INodeProperties[] = [
	{
		displayName: 'Operation',
		name: 'operation',
		type: 'options',
		noDataExpression: true,
		options: [
			{
				name: 'Create an Assistant',
				value: 'create',
				action: 'Create an assistant',
				description: 'Create a new assistant',
			},
			{
				name: 'Delete an Assistant',
				value: 'deleteAssistant',
				action: 'Delete an assistant',
				description: 'Delete an assistant from the account',
			},
			{
				name: 'List Assistants',
				value: 'list',
				action: 'List assistants',
				description: 'List assistants in the organization',
			},
			{
				name: 'Message an Assistant',
				value: 'message',
				action: 'Message an assistant',
				description: 'Send messages to an assistant',
			},
			{
				name: 'Update an Assistant',
				value: 'update',
				action: 'Update an assistant',
				description: 'Update an existing assistant',
			},
		],
		default: 'message',
		displayOptions: {
			show: {
				resource: ['assistant'],
			},
		},
	},
	...create.description,
	...deleteAssistant.description,
	...message.description,
	...list.description,
	...update.description,
];

View file

@ -0,0 +1,75 @@
import type { INodeProperties, IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';
import { updateDisplayOptions } from 'n8n-workflow';
import { apiRequest } from '../../transport';
// UI properties for assistant -> list.
const properties: INodeProperties[] = [
	{
		displayName: 'Simplify Output',
		name: 'simplify',
		type: 'boolean',
		default: true,
		description: 'Whether to return a simplified version of the response instead of the raw data',
	},
];

// Restrict these properties to the assistant -> list operation.
const displayOptions = {
	show: {
		operation: ['list'],
		resource: ['assistant'],
	},
};

export const description = updateDisplayOptions(displayOptions, properties);
/**
 * Lists all assistants in the account, paging through the API 100 at a time.
 * When 'Simplify Output' is enabled only id/name/model are returned per item.
 */
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
	const returnData: INodeExecutionData[] = [];

	let has_more = true;
	let after: string | undefined;

	do {
		const response = await apiRequest.call(this, 'GET', '/assistants', {
			headers: {
				'OpenAI-Beta': 'assistants=v1',
			},
			qs: {
				limit: 100,
				after,
			},
		});

		for (const assistant of response.data || []) {
			try {
				// Best-effort: convert the UNIX timestamp to ISO-8601; keep the raw value on failure.
				assistant.created_at = new Date(assistant.created_at * 1000).toISOString();
			} catch (error) {}
			returnData.push({ json: assistant, pairedItem: { item: i } });
		}

		has_more = response.has_more;

		// Stop when there are no more pages or the API omitted the cursor
		// (guards against an infinite loop on a malformed response).
		if (!has_more || !response.last_id) break;
		after = response.last_id as string;
	} while (has_more);

	const simplify = this.getNodeParameter('simplify', i) as boolean;

	if (simplify) {
		return returnData.map((item) => {
			const { id, name, model } = item.json;
			return {
				json: {
					id,
					name,
					model,
				},
				pairedItem: { item: i },
			};
		});
	}

	return returnData;
}

View file

@ -0,0 +1,174 @@
import { AgentExecutor } from 'langchain/agents';
import type { Tool } from 'langchain/tools';
import { OpenAIAssistantRunnable } from 'langchain/experimental/openai_assistant';
import type { OpenAIToolType } from 'langchain/dist/experimental/openai_assistant/schema';
import { OpenAI as OpenAIClient } from 'openai';
import { NodeConnectionType, NodeOperationError, updateDisplayOptions } from 'n8n-workflow';
import type { IExecuteFunctions, INodeExecutionData, INodeProperties } from 'n8n-workflow';
import { formatToOpenAIAssistantTool } from '../../helpers/utils';
import { assistantRLC } from '../descriptions';
// UI properties for assistant -> message.
const properties: INodeProperties[] = [
	assistantRLC,
	// How the prompt is sourced: automatically from the previous node, or typed below.
	{
		displayName: 'Prompt',
		name: 'prompt',
		type: 'options',
		options: [
			{
				// eslint-disable-next-line n8n-nodes-base/node-param-display-name-miscased
				name: 'Take from previous node automatically',
				value: 'auto',
				description: 'Looks for an input field called chatInput',
			},
			{
				// eslint-disable-next-line n8n-nodes-base/node-param-display-name-miscased
				name: 'Define below',
				value: 'define',
				description: 'Use an expression to reference data in previous nodes or enter static text',
			},
		],
		default: 'auto',
	},
	// Shown only when 'Define below' is selected.
	{
		displayName: 'Text',
		name: 'text',
		type: 'string',
		required: true,
		default: '',
		placeholder: 'e.g. Hello, how can you help me?',
		typeOptions: {
			rows: 2,
		},
		displayOptions: {
			show: {
				prompt: ['define'],
			},
		},
	},
	{
		displayName: 'Connect your own custom n8n tools to this node on the canvas',
		name: 'noticeTools',
		type: 'notice',
		default: '',
	},
	{
		displayName: 'Options',
		name: 'options',
		placeholder: 'Add Option',
		description: 'Additional options to add',
		type: 'collection',
		default: {},
		options: [
			{
				displayName: 'Base URL',
				name: 'baseURL',
				default: 'https://api.openai.com/v1',
				description: 'Override the default base URL for the API',
				type: 'string',
			},
			{
				displayName: 'Max Retries',
				name: 'maxRetries',
				default: 2,
				description: 'Maximum number of retries to attempt',
				type: 'number',
			},
			{
				displayName: 'Timeout',
				name: 'timeout',
				default: 10000,
				description: 'Maximum amount of time a request is allowed to take in milliseconds',
				type: 'number',
			},
		],
	},
];

// Restrict these properties to the assistant -> message operation.
const displayOptions = {
	show: {
		operation: ['message'],
		resource: ['assistant'],
	},
};

export const description = updateDisplayOptions(displayOptions, properties);
/**
 * Sends a message to an existing OpenAI assistant via langchain's
 * OpenAIAssistantRunnable and returns the agent executor's response.
 */
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
	const credentials = await this.getCredentials('openAiApi');

	const prompt = this.getNodeParameter('prompt', i) as string;

	let input;
	if (prompt === 'auto') {
		// 'auto' mode reads the chat trigger's output field from the incoming item.
		input = this.evaluateExpression('{{ $json["chatInput"] }}', i) as string;
	} else {
		input = this.getNodeParameter('text', i) as string;
	}

	if (input === undefined) {
		throw new NodeOperationError(this.getNode(), 'No prompt specified', {
			description:
				"Expected to find the prompt in an input field called 'chatInput' (this is what the chat trigger node outputs). To use something else, change the 'Prompt' parameter",
		});
	}

	const assistantId = this.getNodeParameter('assistantId', i, '', { extractValue: true }) as string;

	const options = this.getNodeParameter('options', i, {}) as {
		baseURL?: string;
		maxRetries: number;
		timeout: number;
	};

	const client = new OpenAIClient({
		apiKey: credentials.apiKey as string,
		maxRetries: options.maxRetries ?? 2,
		timeout: options.timeout ?? 10000,
		baseURL: options.baseURL,
	});
	const agent = new OpenAIAssistantRunnable({ assistantId, client, asAgent: true });

	// Custom tools connected to this node on the canvas (AiTool input connection).
	const tools = ((await this.getInputConnectionData(NodeConnectionType.AiTool, 0)) as Tool[]) || [];

	if (tools.length) {
		const transformedConnectedTools = tools?.map(formatToOpenAIAssistantTool) ?? [];
		const nativeToolsParsed: OpenAIToolType = [];

		// Preserve the assistant's native tools (code interpreter / retrieval) when
		// overwriting its tool list with the connected custom tools below.
		const assistant = await client.beta.assistants.retrieve(assistantId);

		const useCodeInterpreter = assistant.tools.some((tool) => tool.type === 'code_interpreter');
		if (useCodeInterpreter) {
			nativeToolsParsed.push({
				type: 'code_interpreter',
			});
		}

		const useRetrieval = assistant.tools.some((tool) => tool.type === 'retrieval');
		if (useRetrieval) {
			nativeToolsParsed.push({
				type: 'retrieval',
			});
		}

		await client.beta.assistants.update(assistantId, {
			tools: [...nativeToolsParsed, ...transformedConnectedTools],
		});
	}

	const agentExecutor = AgentExecutor.fromAgentAndTools({
		agent,
		tools: tools ?? [],
	});

	const response = await agentExecutor.call({
		content: input,
		signal: this.getExecutionCancelSignal(),
		timeout: options.timeout ?? 10000,
	});

	return [{ json: response, pairedItem: { item: i } }];
}

View file

@ -0,0 +1,194 @@
import type {
INodeProperties,
IExecuteFunctions,
INodeExecutionData,
IDataObject,
} from 'n8n-workflow';
import { NodeOperationError, updateDisplayOptions } from 'n8n-workflow';
import { apiRequest } from '../../transport';
import { assistantRLC, modelRLC } from '../descriptions';
// UI properties for assistant -> update. Everything except the assistant locator
// is optional; only the options the user sets are sent to the API.
// Fix: "Whether to augments" -> "Whether to augment" in the Knowledge Retrieval description.
const properties: INodeProperties[] = [
	assistantRLC,
	{
		displayName: 'Options',
		name: 'options',
		placeholder: 'Add Option',
		type: 'collection',
		default: {},
		options: [
			{
				displayName: 'Code Interpreter',
				name: 'codeInterpreter',
				type: 'boolean',
				default: false,
				description:
					'Whether to enable the code interpreter that allows the assistants to write and run Python code in a sandboxed execution environment, find more <a href="https://platform.openai.com/docs/assistants/tools/code-interpreter" target="_blank">here</a>',
			},
			{
				displayName: 'Description',
				name: 'description',
				type: 'string',
				default: '',
				description: 'The description of the assistant. The maximum length is 512 characters.',
				placeholder: 'e.g. My personal assistant',
			},
			{
				// eslint-disable-next-line n8n-nodes-base/node-param-display-name-wrong-for-dynamic-multi-options
				displayName: 'Files',
				name: 'file_ids',
				type: 'multiOptions',
				// eslint-disable-next-line n8n-nodes-base/node-param-description-wrong-for-dynamic-multi-options
				description:
					'The files to be used by the assistant, there can be a maximum of 20 files attached to the assistant',
				typeOptions: {
					loadOptionsMethod: 'getFiles',
				},
				default: [],
				hint: "Add more files by using the 'Upload a File' operation, any existing files not selected here will be removed.",
			},
			{
				displayName: 'Instructions',
				name: 'instructions',
				type: 'string',
				description:
					'The system instructions that the assistant uses. The maximum length is 32768 characters.',
				default: '',
				typeOptions: {
					rows: 2,
				},
			},
			{
				displayName: 'Knowledge Retrieval',
				name: 'knowledgeRetrieval',
				type: 'boolean',
				default: false,
				description:
					'Whether to augment the assistant with knowledge from outside its model, such as proprietary product information or documents, find more <a href="https://platform.openai.com/docs/assistants/tools/knowledge-retrieval" target="_blank">here</a>',
			},
			// Unlike on create, the model is optional here: unset keeps the current model.
			{ ...modelRLC, required: false },
			{
				displayName: 'Name',
				name: 'name',
				type: 'string',
				default: '',
				description: 'The name of the assistant. The maximum length is 256 characters.',
				placeholder: 'e.g. My Assistant',
			},
			{
				displayName: 'Remove All Custom Tools (Functions)',
				name: 'removeCustomTools',
				type: 'boolean',
				default: false,
				description: 'Whether to remove all custom tools (functions) from the assistant',
			},
		],
	},
];

// Restrict these properties to the assistant -> update operation.
const displayOptions = {
	show: {
		operation: ['update'],
		resource: ['assistant'],
	},
};

export const description = updateDisplayOptions(displayOptions, properties);
/**
 * Updates an existing assistant. Only the options explicitly set by the user
 * are sent; the tool list is rebuilt from the assistant's current tools plus
 * the code-interpreter / retrieval / remove-custom-tools toggles.
 */
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
	const assistantId = this.getNodeParameter('assistantId', i, '', { extractValue: true }) as string;
	const options = this.getNodeParameter('options', i, {});

	const {
		modelId,
		name,
		instructions,
		codeInterpreter,
		knowledgeRetrieval,
		file_ids,
		removeCustomTools,
	} = options;

	// 'description' would shadow the module-level export, hence the rename.
	const assistantDescription = options.description as string;

	const body: IDataObject = {};

	if (file_ids) {
		// OpenAI caps assistants at 20 attached files.
		if ((file_ids as IDataObject[]).length > 20) {
			throw new NodeOperationError(
				this.getNode(),
				'The maximum number of files that can be attached to the assistant is 20',
				{ itemIndex: i },
			);
		}

		body.file_ids = file_ids;
	}

	if (modelId) {
		// Resolve the resource-locator value to the plain model ID.
		body.model = this.getNodeParameter('options.modelId', i, '', { extractValue: true }) as string;
	}

	if (name) {
		body.name = name;
	}

	if (assistantDescription) {
		body.description = assistantDescription;
	}

	if (instructions) {
		body.instructions = instructions;
	}

	// Start from the assistant's current tools so unrelated tools are preserved.
	let tools =
		((
			await apiRequest.call(this, 'GET', `/assistants/${assistantId}`, {
				headers: {
					'OpenAI-Beta': 'assistants=v1',
				},
			})
		).tools as IDataObject[]) || [];

	if (codeInterpreter && !tools.find((tool) => tool.type === 'code_interpreter')) {
		tools.push({
			type: 'code_interpreter',
		});
	}

	// Strict === false: leaving the toggle unset keeps the current setting.
	if (codeInterpreter === false && tools.find((tool) => tool.type === 'code_interpreter')) {
		tools = tools.filter((tool) => tool.type !== 'code_interpreter');
	}

	if (knowledgeRetrieval && !tools.find((tool) => tool.type === 'retrieval')) {
		tools.push({
			type: 'retrieval',
		});
	}

	if (knowledgeRetrieval === false && tools.find((tool) => tool.type === 'retrieval')) {
		tools = tools.filter((tool) => tool.type !== 'retrieval');
	}

	if (removeCustomTools) {
		tools = tools.filter((tool) => tool.type !== 'function');
	}

	body.tools = tools;

	const response = await apiRequest.call(this, 'POST', `/assistants/${assistantId}`, {
		body,
		headers: {
			'OpenAI-Beta': 'assistants=v1',
		},
	});

	return [
		{
			json: response,
			pairedItem: { item: i },
		},
	];
}

View file

@ -0,0 +1,187 @@
import type {
INodeProperties,
IExecuteFunctions,
IDataObject,
INodeExecutionData,
} from 'n8n-workflow';
import { updateDisplayOptions } from 'n8n-workflow';
import { apiRequest } from '../../transport';
// UI properties for audio -> generate (text to speech).
const properties: INodeProperties[] = [
	{
		displayName: 'Model',
		name: 'model',
		type: 'options',
		default: 'tts-1',
		options: [
			{
				name: 'TTS-1',
				value: 'tts-1',
			},
			{
				name: 'TTS-1-HD',
				value: 'tts-1-hd',
			},
		],
	},
	{
		displayName: 'Text Input',
		name: 'input',
		type: 'string',
		placeholder: 'e.g. The quick brown fox jumped over the lazy dog',
		description: 'The text to generate audio for. The maximum length is 4096 characters.',
		default: '',
		typeOptions: {
			rows: 2,
		},
	},
	{
		displayName: 'Voice',
		name: 'voice',
		type: 'options',
		default: 'alloy',
		description: 'The voice to use when generating the audio',
		options: [
			{
				name: 'Alloy',
				value: 'alloy',
			},
			{
				name: 'Echo',
				value: 'echo',
			},
			{
				name: 'Fable',
				value: 'fable',
			},
			{
				name: 'Nova',
				value: 'nova',
			},
			{
				name: 'Onyx',
				value: 'onyx',
			},
			{
				name: 'Shimmer',
				value: 'shimmer',
			},
		],
	},
	{
		displayName: 'Options',
		name: 'options',
		placeholder: 'Add Option',
		type: 'collection',
		default: {},
		options: [
			{
				displayName: 'Response Format',
				name: 'response_format',
				type: 'options',
				default: 'mp3',
				options: [
					{
						name: 'MP3',
						value: 'mp3',
					},
					{
						name: 'OPUS',
						value: 'opus',
					},
					{
						name: 'AAC',
						value: 'aac',
					},
					{
						name: 'FLAC',
						value: 'flac',
					},
				],
			},
			{
				displayName: 'Audio Speed',
				name: 'speed',
				type: 'number',
				default: 1,
				typeOptions: {
					minValue: 0.25,
					maxValue: 4,
					numberPrecision: 1,
				},
			},
			{
				displayName: 'Put Output in Field',
				name: 'binaryPropertyOutput',
				type: 'string',
				default: 'data',
				hint: 'The name of the output field to put the binary file data in',
			},
		],
	},
];

// Restrict these properties to the audio -> generate operation.
const displayOptions = {
	show: {
		operation: ['generate'],
		resource: ['audio'],
	},
};

export const description = updateDisplayOptions(displayOptions, properties);
/**
 * Generates speech audio from text via POST /audio/speech and returns it as
 * binary data in the configured output field.
 */
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
	const model = this.getNodeParameter('model', i) as string;
	const input = this.getNodeParameter('input', i) as string;
	const voice = this.getNodeParameter('voice', i) as string;
	const options = this.getNodeParameter('options', i, {});

	// Defaults apply whenever the option is unset (falsy).
	const response_format = (options.response_format as string) || 'mp3';
	const speed = (options.speed as number) || 1;

	const body: IDataObject = { model, input, voice, response_format, speed };

	// The endpoint streams raw audio bytes, so disable JSON parsing.
	const option = {
		useStream: true,
		returnFullResponse: true,
		encoding: 'arraybuffer',
		json: false,
	};

	const response = await apiRequest.call(this, 'POST', '/audio/speech', { body, option });

	const binaryData = await this.helpers.prepareBinaryData(
		response,
		`audio.${response_format}`,
		`audio/${response_format}`,
	);

	const outputField = (options.binaryPropertyOutput as string) || 'data';

	return [
		{
			json: {
				...binaryData,
				data: undefined,
			},
			pairedItem: { item: i },
			binary: {
				[outputField]: binaryData,
			},
		},
	];
}

View file

@ -0,0 +1,57 @@
import type { INodeProperties } from 'n8n-workflow';
import * as generate from './generate.operation';
import * as transcribe from './transcribe.operation';
import * as translate from './translate.operation';
export { generate, transcribe, translate };
// Operation selector for the 'audio' resource plus a shared file-size notice,
// followed by the operation-specific properties from each *.operation module.
export const description: INodeProperties[] = [
	{
		displayName: 'Operation',
		name: 'operation',
		type: 'options',
		noDataExpression: true,
		options: [
			{
				name: 'Generate Audio',
				value: 'generate',
				action: 'Generate audio',
				description: 'Creates audio from a text prompt',
			},
			{
				name: 'Transcribe a Recording',
				value: 'transcribe',
				action: 'Transcribe a recording',
				description: 'Transcribes audio into the text',
			},
			{
				name: 'Translate a Recording',
				value: 'translate',
				action: 'Translate a recording',
				description: 'Translate audio into the text in the english language',
			},
		],
		default: 'generate',
		displayOptions: {
			show: {
				resource: ['audio'],
			},
		},
	},
	{
		displayName: 'OpenAI API limits the size of the audio file to 25 MB',
		name: 'fileSizeLimitNotice',
		type: 'notice',
		// NOTE(review): other notices in this node use '' as default — confirm the single space is intentional.
		default: ' ',
		displayOptions: {
			show: {
				resource: ['audio'],
				operation: ['translate', 'transcribe'],
			},
		},
	},
	...generate.description,
	...transcribe.description,
	...translate.description,
];

View file

@ -0,0 +1,95 @@
import type { INodeProperties, IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';
import { updateDisplayOptions } from 'n8n-workflow';
import FormData from 'form-data';
import { apiRequest } from '../../transport';
// UI properties for audio -> transcribe (speech to text).
const properties: INodeProperties[] = [
	{
		displayName: 'Input Data Field Name',
		name: 'binaryPropertyName',
		type: 'string',
		default: 'data',
		placeholder: 'e.g. data',
		hint: 'The name of the input field containing the binary file data to be processed',
		description:
			'Name of the binary property which contains the audio file in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm',
	},
	{
		displayName: 'Options',
		name: 'options',
		placeholder: 'Add Option',
		type: 'collection',
		default: {},
		options: [
			{
				displayName: 'Language of the Audio File',
				name: 'language',
				type: 'string',
				description:
					'The language of the input audio. Supplying the input language in <a href="https://en.wikipedia.org/wiki/List_of_ISO_639_language_codes" target="_blank">ISO-639-1</a> format will improve accuracy and latency.',
				default: '',
			},
			{
				displayName: 'Output Randomness (Temperature)',
				name: 'temperature',
				type: 'number',
				default: 0,
				typeOptions: {
					minValue: 0,
					maxValue: 1,
					numberPrecision: 1,
				},
			},
		],
	},
];

// Restrict these properties to the audio -> transcribe operation.
const displayOptions = {
	show: {
		operation: ['transcribe'],
		resource: ['audio'],
	},
};

export const description = updateDisplayOptions(displayOptions, properties);
// Transcribes a binary audio file to text via POST /audio/transcriptions
// (the model is fixed to whisper-1).
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
	const model = 'whisper-1';
	const binaryPropertyName = this.getNodeParameter('binaryPropertyName', i);
	const options = this.getNodeParameter('options', i, {});

	const binaryData = this.helpers.assertBinaryData(i, binaryPropertyName);
	const dataBuffer = await this.helpers.getBinaryDataBuffer(i, binaryPropertyName);

	const formData = new FormData();
	formData.append('model', model);
	if (options.language) {
		formData.append('language', options.language);
	}
	if (options.temperature) {
		formData.append('temperature', options.temperature.toString());
	}
	formData.append('file', dataBuffer, {
		filename: binaryData.fileName,
		contentType: binaryData.mimeType,
	});

	const response = await apiRequest.call(this, 'POST', '/audio/transcriptions', {
		option: { formData },
		headers: { 'Content-Type': 'multipart/form-data' },
	});

	return [{ json: response, pairedItem: { item: i } }];
}

View file

@ -0,0 +1,82 @@
import type { INodeProperties, IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';
import { updateDisplayOptions } from 'n8n-workflow';
import FormData from 'form-data';
import { apiRequest } from '../../transport';
// UI properties for audio -> translate (audio to English text).
const properties: INodeProperties[] = [
	{
		displayName: 'Input Data Field Name',
		name: 'binaryPropertyName',
		type: 'string',
		default: 'data',
		hint: 'The name of the input field containing the binary file data to be processed',
		placeholder: 'e.g. data',
		description:
			'Name of the binary property which contains the audio file in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm',
	},
	{
		displayName: 'Options',
		name: 'options',
		placeholder: 'Add Option',
		type: 'collection',
		default: {},
		options: [
			{
				displayName: 'Output Randomness (Temperature)',
				name: 'temperature',
				type: 'number',
				default: 0,
				typeOptions: {
					minValue: 0,
					maxValue: 1,
					numberPrecision: 1,
				},
			},
		],
	},
];

// Restrict these properties to the audio -> translate operation.
const displayOptions = {
	show: {
		operation: ['translate'],
		resource: ['audio'],
	},
};

export const description = updateDisplayOptions(displayOptions, properties);
// Translates a binary audio recording to English text via POST /audio/translations
// (the model is fixed to whisper-1).
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
	const model = 'whisper-1';
	const binaryPropertyName = this.getNodeParameter('binaryPropertyName', i);
	const options = this.getNodeParameter('options', i, {});

	const binaryData = this.helpers.assertBinaryData(i, binaryPropertyName);
	const dataBuffer = await this.helpers.getBinaryDataBuffer(i, binaryPropertyName);

	const formData = new FormData();
	formData.append('model', model);
	if (options.temperature) {
		formData.append('temperature', options.temperature.toString());
	}
	formData.append('file', dataBuffer, {
		filename: binaryData.fileName,
		contentType: binaryData.mimeType,
	});

	const response = await apiRequest.call(this, 'POST', '/audio/translations', {
		option: { formData },
		headers: { 'Content-Type': 'multipart/form-data' },
	});

	return [{ json: response, pairedItem: { item: i } }];
}

View file

@ -0,0 +1,53 @@
import type { INodeProperties } from 'n8n-workflow';
export const modelRLC: INodeProperties = {
displayName: 'Model',
name: 'modelId',
type: 'resourceLocator',
default: { mode: 'list', value: '' },
required: true,
modes: [
{
displayName: 'From List',
name: 'list',
type: 'list',
typeOptions: {
searchListMethod: 'modelSearch',
searchable: true,
},
},
{
displayName: 'ID',
name: 'id',
type: 'string',
placeholder: 'e.g. gpt-4',
},
],
};
// Reusable resource locator for picking an OpenAI assistant, either from a
// searchable list (backed by the `assistantSearch` list-search method) or by ID.
export const assistantRLC: INodeProperties = {
	displayName: 'Assistant',
	name: 'assistantId',
	type: 'resourceLocator',
	description:
		'Assistant to respond to the message. You can add, modify or remove assistants in the <a href="https://platform.openai.com/playground?mode=assistant" target="_blank">playground</a>.',
	default: { mode: 'list', value: '' },
	required: true,
	modes: [
		{
			displayName: 'From List',
			name: 'list',
			type: 'list',
			typeOptions: {
				searchListMethod: 'assistantSearch',
				searchable: true,
			},
		},
		{
			displayName: 'ID',
			name: 'id',
			type: 'string',
			placeholder: 'e.g. asst_abc123',
		},
	],
};

View file

@ -0,0 +1,62 @@
import type { INodeProperties, IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';
import { updateDisplayOptions } from 'n8n-workflow';
import { apiRequest } from '../../transport';
// Properties for file → deleteFile: the file to delete, selectable from a
// searchable list or entered directly as an ID (validated against the
// `file-…` ID format).
const properties: INodeProperties[] = [
	{
		displayName: 'File',
		name: 'fileId',
		type: 'resourceLocator',
		default: { mode: 'list', value: '' },
		required: true,
		modes: [
			{
				displayName: 'From List',
				name: 'list',
				type: 'list',
				typeOptions: {
					searchListMethod: 'fileSearch',
					searchable: true,
				},
			},
			{
				displayName: 'ID',
				name: 'id',
				type: 'string',
				validation: [
					{
						type: 'regex',
						properties: {
							regex: 'file-[a-zA-Z0-9]',
							errorMessage: 'Not a valid File ID',
						},
					},
				],
				placeholder: 'e.g. file-1234567890',
			},
		],
	},
];

// Show these properties only for the file → deleteFile operation.
const displayOptions = {
	show: {
		operation: ['deleteFile'],
		resource: ['file'],
	},
};

export const description = updateDisplayOptions(displayOptions, properties);
/**
 * Deletes a file from the OpenAI account by its ID.
 */
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
	// Resource locator: extract the plain file ID regardless of selection mode.
	const fileId = this.getNodeParameter('fileId', i, '', { extractValue: true });

	const response = await apiRequest.call(this, 'DELETE', `/files/${fileId}`);

	const result: INodeExecutionData = {
		json: response,
		pairedItem: { item: i },
	};
	return [result];
}

View file

@ -0,0 +1,46 @@
import type { INodeProperties } from 'n8n-workflow';
import * as upload from './upload.operation';
import * as deleteFile from './deleteFile.operation';
import * as list from './list.operation';
export { upload, deleteFile, list };
// Operation selector for the "file" resource, followed by the properties of
// each operation (merged in via the per-operation description exports).
export const description: INodeProperties[] = [
	{
		displayName: 'Operation',
		name: 'operation',
		type: 'options',
		noDataExpression: true,
		options: [
			{
				name: 'Delete a File',
				value: 'deleteFile',
				action: 'Delete a file',
				description: 'Delete a file from the server',
			},
			{
				name: 'List Files',
				value: 'list',
				action: 'List files',
				description: "Returns a list of files that belong to the user's organization",
			},
			{
				name: 'Upload a File',
				value: 'upload',
				action: 'Upload a file',
				description: 'Upload a file that can be used across various endpoints',
			},
		],
		default: 'upload',
		displayOptions: {
			show: {
				resource: ['file'],
			},
		},
	},
	...upload.description,
	...deleteFile.description,
	...list.description,
];

View file

@ -0,0 +1,67 @@
import type {
IDataObject,
INodeProperties,
IExecuteFunctions,
INodeExecutionData,
} from 'n8n-workflow';
import { updateDisplayOptions } from 'n8n-workflow';
import { apiRequest } from '../../transport';
// Properties for file → list: an optional purpose filter
// ('any' means no filter is sent to the API).
const properties: INodeProperties[] = [
	{
		displayName: 'Options',
		name: 'options',
		placeholder: 'Add Option',
		type: 'collection',
		default: {},
		options: [
			{
				displayName: 'Purpose',
				name: 'purpose',
				type: 'options',
				default: 'any',
				description: 'Only return files with the given purpose',
				options: [
					{
						name: 'Any [Default]',
						value: 'any',
					},
					{
						name: 'Assistants',
						value: 'assistants',
					},
					{
						name: 'Fine-Tune',
						value: 'fine-tune',
					},
				],
			},
		],
	},
];

// Show these properties only for the file → list operation.
const displayOptions = {
	show: {
		operation: ['list'],
		resource: ['file'],
	},
};

export const description = updateDisplayOptions(displayOptions, properties);
/**
 * Lists the files stored in the OpenAI account, optionally filtered by
 * purpose. Returns one item per file.
 */
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
	const options = this.getNodeParameter('options', i, {});

	// 'any' is the UI default and means "do not filter" — omit the param.
	const qs: IDataObject = {};
	const purpose = options.purpose as string | undefined;
	if (purpose && purpose !== 'any') {
		qs.purpose = purpose;
	}

	const { data } = await apiRequest.call(this, 'GET', '/files', { qs });

	const files: IDataObject[] = data || [];
	return files.map((file) => ({
		json: file,
		pairedItem: { item: i },
	}));
}

View file

@ -0,0 +1,98 @@
import type { INodeProperties, IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';
import { updateDisplayOptions, NodeOperationError } from 'n8n-workflow';
import FormData from 'form-data';
import { apiRequest } from '../../transport';
// Properties for file → upload: the binary field holding the file, and the
// upload purpose ('assistants' or 'fine-tune').
const properties: INodeProperties[] = [
	{
		displayName: 'Input Data Field Name',
		name: 'binaryPropertyName',
		type: 'string',
		default: 'data',
		hint: 'The name of the input field containing the binary file data to be processed',
		placeholder: 'e.g. data',
		description:
			'Name of the binary property which contains the file. The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants.',
	},
	{
		displayName: 'Options',
		name: 'options',
		placeholder: 'Add Option',
		type: 'collection',
		default: {},
		options: [
			{
				displayName: 'Purpose',
				name: 'purpose',
				type: 'options',
				default: 'assistants',
				description:
					"The intended purpose of the uploaded file, the 'Fine-tuning' only supports .jsonl files",
				options: [
					{
						name: 'Assistants',
						value: 'assistants',
					},
					{
						name: 'Fine-Tune',
						value: 'fine-tune',
					},
				],
			},
		],
	},
];

// Show these properties only for the file → upload operation.
const displayOptions = {
	show: {
		operation: ['upload'],
		resource: ['file'],
	},
};

export const description = updateDisplayOptions(displayOptions, properties);
/**
 * Uploads the item's binary data to OpenAI's `/files` endpoint.
 * A generic "Bad request" caused by a non-JSONL fine-tuning file is mapped
 * to a clearer node error; all other errors are rethrown unchanged.
 */
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
	const propertyName = this.getNodeParameter('binaryPropertyName', i);
	const uploadOptions = this.getNodeParameter('options', i, {});

	const fileBinary = this.helpers.assertBinaryData(i, propertyName);
	const fileBuffer = await this.helpers.getBinaryDataBuffer(i, propertyName);

	const formData = new FormData();
	formData.append('purpose', uploadOptions.purpose || 'assistants');
	formData.append('file', fileBuffer, {
		filename: fileBinary.fileName,
		contentType: fileBinary.mimeType,
	});

	try {
		const response = await apiRequest.call(this, 'POST', '/files', {
			option: { formData },
			headers: {
				'Content-Type': 'multipart/form-data',
			},
		});

		return [
			{
				json: response,
				pairedItem: { item: i },
			},
		];
	} catch (error) {
		// Fine-tuning uploads of non-JSONL content surface as a generic
		// "Bad request"; detect that case and raise a descriptive error instead.
		const isJsonlFormatError =
			error.message.includes('Bad request') &&
			error.description &&
			error.description.includes('Expected file to have JSONL format');

		if (isJsonlFormatError) {
			throw new NodeOperationError(this.getNode(), 'The file content is not in JSONL format', {
				description:
					'Fine-tuning accepts only files in JSONL format, where every line is a valid JSON dictionary',
			});
		}
		throw error;
	}
}

View file

@ -0,0 +1,211 @@
import type {
INodeProperties,
IExecuteFunctions,
INodeExecutionData,
IDataObject,
} from 'n8n-workflow';
import { updateDisplayOptions, NodeOperationError } from 'n8n-workflow';
import { apiRequest } from '../../transport';
// Properties for image → analyze: the question text, the image source
// (comma-separated URLs or binary fields), an output simplifier, and
// request options (vision detail level, max response tokens).
const properties: INodeProperties[] = [
	{
		displayName: 'Text Input',
		name: 'text',
		type: 'string',
		placeholder: "e.g. What's in this image?",
		default: "What's in this image?",
		typeOptions: {
			rows: 2,
		},
	},
	{
		displayName: 'Input Type',
		name: 'inputType',
		type: 'options',
		default: 'url',
		options: [
			{
				name: 'Image URL(s)',
				value: 'url',
			},
			{
				name: 'Binary File(s)',
				value: 'base64',
			},
		],
	},
	{
		// Shown only when inputType is 'url'.
		displayName: 'URL(s)',
		name: 'imageUrls',
		type: 'string',
		placeholder: 'e.g. https://example.com/image.jpeg',
		description: 'URL(s) of the image(s) to analyze, multiple URLs can be added separated by comma',
		default: '',
		displayOptions: {
			show: {
				inputType: ['url'],
			},
		},
	},
	{
		// Shown only when inputType is 'base64'.
		displayName: 'Input Data Field Name',
		name: 'binaryPropertyName',
		type: 'string',
		default: 'data',
		placeholder: 'e.g. data',
		hint: 'The name of the input field containing the binary file data to be processed',
		description: 'Name of the binary property which contains the image(s)',
		displayOptions: {
			show: {
				inputType: ['base64'],
			},
		},
	},
	{
		displayName: 'Simplify Output',
		name: 'simplify',
		type: 'boolean',
		default: true,
		description: 'Whether to simplify the response or not',
	},
	{
		displayName: 'Options',
		name: 'options',
		placeholder: 'Add Option',
		type: 'collection',
		default: {},
		options: [
			{
				// OpenAI vision fidelity setting; higher detail costs more tokens.
				displayName: 'Detail',
				name: 'detail',
				type: 'options',
				default: 'auto',
				options: [
					{
						name: 'Auto',
						value: 'auto',
						description:
							'Model will look at the image input size and decide if it should use the low or high setting',
					},
					{
						name: 'Low',
						value: 'low',
						description: 'Return faster responses and consume fewer tokens',
					},
					{
						name: 'High',
						value: 'high',
						description: 'Return more detailed responses, consumes more tokens',
					},
				],
			},
			{
				displayName: 'Length of Description (Max Tokens)',
				description: 'Fewer tokens will result in shorter, less detailed image description',
				name: 'maxTokens',
				type: 'number',
				default: 300,
				typeOptions: {
					minValue: 1,
				},
			},
		],
	},
];

// Show these properties only for the image → analyze operation.
const displayOptions = {
	show: {
		operation: ['analyze'],
		resource: ['image'],
	},
};

export const description = updateDisplayOptions(displayOptions, properties);
/**
 * Analyzes one or more images with the gpt-4-vision-preview model.
 * Accepts either public image URLs or binary data from the workflow item and
 * asks the model the user's text question about them. Returns the raw chat
 * completion, or just the answer text when "Simplify Output" is enabled.
 */
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
	const model = 'gpt-4-vision-preview';
	const text = this.getNodeParameter('text', i, '') as string;
	const inputType = this.getNodeParameter('inputType', i) as string;
	const options = this.getNodeParameter('options', i, {});

	// The user's question always comes first in the multimodal content array.
	const content: IDataObject[] = [
		{
			type: 'text',
			text,
		},
	];

	const detail = (options.detail as string) || 'auto';

	if (inputType === 'url') {
		// Multiple URLs may be supplied as a comma-separated list.
		const imageUrls = (this.getNodeParameter('imageUrls', i) as string)
			.split(',')
			.map((url) => url.trim());
		for (const url of imageUrls) {
			content.push({
				type: 'image_url',
				image_url: {
					url,
					detail,
				},
			});
		}
	} else {
		// Multiple binary properties may be supplied as a comma-separated list.
		const binaryPropertyNames = (this.getNodeParameter('binaryPropertyName', i) as string)
			.split(',')
			.map((propertyName) => propertyName.trim());
		for (const propertyName of binaryPropertyNames) {
			const binaryData = this.helpers.assertBinaryData(i, propertyName);

			// assertBinaryData throws for missing data, so this is a defensive
			// fallback only — but it must run BEFORE the data is accessed.
			// (Previously it was placed after the first use, making it dead code.)
			if (!binaryData) {
				throw new NodeOperationError(this.getNode(), 'No binary data exists on item!');
			}

			let fileBase64;
			if (binaryData.id) {
				// Externally stored binary data: stream it in and re-encode to base64.
				const chunkSize = 256 * 1024;
				const stream = await this.helpers.getBinaryStream(binaryData.id, chunkSize);
				const buffer = await this.helpers.binaryToBuffer(stream);
				fileBase64 = buffer.toString('base64');
			} else {
				// Inline binary data is already base64 encoded.
				fileBase64 = binaryData.data;
			}

			content.push({
				type: 'image_url',
				image_url: {
					url: `data:${binaryData.mimeType};base64,${fileBase64}`,
					detail,
				},
			});
		}
	}

	const body = {
		model,
		messages: [
			{
				role: 'user',
				content,
			},
		],
		max_tokens: (options.maxTokens as number) || 300,
	};

	let response = await apiRequest.call(this, 'POST', '/chat/completions', { body });

	const simplify = this.getNodeParameter('simplify', i) as boolean;
	if (simplify && response.choices) {
		response = { content: response.choices[0].message.content };
	}

	return [
		{
			json: response,
			pairedItem: { item: i },
		},
	];
}

View file

@ -0,0 +1,244 @@
import type {
INodeProperties,
IExecuteFunctions,
INodeExecutionData,
IDataObject,
} from 'n8n-workflow';
import { updateDisplayOptions } from 'n8n-workflow';
import { apiRequest } from '../../transport';
// Properties for image → generate: model choice, prompt, and options.
// Note that several options are model-specific and gated via
// `displayOptions.show['/model']` — including TWO fields named 'size'
// (one per model) that expose different resolution sets.
const properties: INodeProperties[] = [
	{
		displayName: 'Model',
		name: 'model',
		type: 'options',
		default: 'dall-e-3',
		description: 'The model to use for image generation',
		options: [
			{
				name: 'DALL-E-2',
				value: 'dall-e-2',
			},
			{
				name: 'DALL-E-3',
				value: 'dall-e-3',
			},
		],
	},
	{
		displayName: 'Prompt',
		name: 'prompt',
		type: 'string',
		placeholder: 'e.g. A cute cat eating a dinosaur',
		description:
			'A text description of the desired image(s). The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.',
		default: '',
		typeOptions: {
			rows: 2,
		},
	},
	{
		displayName: 'Options',
		name: 'options',
		placeholder: 'Add Option',
		type: 'collection',
		default: {},
		options: [
			{
				// dall-e-2 only: dall-e-3 supports a single image per request.
				displayName: 'Number of Images',
				name: 'n',
				default: 1,
				description: 'Number of images to generate',
				type: 'number',
				typeOptions: {
					minValue: 1,
					maxValue: 10,
				},
				displayOptions: {
					show: {
						'/model': ['dall-e-2'],
					},
				},
			},
			{
				// dall-e-3 only.
				displayName: 'Quality',
				name: 'quality',
				type: 'options',
				description:
					'The quality of the image that will be generated, HD creates images with finer details and greater consistency across the image',
				options: [
					{
						name: 'HD',
						value: 'hd',
					},
					{
						name: 'Standard',
						value: 'standard',
					},
				],
				displayOptions: {
					show: {
						'/model': ['dall-e-3'],
					},
				},
				default: 'standard',
			},
			{
				// Resolutions supported by dall-e-2.
				displayName: 'Resolution',
				name: 'size',
				type: 'options',
				options: [
					{
						name: '256x256',
						value: '256x256',
					},
					{
						name: '512x512',
						value: '512x512',
					},
					{
						name: '1024x1024',
						value: '1024x1024',
					},
				],
				displayOptions: {
					show: {
						'/model': ['dall-e-2'],
					},
				},
				default: '1024x1024',
			},
			{
				// Resolutions supported by dall-e-3 (same 'size' name, different set).
				displayName: 'Resolution',
				name: 'size',
				type: 'options',
				options: [
					{
						name: '1024x1024',
						value: '1024x1024',
					},
					{
						name: '1792x1024',
						value: '1792x1024',
					},
					{
						name: '1024x1792',
						value: '1024x1792',
					},
				],
				displayOptions: {
					show: {
						'/model': ['dall-e-3'],
					},
				},
				default: '1024x1024',
			},
			{
				// dall-e-3 only.
				displayName: 'Style',
				name: 'style',
				type: 'options',
				options: [
					{
						name: 'Natural',
						value: 'natural',
						description: 'Produce more natural looking images',
					},
					{
						name: 'Vivid',
						value: 'vivid',
						description: 'Lean towards generating hyper-real and dramatic images',
					},
				],
				displayOptions: {
					show: {
						'/model': ['dall-e-3'],
					},
				},
				default: 'vivid',
			},
			{
				// Node-level option (not forwarded to the API): return URLs as JSON
				// instead of downloading the images into binary data.
				displayName: 'Respond with Image URL(s)',
				name: 'returnImageUrls',
				type: 'boolean',
				default: false,
				description: 'Whether to return image URL(s) instead of binary file(s)',
			},
			{
				// Node-level option: target binary property for the generated image.
				displayName: 'Put Output in Field',
				name: 'binaryPropertyOutput',
				type: 'string',
				default: 'data',
				hint: 'The name of the output field to put the binary file data in',
				displayOptions: {
					show: {
						returnImageUrls: [false],
					},
				},
			},
		],
	},
];

// Show these properties only for the image → generate operation.
const displayOptions = {
	show: {
		operation: ['generate'],
		resource: ['image'],
	},
};

export const description = updateDisplayOptions(displayOptions, properties);
/**
 * Generates image(s) from a text prompt via `/images/generations`.
 * Depending on the "Respond with Image URL(s)" option the result is either
 * one JSON item per image URL, or one item per image with the decoded file
 * attached as binary data.
 */
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
	const model = this.getNodeParameter('model', i) as string;
	const prompt = this.getNodeParameter('prompt', i) as string;
	const options = this.getNodeParameter('options', i, {});

	let response_format = 'b64_json';
	let binaryPropertyOutput = 'data';

	if (options.returnImageUrls) {
		response_format = 'url';
	}

	// These two options steer the node's output handling only and must not be
	// forwarded to the API, so strip them before spreading options into body.
	if (options.binaryPropertyOutput) {
		binaryPropertyOutput = options.binaryPropertyOutput as string;
		delete options.binaryPropertyOutput;
	}
	delete options.returnImageUrls;

	const body: IDataObject = {
		prompt,
		model,
		response_format,
		...options,
	};

	const { data } = await apiRequest.call(this, 'POST', '/images/generations', { body });

	if (response_format === 'url') {
		return ((data as IDataObject[]) || []).map((entry) => ({
			json: entry,
			pairedItem: { item: i },
		}));
	}

	const returnData: INodeExecutionData[] = [];
	// Guard against a missing payload: iterating `undefined` would throw here,
	// while the URL branch above already tolerated it.
	for (const entry of (data as IDataObject[]) || []) {
		const binaryData = await this.helpers.prepareBinaryData(
			Buffer.from(entry.b64_json as string, 'base64'),
			'data',
		);
		returnData.push({
			// Expose the binary metadata in json but omit the large base64 payload.
			json: Object.assign({}, binaryData, {
				data: undefined,
			}),
			binary: {
				[binaryPropertyOutput]: binaryData,
			},
			pairedItem: { item: i },
		});
	}
	return returnData;
}

View file

@ -0,0 +1,37 @@
import type { INodeProperties } from 'n8n-workflow';
import * as generate from './generate.operation';
import * as analyze from './analyze.operation';
export { generate, analyze };
// Operation selector for the "image" resource, followed by the properties of
// each operation (merged in via the per-operation description exports).
export const description: INodeProperties[] = [
	{
		displayName: 'Operation',
		name: 'operation',
		type: 'options',
		noDataExpression: true,
		options: [
			{
				name: 'Analyze Image',
				value: 'analyze',
				action: 'Analyze image',
				description: 'Take in images and answer questions about them',
			},
			{
				name: 'Generate an Image',
				value: 'generate',
				action: 'Generate an image',
				description: 'Creates an image from a text prompt',
			},
		],
		default: 'generate',
		displayOptions: {
			show: {
				resource: ['image'],
			},
		},
	},
	...generate.description,
	...analyze.description,
];

View file

@ -0,0 +1,11 @@
import type { AllEntities } from 'n8n-workflow';
// Maps each resource of the OpenAI node to the union of its operations.
type NodeMap = {
	assistant: 'message' | 'create' | 'deleteAssistant' | 'list' | 'update';
	audio: 'generate' | 'transcribe' | 'translate';
	file: 'upload' | 'deleteFile' | 'list';
	image: 'generate' | 'analyze';
	text: 'message' | 'classify';
};

// Discriminated { resource, operation } pair used by the router for dispatch.
export type OpenAiType = AllEntities<NodeMap>;

View file

@ -0,0 +1,65 @@
import { NodeOperationError, type IExecuteFunctions, type INodeExecutionData } from 'n8n-workflow';
import * as assistant from './assistant';
import * as audio from './audio';
import * as file from './file';
import * as image from './image';
import * as text from './text';
import type { OpenAiType } from './node.type';
/**
 * Entry point for the OpenAI node: resolves the selected resource/operation
 * pair to the matching operation module and runs its `execute` once per
 * input item. Per-item errors respect the node's "Continue on Fail" setting.
 */
export async function router(this: IExecuteFunctions) {
	const returnData: INodeExecutionData[] = [];
	const items = this.getInputData();
	const resource = this.getNodeParameter<OpenAiType>('resource', 0);
	const operation = this.getNodeParameter('operation', 0);
	// Combine into a discriminated pair so the switch below narrows the
	// operation union per resource at compile time.
	const openAiTypeData = {
		resource,
		operation,
	} as OpenAiType;
	let execute;
	switch (openAiTypeData.resource) {
		case 'assistant':
			execute = assistant[openAiTypeData.operation].execute;
			break;
		case 'audio':
			execute = audio[openAiTypeData.operation].execute;
			break;
		case 'file':
			execute = file[openAiTypeData.operation].execute;
			break;
		case 'image':
			execute = image[openAiTypeData.operation].execute;
			break;
		case 'text':
			execute = text[openAiTypeData.operation].execute;
			break;
		default:
			// Unreachable for well-formed parameters; guards against stale workflows.
			throw new NodeOperationError(
				this.getNode(),
				`The operation "${operation}" is not supported!`,
			);
	}
	for (let i = 0; i < items.length; i++) {
		try {
			const responseData = await execute.call(this, i);
			returnData.push(...responseData);
		} catch (error) {
			// With "Continue on Fail" enabled, emit the error as the item's output
			// and keep processing the remaining items.
			if (this.continueOnFail()) {
				returnData.push({ json: { error: error.message }, pairedItem: { item: i } });
				continue;
			}
			throw new NodeOperationError(this.getNode(), error, {
				itemIndex: i,
				description: error.description,
			});
		}
	}
	return [returnData];
}

View file

@ -0,0 +1,83 @@
import type { INodeProperties, IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';
import { updateDisplayOptions } from 'n8n-workflow';
import { apiRequest } from '../../transport';
// Properties for text → classify (the OpenAI moderation endpoint): the text
// to check, an output simplifier, and a model-stability option.
const properties: INodeProperties[] = [
	{
		displayName: 'Text Input',
		name: 'input',
		type: 'string',
		placeholder: 'e.g. Sample text goes here',
		// Fixed ungrammatical description ("to classify if it is violates").
		description: 'The input text to classify and check whether it violates the moderation policy',
		default: '',
		typeOptions: {
			rows: 2,
		},
	},
	{
		displayName: 'Simplify Output',
		name: 'simplify',
		type: 'boolean',
		default: false,
		description: 'Whether to return a simplified version of the response instead of the raw data',
	},
	{
		displayName: 'Options',
		name: 'options',
		placeholder: 'Add Option',
		type: 'collection',
		default: {},
		options: [
			{
				displayName: 'Use Stable Model',
				name: 'useStableModel',
				type: 'boolean',
				default: false,
				description:
					'Whether to use the stable version of the model instead of the latest version, accuracy may be slightly lower',
			},
		],
	},
];

// Show these properties only for the text → classify operation.
const displayOptions = {
	show: {
		operation: ['classify'],
		resource: ['text'],
	},
};

export const description = updateDisplayOptions(displayOptions, properties);
/**
 * Classifies text against OpenAI's moderation policy via `/moderations`.
 * Returns the first moderation result, or just `{ flagged }` when
 * "Simplify Output" is enabled.
 */
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
	const input = this.getNodeParameter('input', i) as string;
	// Default to {} — 'options' is an optional collection; this matches how
	// the sibling operations read it and avoids an error when it is unset.
	const options = this.getNodeParameter('options', i, {});

	const model = options.useStableModel ? 'text-moderation-stable' : 'text-moderation-latest';

	const body = {
		input,
		model,
	};

	const { results } = await apiRequest.call(this, 'POST', '/moderations', { body });

	// Also guard against an empty results array (previously only a missing
	// array was handled, so results[0] could have been undefined).
	if (!results || !results.length) return [];

	const simplify = this.getNodeParameter('simplify', i) as boolean;
	const json = simplify ? { flagged: results[0].flagged } : results[0];

	return [
		{
			json,
			pairedItem: { item: i },
		},
	];
}

View file

@ -0,0 +1,39 @@
import type { INodeProperties } from 'n8n-workflow';
import * as classify from './classify.operation';
import * as message from './message.operation';
export { classify, message };
// Operation selector for the "text" resource, followed by the properties of
// each operation (merged in via the per-operation description exports).
export const description: INodeProperties[] = [
	{
		displayName: 'Operation',
		name: 'operation',
		type: 'options',
		noDataExpression: true,
		options: [
			{
				name: 'Message a Model',
				value: 'message',
				action: 'Message a model',
				// eslint-disable-next-line n8n-nodes-base/node-param-description-excess-final-period
				description: 'Create a completion with GPT 3, 4, etc.',
			},
			{
				name: 'Classify Text for Violations',
				value: 'classify',
				action: 'Classify text for violations',
				description: 'Check whether content complies with usage policies',
			},
		],
		default: 'message',
		displayOptions: {
			show: {
				resource: ['text'],
			},
		},
	},
	...classify.description,
	...message.description,
];

View file

@ -0,0 +1,272 @@
import type {
INodeProperties,
IExecuteFunctions,
INodeExecutionData,
IDataObject,
} from 'n8n-workflow';
import { NodeConnectionType, updateDisplayOptions } from 'n8n-workflow';
import { apiRequest } from '../../transport';
import type { ChatCompletion } from '../../helpers/interfaces';
import type { Tool } from 'langchain/tools';
import { formatToOpenAIAssistantTool } from '../../helpers/utils';
import { modelRLC } from '../descriptions';
// Properties for text → message (chat completions): model picker, the
// message list (content + role), output toggles, a notice about attaching
// tools on the canvas, and the usual sampling options.
const properties: INodeProperties[] = [
	modelRLC,
	{
		displayName: 'Messages',
		name: 'messages',
		type: 'fixedCollection',
		typeOptions: {
			sortable: true,
			multipleValues: true,
		},
		placeholder: 'Add Message',
		default: { values: [{ content: '' }] },
		options: [
			{
				displayName: 'Values',
				name: 'values',
				values: [
					{
						displayName: 'Text',
						name: 'content',
						type: 'string',
						description: 'The content of the message to be send',
						default: '',
						typeOptions: {
							rows: 2,
						},
					},
					{
						displayName: 'Role',
						name: 'role',
						type: 'options',
						description:
							"Role in shaping the model's response, it tells the model how it should behave and interact with the user",
						options: [
							{
								name: 'User',
								value: 'user',
								description: 'Send a message as a user and get a response from the model',
							},
							{
								name: 'Assistant',
								value: 'assistant',
								description: 'Tell the model to adopt a specific tone or personality',
							},
							{
								name: 'System',
								value: 'system',
								description:
									"Usually used to set the model's behavior or context for the next user message",
							},
						],
						default: 'user',
					},
				],
			},
		],
	},
	{
		displayName: 'Simplify Output',
		name: 'simplify',
		type: 'boolean',
		default: true,
		description: 'Whether to return a simplified version of the response instead of the raw data',
	},
	{
		// Only shown for models known to support JSON mode.
		displayName: 'Output Content as JSON',
		name: 'jsonOutput',
		type: 'boolean',
		description:
			'Whether to attempt to return the response in JSON format, supported by gpt-3.5-turbo-1106 and gpt-4-1106-preview',
		default: false,
		displayOptions: {
			show: {
				modelId: ['gpt-3.5-turbo-1106', 'gpt-4-1106-preview'],
			},
		},
	},
	{
		displayName: 'Connect your own custom n8n tools to this node on the canvas',
		name: 'noticeTools',
		type: 'notice',
		default: '',
	},
	{
		displayName: 'Options',
		name: 'options',
		placeholder: 'Add Option',
		type: 'collection',
		default: {},
		options: [
			{
				displayName: 'Frequency Penalty',
				name: 'frequency_penalty',
				default: 0,
				typeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },
				description:
					"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim",
				type: 'number',
			},
			{
				displayName: 'Maximum Number of Tokens',
				name: 'maxTokens',
				default: 16,
				description:
					'The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).',
				type: 'number',
				typeOptions: {
					maxValue: 32768,
				},
			},
			{
				displayName: 'Number of Completions',
				name: 'n',
				default: 1,
				description:
					'How many completions to generate for each prompt. Note: Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for max_tokens and stop.',
				type: 'number',
			},
			{
				displayName: 'Presence Penalty',
				name: 'presence_penalty',
				default: 0,
				typeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },
				description:
					"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics",
				type: 'number',
			},
			{
				displayName: 'Output Randomness (Temperature)',
				name: 'temperature',
				default: 1,
				typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
				description:
					'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive. We generally recommend altering this or temperature but not both.',
				type: 'number',
			},
			{
				displayName: 'Output Randomness (Top P)',
				name: 'topP',
				default: 1,
				typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
				description:
					'An alternative to sampling with temperature, controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.',
				type: 'number',
			},
		],
	},
];

// Show these properties only for the text → message operation.
const displayOptions = {
	show: {
		operation: ['message'],
		resource: ['text'],
	},
};

export const description = updateDisplayOptions(displayOptions, properties);
/**
 * Sends the configured messages to `/chat/completions`.
 * If AI tools are connected on the canvas, the model may respond with tool
 * calls; these are executed locally and their results fed back to the model
 * until it returns a plain answer.
 */
export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
	const model = this.getNodeParameter('modelId', i, '', { extractValue: true });
	let messages = this.getNodeParameter('messages.values', i, []) as IDataObject[];
	const options = this.getNodeParameter('options', i, {});
	const jsonOutput = this.getNodeParameter('jsonOutput', i, false) as boolean;

	let response_format;
	if (jsonOutput) {
		// JSON mode requires a system message instructing the model accordingly.
		response_format = { type: 'json_object' };
		messages = [
			{
				role: 'system',
				content: 'You are a helpful assistant designed to output JSON.',
			},
			...messages,
		];
	}

	// Tools attached via the AiTool connection, converted to OpenAI's schema.
	const externalTools =
		((await this.getInputConnectionData(NodeConnectionType.AiTool, 0)) as Tool[]) || [];
	let tools;

	if (externalTools.length) {
		tools = externalTools.length ? externalTools?.map(formatToOpenAIAssistantTool) : undefined;
	}

	// NOTE: body.messages aliases `messages`, so pushes below are included
	// when the request is re-sent inside the tool-call loop.
	const body: IDataObject = {
		model,
		messages,
		tools,
		response_format,
		...options,
	};

	let response = (await apiRequest.call(this, 'POST', '/chat/completions', {
		body,
	})) as ChatCompletion;

	if (!response) return [];

	let toolCalls = response?.choices[0]?.message?.tool_calls;

	// Keep resolving tool calls until the model stops requesting them.
	// NOTE(review): there is no iteration cap — presumably the model always
	// terminates; confirm whether a safety limit is wanted.
	while (toolCalls && toolCalls.length) {
		messages.push(response.choices[0].message);

		for (const toolCall of toolCalls) {
			const functionName = toolCall.function.name;
			const functionArgs = toolCall.function.arguments;

			let functionResponse;
			// NOTE(review): if no connected tool matches functionName,
			// functionResponse stays undefined and is sent back as-is.
			for (const tool of externalTools ?? []) {
				if (tool.name === functionName) {
					functionResponse = await tool.invoke(functionArgs);
				}
			}

			if (typeof functionResponse === 'object') {
				functionResponse = JSON.stringify(functionResponse);
			}

			messages.push({
				tool_call_id: toolCall.id,
				role: 'tool',
				content: functionResponse,
			});
		}

		response = (await apiRequest.call(this, 'POST', '/chat/completions', {
			body,
		})) as ChatCompletion;

		toolCalls = response.choices[0].message.tool_calls;
	}

	if (response_format) {
		// In JSON mode, parse the content string in place; leave it untouched
		// if parsing fails.
		response.choices = response.choices.map((choice) => {
			try {
				choice.message.content = JSON.parse(choice.message.content);
			} catch (error) {}

			return choice;
		});
	}

	const simplify = this.getNodeParameter('simplify', i) as boolean;

	const returnData: INodeExecutionData[] = [];

	if (simplify) {
		// One output item per choice instead of the full API envelope.
		for (const entry of response.choices) {
			returnData.push({
				json: entry,
				pairedItem: { item: i },
			});
		}
	} else {
		returnData.push({ json: response, pairedItem: { item: i } });
	}

	return returnData;
}

View file

@ -0,0 +1,127 @@
/* eslint-disable n8n-nodes-base/node-filename-against-convention */
import type { INodeTypeDescription } from 'n8n-workflow';
import { NodeConnectionType } from 'n8n-workflow';
import * as assistant from './assistant';
import * as audio from './audio';
import * as file from './file';
import * as image from './image';
import * as text from './text';
// Builds the node subtitle from the selected resource/operation, e.g.
// "Message Model", "Transcribe Recording" or "List Files".
// NOTE: this function is serialized into the subtitle expression below and
// evaluated on the frontend, so it must remain fully self-contained.
const prettifyOperation = (resource: string, operation: string) => {
	if (operation === 'deleteAssistant') {
		return 'Delete Assistant';
	}

	if (operation === 'deleteFile') {
		return 'Delete File';
	}

	if (operation === 'classify') {
		return 'Classify Text';
	}

	if (operation === 'message' && resource === 'text') {
		return 'Message Model';
	}

	const capitalize = (str: string) => str.charAt(0).toUpperCase() + str.slice(1);

	// Audio transcription/translation operates on a "recording".
	if (['transcribe', 'translate'].includes(operation)) {
		resource = 'recording';
	}

	// List operations are pluralized, e.g. "List Files".
	if (operation === 'list') {
		resource = `${resource}s`;
	}

	return `${capitalize(operation)} ${capitalize(resource)}`;
};
// Computes the node's input connections: the assistant/text "message"
// operations additionally accept AI tool connections; everything else only
// has the main input. Serialized into the inputs expression below, so it
// must remain self-contained apart from NodeConnectionType.
const configureNodeInputs = (resource: string, operation: string) => {
	const acceptsTools = operation === 'message' && ['assistant', 'text'].includes(resource);

	if (acceptsTools) {
		return [
			{ type: NodeConnectionType.Main },
			{ type: NodeConnectionType.AiTool, displayName: 'Tools' },
		];
	}

	return [NodeConnectionType.Main];
};
// eslint-disable-next-line n8n-nodes-base/node-class-description-missing-subtitle
// Node description for this version of the OpenAI node. The subtitle and
// inputs embed the helper functions above as stringified expressions, so
// they are recomputed whenever resource/operation change.
export const versionDescription: INodeTypeDescription = {
	displayName: 'OpenAI',
	name: 'openAi',
	icon: 'file:openAi.svg',
	group: ['transform'],
	version: 1,
	subtitle: `={{(${prettifyOperation})($parameter.resource, $parameter.operation)}}`,
	description: 'Message an assistant or GPT, analyze images, generate audio, etc.',
	defaults: {
		name: 'OpenAI',
	},
	codex: {
		alias: ['LangChain', 'ChatGPT', 'DallE'],
		categories: ['AI'],
		subcategories: {
			AI: ['Agents', 'Miscellaneous'],
		},
		resources: {
			primaryDocumentation: [
				{
					url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.openai/',
				},
			],
		},
	},
	inputs: `={{(${configureNodeInputs})($parameter.resource, $parameter.operation)}}`,
	outputs: ['main'],
	credentials: [
		{
			name: 'openAiApi',
			required: true,
		},
	],
	properties: [
		{
			displayName: 'Resource',
			name: 'resource',
			type: 'options',
			noDataExpression: true,
			// eslint-disable-next-line n8n-nodes-base/node-param-options-type-unsorted-items
			options: [
				{
					name: 'Assistant',
					value: 'assistant',
				},
				{
					name: 'Text',
					value: 'text',
				},
				{
					name: 'Image',
					value: 'image',
				},
				{
					name: 'Audio',
					value: 'audio',
				},
				{
					name: 'File',
					value: 'file',
				},
			],
			default: 'text',
		},
		// Per-resource operation selectors and operation properties.
		...assistant.description,
		...audio.description,
		...file.description,
		...image.description,
		...text.description,
	],
};

View file

@ -0,0 +1,58 @@
import type { IDataObject } from 'n8n-workflow';
// Shape of a `/chat/completions` response as consumed by this node
// (including the optional tool_calls used by the tool loop).
export type ChatCompletion = {
	id: string;
	object: string;
	created: number;
	model: string;
	choices: Array<{
		index: number;
		message: {
			role: string;
			content: string;
			tool_calls?: Array<{
				id: string;
				type: 'function';
				function: {
					name: string;
					arguments: string;
				};
			}>;
		};
		finish_reason?: 'tool_calls';
	}>;
	usage: {
		prompt_tokens: number;
		completion_tokens: number;
		total_tokens: number;
	};
	system_fingerprint: string;
};

// A single message inside an Assistants API thread.
export type ThreadMessage = {
	id: string;
	object: string;
	created_at: number;
	thread_id: string;
	role: string;
	content: Array<{
		type: string;
		text: {
			value: string;
			annotations: string[];
		};
	}>;
	file_ids: string[];
	assistant_id: string;
	run_id: string;
	metadata: IDataObject;
};

// Options describing an external HTTP call to perform on behalf of a tool.
export type ExternalApiCallOptions = {
	callExternalApi: boolean;
	url: string;
	path: string;
	method: string;
	requestOptions: IDataObject;
	sendParametersIn: string;
};

View file

@ -0,0 +1,47 @@
import { zodToJsonSchema } from 'zod-to-json-schema';
import type { OpenAI as OpenAIClient } from 'openai';
import type { StructuredTool } from 'langchain/tools';
// Copied from langchain(`langchain/src/tools/convert_to_openai.ts`)
// since these functions are not exported
/**
 * Converts a `StructuredTool` into the shape expected by OpenAI's
 * ChatCompletionFunctions: the tool's zod schema is translated to a JSON
 * schema and used as the function's parameters.
 */
export function formatToOpenAIFunction(
	tool: StructuredTool,
): OpenAIClient.Chat.ChatCompletionCreateParams.Function {
	const { name, description, schema } = tool;
	return {
		name,
		description,
		parameters: zodToJsonSchema(schema),
	};
}
/**
 * Converts a `StructuredTool` into OpenAI's chat-completion tool format
 * (a function tool whose parameters are the tool's zod schema as JSON schema).
 */
export function formatToOpenAITool(tool: StructuredTool): OpenAIClient.Chat.ChatCompletionTool {
	return {
		type: 'function',
		function: {
			name: tool.name,
			description: tool.description,
			parameters: zodToJsonSchema(tool.schema),
		},
	};
}
/**
 * Wraps a `StructuredTool` as an OpenAI Assistants API tool definition
 * (`type: 'function'` with a JSON-schema parameter description).
 */
export function formatToOpenAIAssistantTool(
	tool: StructuredTool,
): OpenAIClient.Beta.AssistantCreateParams.AssistantToolsFunction {
	const parameters = zodToJsonSchema(tool.schema);

	return {
		type: 'function',
		function: {
			name: tool.name,
			description: tool.description,
			parameters,
		},
	};
}

View file

@ -0,0 +1,2 @@
// Aggregates the node's dynamic-parameter method groups,
// wired into the node's `methods` property.
export * as listSearch from './listSearch';
export * as loadOptions from './loadOptions';

View file

@ -0,0 +1,119 @@
import type {
IDataObject,
ILoadOptionsFunctions,
INodeListSearchItems,
INodeListSearchResult,
} from 'n8n-workflow';
import { apiRequest } from '../transport';
/**
 * List-search handler for OpenAI files: fetches all uploaded files and,
 * when a filter is given, narrows them by a case-insensitive substring
 * match on the filename. Returns filename → file id pairs.
 */
export async function fileSearch(
	this: ILoadOptionsFunctions,
	filter?: string,
): Promise<INodeListSearchResult> {
	const { data } = await apiRequest.call(this, 'GET', '/files');

	const files: IDataObject[] = data ?? [];
	// Files without a filename never match a filter (optional chaining yields undefined).
	const matching = filter
		? files.filter((file) =>
				(file.filename as string)?.toLowerCase().includes(filter.toLowerCase()),
		  )
		: files;

	return {
		results: matching.map((file) => ({
			name: file.filename as string,
			value: file.id as string,
		})),
	};
}
/**
 * List-search handler for OpenAI models. Only chat-capable models
 * (ids starting with 'gpt-') are offered; results are optionally filtered
 * by a case-insensitive substring and sorted alphabetically.
 */
export async function modelSearch(
	this: ILoadOptionsFunctions,
	filter?: string,
): Promise<INodeListSearchResult> {
	const response = await apiRequest.call(this, 'GET', '/models');

	const gptModels: IDataObject[] = (response.data ?? []).filter((model: IDataObject) =>
		(model.id as string).startsWith('gpt-'),
	);

	const query = filter?.toLowerCase();
	const results: INodeListSearchItems[] = gptModels
		.filter((model) => (query ? (model.id as string)?.toLowerCase().includes(query) : true))
		.map((model) => ({
			name: (model.id as string).toUpperCase(),
			value: model.id as string,
		}))
		.sort((a, b) => a.name.localeCompare(b.name));

	return { results };
}
/**
 * Paginated list-search handler for OpenAI assistants.
 *
 * Fetches up to 100 assistants per page (cursor-based via `after`) and
 * optionally filters them by a case-insensitive substring match on the name.
 *
 * Fix: the pagination cursor is now returned in BOTH branches — previously
 * the filtered branch omitted `paginationToken`, so "load more" stopped
 * working as soon as the user typed a search term.
 */
export async function assistantSearch(
	this: ILoadOptionsFunctions,
	filter?: string,
	paginationToken?: string,
): Promise<INodeListSearchResult> {
	const { data, has_more, last_id } = await apiRequest.call(this, 'GET', '/assistants', {
		headers: {
			// Assistants endpoints require the beta opt-in header.
			'OpenAI-Beta': 'assistants=v1',
		},
		qs: {
			limit: 100,
			after: paginationToken,
		},
	});

	// Propagate the cursor only while the API reports more pages.
	if (has_more === true) {
		paginationToken = last_id;
	} else {
		paginationToken = undefined;
	}

	if (filter) {
		const results: INodeListSearchItems[] = [];

		for (const assistant of data || []) {
			// Assistants may have a null name; those never match a filter.
			if ((assistant.name as string)?.toLowerCase().includes(filter.toLowerCase())) {
				results.push({
					name: assistant.name as string,
					value: assistant.id as string,
				});
			}
		}

		return {
			results,
			// Keep the cursor so the UI can fetch further pages of filtered results.
			paginationToken,
		};
	} else {
		return {
			results: (data || []).map((assistant: IDataObject) => ({
				name: assistant.name as string,
				value: assistant.id as string,
			})),
			paginationToken,
		};
	}
}

View file

@ -0,0 +1,17 @@
import type { ILoadOptionsFunctions, INodePropertyOptions } from 'n8n-workflow';
import { apiRequest } from '../transport';
/**
 * Load-options handler returning all files uploaded with the 'assistants'
 * purpose as dropdown options (filename shown, file id as value).
 */
export async function getFiles(this: ILoadOptionsFunctions): Promise<INodePropertyOptions[]> {
	const { data } = await apiRequest.call(this, 'GET', '/files', { qs: { purpose: 'assistants' } });

	const returnData: INodePropertyOptions[] = (data ?? []).map((file) => ({
		name: file.filename as string,
		value: file.id as string,
	}));

	return returnData;
}

View file

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="256" height="260" preserveAspectRatio="xMidYMid"><path d="M239.184 106.203a64.72 64.72 0 0 0-5.576-53.103C219.452 28.459 191 15.784 163.213 21.74A65.586 65.586 0 0 0 52.096 45.22a64.72 64.72 0 0 0-43.23 31.36c-14.31 24.602-11.061 55.634 8.033 76.74a64.67 64.67 0 0 0 5.525 53.102c14.174 24.65 42.644 37.324 70.446 31.36a64.72 64.72 0 0 0 48.754 21.744c28.481.025 53.714-18.361 62.414-45.481a64.77 64.77 0 0 0 43.229-31.36c14.137-24.558 10.875-55.423-8.083-76.483m-97.56 136.338a48.4 48.4 0 0 1-31.105-11.255l1.535-.87 51.67-29.825a8.6 8.6 0 0 0 4.247-7.367v-72.85l21.845 12.636c.218.111.37.32.409.563v60.367c-.056 26.818-21.783 48.545-48.601 48.601M37.158 197.93a48.35 48.35 0 0 1-5.781-32.589l1.534.921 51.722 29.826a8.34 8.34 0 0 0 8.441 0l63.181-36.425v25.221a.87.87 0 0 1-.358.665l-52.335 30.184c-23.257 13.398-52.97 5.431-66.404-17.803M23.549 85.38a48.5 48.5 0 0 1 25.58-21.333v61.39a8.29 8.29 0 0 0 4.195 7.316l62.874 36.272-21.845 12.636a.82.82 0 0 1-.767 0L41.353 151.53c-23.211-13.454-31.171-43.144-17.804-66.405zm179.466 41.695-63.08-36.63L161.73 77.86a.82.82 0 0 1 .768 0l52.233 30.184a48.6 48.6 0 0 1-7.316 87.635v-61.391a8.54 8.54 0 0 0-4.4-7.213m21.742-32.69-1.535-.922-51.619-30.081a8.39 8.39 0 0 0-8.492 0L99.98 99.808V74.587a.72.72 0 0 1 .307-.665l52.233-30.133a48.652 48.652 0 0 1 72.236 50.391zM88.061 139.097l-21.845-12.585a.87.87 0 0 1-.41-.614V65.685a48.652 48.652 0 0 1 79.757-37.346l-1.535.87-51.67 29.825a8.6 8.6 0 0 0-4.246 7.367zm11.868-25.58L128.067 97.3l28.188 16.218v32.434l-28.086 16.218-28.188-16.218z"/></svg>

After

Width:  |  Height:  |  Size: 1.6 KiB

View file

@ -0,0 +1,522 @@
import * as assistant from '../actions/assistant';
import * as audio from '../actions/audio';
import * as file from '../actions/file';
import * as image from '../actions/image';
import * as text from '../actions/text';
import type { IDataObject, IExecuteFunctions } from 'n8n-workflow';
import * as transport from '../transport';
import get from 'lodash/get';
// Builds a minimal IExecuteFunctions stub: getNodeParameter resolves values
// from the supplied parameters object (dot-paths supported via lodash `get`),
// and the binary helpers return canned fixtures for the audio/file tests.
const createExecuteFunctionsMock = (parameters: IDataObject) =>
	({
		getNodeParameter: (parameter: string) => get(parameters, parameter),
		getNode: () => ({}),
		getInputConnectionData: () => undefined,
		helpers: {
			prepareBinaryData: () => ({}),
			assertBinaryData: () => ({
				filename: 'filenale.flac',
				contentType: 'audio/flac',
			}),
			getBinaryDataBuffer: () => 'data buffer data',
		},
	}) as unknown as IExecuteFunctions;
describe('OpenAi, Assistant resource', () => {
	beforeEach(() => {
		// Stub the shared HTTP helper so no real OpenAI requests are made.
		(transport as any).apiRequest = jest.fn();
	});

	it('create => should throw an error if an assistant with the same name already exists', async () => {
		(transport.apiRequest as jest.Mock).mockResolvedValueOnce({
			data: [{ name: 'name' }],
			has_more: false,
		});

		// rejects.toThrow replaces the try/catch + expect(true).toBe(false) pattern:
		// if no error is thrown the assertion itself fails, instead of relying on
		// an unreachable expect inside the try block.
		await expect(
			assistant.create.execute.call(
				createExecuteFunctionsMock({
					name: 'name',
					options: {
						failIfExists: true,
					},
				}),
				0,
			),
		).rejects.toThrow("An assistant with the same name 'name' already exists");
	});

	it('create => should call apiRequest with correct parameters', async () => {
		(transport.apiRequest as jest.Mock).mockResolvedValueOnce({});

		await assistant.create.execute.call(
			createExecuteFunctionsMock({
				modelId: 'gpt-model',
				name: 'name',
				description: 'description',
				instructions: 'some instructions',
				codeInterpreter: true,
				knowledgeRetrieval: true,
				file_ids: [],
				options: {},
			}),
			0,
		);

		// Both built-in tools must be enabled and the beta header sent.
		expect(transport.apiRequest).toHaveBeenCalledWith('POST', '/assistants', {
			body: {
				description: 'description',
				file_ids: [],
				instructions: 'some instructions',
				model: 'gpt-model',
				name: 'name',
				tools: [{ type: 'code_interpreter' }, { type: 'retrieval' }],
			},
			headers: { 'OpenAI-Beta': 'assistants=v1' },
		});
	});

	it('create => should throw error if more then 20 files selected', async () => {
		(transport.apiRequest as jest.Mock).mockResolvedValueOnce({});

		await expect(
			assistant.create.execute.call(
				createExecuteFunctionsMock({
					file_ids: Array.from({ length: 25 }),
					options: {},
				}),
				0,
			),
		).rejects.toThrow('The maximum number of files that can be attached to the assistant is 20');
	});

	it('delete => should call apiRequest with correct parameters', async () => {
		(transport.apiRequest as jest.Mock).mockResolvedValueOnce({});

		await assistant.deleteAssistant.execute.call(
			createExecuteFunctionsMock({
				assistantId: 'assistant-id',
			}),
			0,
		);

		expect(transport.apiRequest).toHaveBeenCalledWith('DELETE', '/assistants/assistant-id', {
			headers: { 'OpenAI-Beta': 'assistants=v1' },
		});
	});

	it('list => should call apiRequest with correct parameters', async () => {
		(transport.apiRequest as jest.Mock).mockResolvedValueOnce({
			data: [
				{ name: 'name1', id: 'id-1', model: 'gpt-model', other: 'other' },
				{ name: 'name2', id: 'id-2', model: 'gpt-model', other: 'other' },
				{ name: 'name3', id: 'id-3', model: 'gpt-model', other: 'other' },
			],
			has_more: false,
		});

		const response = await assistant.list.execute.call(
			createExecuteFunctionsMock({
				simplify: true,
			}),
			0,
		);

		// With simplify=true the extra fields must be stripped from each item.
		expect(response).toEqual([
			{
				json: { name: 'name1', id: 'id-1', model: 'gpt-model' },
				pairedItem: { item: 0 },
			},
			{
				json: { name: 'name2', id: 'id-2', model: 'gpt-model' },
				pairedItem: { item: 0 },
			},
			{
				json: { name: 'name3', id: 'id-3', model: 'gpt-model' },
				pairedItem: { item: 0 },
			},
		]);
	});

	it('update => should call apiRequest with correct parameters', async () => {
		// First call: GET current assistant (existing tools); second call: POST update.
		(transport.apiRequest as jest.Mock).mockResolvedValueOnce({
			tools: [{ type: 'existing_tool' }],
		});
		(transport.apiRequest as jest.Mock).mockResolvedValueOnce({});

		await assistant.update.execute.call(
			createExecuteFunctionsMock({
				assistantId: 'assistant-id',
				options: {
					modelId: 'gpt-model',
					name: 'name',
					instructions: 'some instructions',
					codeInterpreter: true,
					knowledgeRetrieval: true,
					file_ids: [],
					removeCustomTools: false,
				},
			}),
			0,
		);

		expect(transport.apiRequest).toHaveBeenCalledTimes(2);
		expect(transport.apiRequest).toHaveBeenCalledWith('GET', '/assistants/assistant-id', {
			headers: { 'OpenAI-Beta': 'assistants=v1' },
		});
		// Existing custom tools are preserved and the built-in tools appended.
		expect(transport.apiRequest).toHaveBeenCalledWith('POST', '/assistants/assistant-id', {
			body: {
				file_ids: [],
				instructions: 'some instructions',
				model: 'gpt-model',
				name: 'name',
				tools: [{ type: 'existing_tool' }, { type: 'code_interpreter' }, { type: 'retrieval' }],
			},
			headers: { 'OpenAI-Beta': 'assistants=v1' },
		});
	});
});
describe('OpenAi, Audio resource', () => {
	beforeEach(() => {
		// Fresh apiRequest mock per test so call args/counts do not leak between tests.
		(transport as any).apiRequest = jest.fn();
	});
	it('generate => should call apiRequest with correct parameters', async () => {
		(transport.apiRequest as jest.Mock).mockResolvedValueOnce({});
		const returnData = await audio.generate.execute.call(
			createExecuteFunctionsMock({
				model: 'tts-model',
				input: 'input',
				voice: 'fable',
				options: {
					response_format: 'flac',
					speed: 1.25,
					binaryPropertyOutput: 'myData',
				},
			}),
			0,
		);
		// The generated audio must be attached as binary data under the configured property.
		expect(returnData.length).toEqual(1);
		expect(returnData[0].binary?.myData).toBeDefined();
		expect(returnData[0].pairedItem).toBeDefined();
		expect(transport.apiRequest).toHaveBeenCalledWith('POST', '/audio/speech', {
			body: {
				input: 'input',
				model: 'tts-model',
				response_format: 'flac',
				speed: 1.25,
				voice: 'fable',
			},
			// Speech responses are streamed binary, not JSON.
			option: { encoding: 'arraybuffer', json: false, returnFullResponse: true, useStream: true },
		});
	});
	it('transcribe => should call apiRequest with correct parameters', async () => {
		(transport.apiRequest as jest.Mock).mockResolvedValueOnce({ text: 'transcribtion' });
		const returnData = await audio.transcribe.execute.call(
			createExecuteFunctionsMock({
				binaryPropertyName: 'myData',
				options: {
					language: 'en',
					temperature: 1.1,
				},
			}),
			0,
		);
		expect(returnData.length).toEqual(1);
		expect(returnData[0].pairedItem).toBeDefined();
		expect(returnData[0].json).toEqual({ text: 'transcribtion' });
		// Transcription uploads the audio file, so a multipart request is expected.
		expect(transport.apiRequest).toHaveBeenCalledWith(
			'POST',
			'/audio/transcriptions',
			expect.objectContaining({
				headers: { 'Content-Type': 'multipart/form-data' },
			}),
		);
	});
	it('translate => should call apiRequest with correct parameters', async () => {
		(transport.apiRequest as jest.Mock).mockResolvedValueOnce({ text: 'translations' });
		const returnData = await audio.translate.execute.call(
			createExecuteFunctionsMock({
				binaryPropertyName: 'myData',
				options: {},
			}),
			0,
		);
		expect(returnData.length).toEqual(1);
		expect(returnData[0].pairedItem).toBeDefined();
		expect(returnData[0].json).toEqual({ text: 'translations' });
		// Translation also uploads the audio file as multipart form data.
		expect(transport.apiRequest).toHaveBeenCalledWith(
			'POST',
			'/audio/translations',
			expect.objectContaining({
				headers: { 'Content-Type': 'multipart/form-data' },
			}),
		);
	});
});
describe('OpenAi, File resource', () => {
	beforeEach(() => {
		// Fresh apiRequest mock per test.
		(transport as any).apiRequest = jest.fn();
	});
	it('deleteFile => should call apiRequest with correct parameters', async () => {
		(transport.apiRequest as jest.Mock).mockResolvedValueOnce({});
		await file.deleteFile.execute.call(
			createExecuteFunctionsMock({
				fileId: 'file-id',
			}),
			0,
		);
		expect(transport.apiRequest).toHaveBeenCalledWith('DELETE', '/files/file-id');
	});
	it('list => should return list of files', async () => {
		(transport.apiRequest as jest.Mock).mockResolvedValueOnce({
			data: [{ file: 'file1' }, { file: 'file2' }, { file: 'file3' }],
		});
		// Item index 2 is passed in; every output item must be paired with it.
		const returnData = await file.list.execute.call(createExecuteFunctionsMock({ options: {} }), 2);
		expect(returnData.length).toEqual(3);
		expect(returnData).toEqual([
			{
				json: { file: 'file1' },
				pairedItem: { item: 2 },
			},
			{
				json: { file: 'file2' },
				pairedItem: { item: 2 },
			},
			{
				json: { file: 'file3' },
				pairedItem: { item: 2 },
			},
		]);
	});
	it('upload => should call apiRequest with correct parameters', async () => {
		(transport.apiRequest as jest.Mock).mockResolvedValueOnce({ success: true });
		const returnData = await file.upload.execute.call(
			createExecuteFunctionsMock({
				binaryPropertyName: 'myData',
				options: {},
			}),
			0,
		);
		expect(returnData.length).toEqual(1);
		expect(returnData[0].pairedItem).toBeDefined();
		expect(returnData[0].json).toEqual({ success: true });
		// File uploads go out as multipart form data.
		expect(transport.apiRequest).toHaveBeenCalledWith(
			'POST',
			'/files',
			expect.objectContaining({
				headers: { 'Content-Type': 'multipart/form-data' },
			}),
		);
	});
});
describe('OpenAi, Image resource', () => {
	beforeEach(() => {
		// Fresh apiRequest mock per test.
		(transport as any).apiRequest = jest.fn();
	});
	it('generate => should call apiRequest with correct parameters, return binary', async () => {
		(transport.apiRequest as jest.Mock).mockResolvedValueOnce({ data: [{ b64_json: 'image1' }] });
		const returnData = await image.generate.execute.call(
			createExecuteFunctionsMock({
				model: 'dall-e-3',
				prompt: 'cat with a hat',
				options: {
					size: '1024x1024',
					style: 'vivid',
					quality: 'hd',
					binaryPropertyOutput: 'myData',
				},
			}),
			0,
		);
		// Default mode: the base64 payload is converted to binary output data.
		expect(returnData.length).toEqual(1);
		expect(returnData[0].binary?.myData).toBeDefined();
		expect(returnData[0].pairedItem).toBeDefined();
		expect(transport.apiRequest).toHaveBeenCalledWith('POST', '/images/generations', {
			body: {
				model: 'dall-e-3',
				prompt: 'cat with a hat',
				quality: 'hd',
				response_format: 'b64_json',
				size: '1024x1024',
				style: 'vivid',
			},
		});
	});
	it('generate => should call apiRequest with correct parameters, return urls', async () => {
		(transport.apiRequest as jest.Mock).mockResolvedValueOnce({ data: [{ url: 'image-url' }] });
		const returnData = await image.generate.execute.call(
			createExecuteFunctionsMock({
				model: 'dall-e-3',
				prompt: 'cat with a hat',
				options: {
					size: '1024x1024',
					style: 'vivid',
					quality: 'hd',
					binaryPropertyOutput: 'myData',
					returnImageUrls: true,
				},
			}),
			0,
		);
		// With returnImageUrls the node must request url format and emit JSON items.
		expect(returnData.length).toEqual(1);
		expect(returnData[0].pairedItem).toBeDefined();
		expect(returnData).toEqual([{ json: { url: 'image-url' }, pairedItem: { item: 0 } }]);
		expect(transport.apiRequest).toHaveBeenCalledWith('POST', '/images/generations', {
			body: {
				model: 'dall-e-3',
				prompt: 'cat with a hat',
				quality: 'hd',
				response_format: 'url',
				size: '1024x1024',
				style: 'vivid',
			},
		});
	});
	it('analyze => should call apiRequest with correct parameters', async () => {
		(transport.apiRequest as jest.Mock).mockResolvedValueOnce({ success: true });
		const returnData = await image.analyze.execute.call(
			createExecuteFunctionsMock({
				text: 'image text',
				inputType: 'url',
				imageUrls: 'image-url1, image-url2',
				options: {
					detail: 'low',
				},
			}),
			0,
		);
		expect(returnData.length).toEqual(1);
		expect(returnData[0].pairedItem).toBeDefined();
		expect(returnData[0].json).toEqual({ success: true });
		// The comma-separated URL list must be split into separate image_url parts.
		expect(transport.apiRequest).toHaveBeenCalledWith('POST', '/chat/completions', {
			body: {
				max_tokens: 300,
				messages: [
					{
						content: [
							{ text: 'image text', type: 'text' },
							{ image_url: { detail: 'low', url: 'image-url1' }, type: 'image_url' },
							{ image_url: { detail: 'low', url: 'image-url2' }, type: 'image_url' },
						],
						role: 'user',
					},
				],
				model: 'gpt-4-vision-preview',
			},
		});
	});
});
describe('OpenAi, Text resource', () => {
	beforeEach(() => {
		// Fresh apiRequest mock per test.
		(transport as any).apiRequest = jest.fn();
	});
	it('classify => should call apiRequest with correct parameters', async () => {
		(transport.apiRequest as jest.Mock).mockResolvedValueOnce({ results: [{ flagged: true }] });
		const returnData = await text.classify.execute.call(
			createExecuteFunctionsMock({
				input: 'input',
				options: { useStableModel: true },
			}),
			0,
		);
		// useStableModel must switch the moderation model to the stable variant.
		expect(returnData.length).toEqual(1);
		expect(returnData[0].pairedItem).toBeDefined();
		expect(returnData[0].json).toEqual({ flagged: true });
		expect(transport.apiRequest).toHaveBeenCalledWith('POST', '/moderations', {
			body: { input: 'input', model: 'text-moderation-stable' },
		});
	});
	it('message => should call apiRequest with correct parameters, no tool call', async () => {
		// No tool_calls in the mocked response, so no follow-up requests are expected.
		(transport.apiRequest as jest.Mock).mockResolvedValueOnce({
			choices: [{ message: { tool_calls: undefined } }],
		});
		await text.message.execute.call(
			createExecuteFunctionsMock({
				modelId: 'gpt-model',
				messages: {
					values: [{ role: 'user', content: 'message' }],
				},
				options: {},
			}),
			0,
		);
		expect(transport.apiRequest).toHaveBeenCalledWith('POST', '/chat/completions', {
			body: {
				messages: [{ content: 'message', role: 'user' }],
				model: 'gpt-model',
				response_format: undefined,
				tools: undefined,
			},
		});
	});
});

View file

@ -0,0 +1,37 @@
import type {
IDataObject,
IExecuteFunctions,
IHttpRequestMethods,
ILoadOptionsFunctions,
} from 'n8n-workflow';
// Optional pieces of an OpenAI API request; `option` carries extra
// request-library options merged over the defaults (e.g. encoding, streaming).
type RequestParameters = {
	headers?: IDataObject;
	body?: IDataObject | string;
	qs?: IDataObject;
	uri?: string; // full URL override; otherwise built from the endpoint
	option?: IDataObject;
};
/**
 * Makes an authenticated request to the OpenAI API using the node's
 * `openAiApi` credentials. The endpoint is appended to the v1 base URL
 * unless a full `uri` is supplied; any `option` entries override defaults.
 */
export async function apiRequest(
	this: IExecuteFunctions | ILoadOptionsFunctions,
	method: IHttpRequestMethods,
	endpoint: string,
	parameters?: RequestParameters,
) {
	const { body, qs, uri, option = {}, headers } = parameters ?? {};

	const requestOptions = {
		headers,
		method,
		body,
		qs,
		uri: uri ?? `https://api.openai.com/v1${endpoint}`,
		json: true,
		// Extra request options (e.g. encoding/stream flags) take precedence.
		...option,
	};

	return await this.helpers.requestWithAuthentication.call(this, 'openAiApi', requestOptions);
}

View file

@ -18,7 +18,9 @@
"format": "prettier nodes credentials --write",
"lint": "eslint nodes credentials",
"lintfix": "eslint nodes credentials --fix",
"watch": "tsc-watch -p tsconfig.build.json --onCompilationComplete \"tsc-alias -p tsconfig.build.json\" --onSuccess \"pnpm n8n-generate-ui-types\""
"watch": "tsc-watch -p tsconfig.build.json --onCompilationComplete \"tsc-alias -p tsconfig.build.json\" --onSuccess \"pnpm n8n-generate-ui-types\"",
"test": "jest",
"test:dev": "jest --watch"
},
"files": [
"dist"
@ -42,6 +44,7 @@
"dist/credentials/ZepApi.credentials.js"
],
"nodes": [
"dist/nodes/vendors/OpenAi/OpenAi.node.js",
"dist/nodes/agents/Agent/Agent.node.js",
"dist/nodes/agents/OpenAiAssistant/OpenAiAssistant.node.js",
"dist/nodes/chains/ChainSummarization/ChainSummarization.node.js",
@ -140,6 +143,7 @@
"cohere-ai": "6.2.2",
"d3-dsv": "2.0.0",
"epub2": "3.0.1",
"form-data": "4.0.0",
"html-to-text": "9.0.5",
"json-schema-to-zod": "1.2.0",
"langchain": "0.0.198",

View file

@ -105,9 +105,12 @@ export function getConnectionHintNoticeField(
if (groupedConnections.size === 1) {
const [[connection, locales]] = Array.from(groupedConnections);
displayName = `This node must be connected to ${determineArticle(
locales[0],
)} ${locales[0].toLowerCase()}. <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='${connection}'>Insert one</a>`;
displayName = `This node must be connected to ${determineArticle(locales[0])} ${locales[0]
.toLowerCase()
.replace(
/^ai /,
'AI ',
)}. <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='${connection}'>Insert one</a>`;
} else {
const ahrefs = Array.from(groupedConnections, ([connection, locales]) => {
// If there are multiple locales, join them with ' or '

View file

@ -247,7 +247,15 @@ export abstract class DirectoryLoader {
isCustom: boolean;
}) {
try {
const codex = this.getCodex(filePath);
let codex;
if (!isCustom) {
codex = node.description.codex;
}
if (codex === undefined) {
codex = this.getCodex(filePath);
}
if (isCustom) {
codex.categories = codex.categories

View file

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="256" height="260" preserveAspectRatio="xMidYMid"><path d="M239.184 106.203a64.72 64.72 0 0 0-5.576-53.103C219.452 28.459 191 15.784 163.213 21.74A65.586 65.586 0 0 0 52.096 45.22a64.72 64.72 0 0 0-43.23 31.36c-14.31 24.602-11.061 55.634 8.033 76.74a64.67 64.67 0 0 0 5.525 53.102c14.174 24.65 42.644 37.324 70.446 31.36a64.72 64.72 0 0 0 48.754 21.744c28.481.025 53.714-18.361 62.414-45.481a64.77 64.77 0 0 0 43.229-31.36c14.137-24.558 10.875-55.423-8.083-76.483m-97.56 136.338a48.4 48.4 0 0 1-31.105-11.255l1.535-.87 51.67-29.825a8.6 8.6 0 0 0 4.247-7.367v-72.85l21.845 12.636c.218.111.37.32.409.563v60.367c-.056 26.818-21.783 48.545-48.601 48.601M37.158 197.93a48.35 48.35 0 0 1-5.781-32.589l1.534.921 51.722 29.826a8.34 8.34 0 0 0 8.441 0l63.181-36.425v25.221a.87.87 0 0 1-.358.665l-52.335 30.184c-23.257 13.398-52.97 5.431-66.404-17.803M23.549 85.38a48.5 48.5 0 0 1 25.58-21.333v61.39a8.29 8.29 0 0 0 4.195 7.316l62.874 36.272-21.845 12.636a.82.82 0 0 1-.767 0L41.353 151.53c-23.211-13.454-31.171-43.144-17.804-66.405zm179.466 41.695-63.08-36.63L161.73 77.86a.82.82 0 0 1 .768 0l52.233 30.184a48.6 48.6 0 0 1-7.316 87.635v-61.391a8.54 8.54 0 0 0-4.4-7.213m21.742-32.69-1.535-.922-51.619-30.081a8.39 8.39 0 0 0-8.492 0L99.98 99.808V74.587a.72.72 0 0 1 .307-.665l52.233-30.133a48.652 48.652 0 0 1 72.236 50.391zM88.061 139.097l-21.845-12.585a.87.87 0 0 1-.41-.614V65.685a48.652 48.652 0 0 1 79.757-37.346l-1.535.87-51.67 29.825a8.6 8.6 0 0 0-4.246 7.367zm11.868-25.58L128.067 97.3l28.188 16.218v32.434l-28.086 16.218-28.188-16.218z"/></svg>

After

Width:  |  Height:  |  Size: 1.6 KiB

View file

@ -13,6 +13,8 @@ import {
REGULAR_NODE_CREATOR_VIEW,
TRIGGER_NODE_CREATOR_VIEW,
CUSTOM_API_CALL_KEY,
OPEN_AI_NODE_MESSAGE_ASSISTANT_TYPE,
OPEN_AI_NODE_TYPE,
} from '@/constants';
import { useUsersStore } from '@/stores/users.store';
@ -24,6 +26,7 @@ import { useViewStacks } from '../composables/useViewStacks';
import ItemsRenderer from '../Renderers/ItemsRenderer.vue';
import CategorizedItemsRenderer from '../Renderers/CategorizedItemsRenderer.vue';
import type { IDataObject } from 'n8n-workflow';
const emit = defineEmits({
nodeTypeSelected: (nodeTypes: string[]) => true,
@ -145,6 +148,12 @@ function onSelected(actionCreateElement: INodeCreateElement) {
const actionNode = actions.value[0].key;
emit('nodeTypeSelected', [actionData.key as string, actionNode]);
} else if (
actionData.key === OPEN_AI_NODE_TYPE &&
(actionData?.value as IDataObject)?.resource === 'assistant' &&
(actionData?.value as IDataObject)?.operation === 'message'
) {
emit('nodeTypeSelected', [OPEN_AI_NODE_MESSAGE_ASSISTANT_TYPE]);
} else {
emit('nodeTypeSelected', [actionData.key as string]);
}

View file

@ -18,6 +18,8 @@ import {
NODE_CREATOR_OPEN_SOURCES,
NO_OP_NODE_TYPE,
OPEN_AI_ASSISTANT_NODE_TYPE,
OPEN_AI_NODE_MESSAGE_ASSISTANT_TYPE,
OPEN_AI_NODE_TYPE,
QA_CHAIN_NODE_TYPE,
SCHEDULE_TRIGGER_NODE_TYPE,
SPLIT_IN_BATCHES_NODE_TYPE,
@ -188,6 +190,7 @@ export const useActions = () => {
AGENT_NODE_TYPE,
BASIC_CHAIN_NODE_TYPE,
OPEN_AI_ASSISTANT_NODE_TYPE,
OPEN_AI_NODE_MESSAGE_ASSISTANT_TYPE,
];
const isChatTriggerMissing =
@ -228,6 +231,10 @@ export const useActions = () => {
}
addedNodes.forEach((node, index) => {
if (node.type === OPEN_AI_NODE_MESSAGE_ASSISTANT_TYPE) {
node.type = OPEN_AI_NODE_TYPE;
}
nodes.push(node);
switch (node.type) {

View file

@ -97,7 +97,9 @@ interface NodeView {
function getAiNodesBySubcategory(nodes: INodeTypeDescription[], subcategory: string) {
return nodes
.filter((node) => node.codex?.subcategories?.[AI_SUBCATEGORY]?.includes(subcategory))
.filter(
(node) => !node.hidden && node.codex?.subcategories?.[AI_SUBCATEGORY]?.includes(subcategory),
)
.map((node) => ({
key: node.name,
type: 'node',
@ -109,6 +111,13 @@ function getAiNodesBySubcategory(nodes: INodeTypeDescription[], subcategory: str
description: node.description,
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
icon: node.icon!,
iconData: node.name.toLowerCase().includes('openai')
? {
type: 'file',
icon: 'openai',
fileBuffer: '/static/open-ai.svg',
}
: undefined,
},
}))
.sort((a, b) => a.properties.displayName.localeCompare(b.properties.displayName));

View file

@ -130,6 +130,9 @@ export const MANUAL_TRIGGER_NODE_TYPE = 'n8n-nodes-base.manualTrigger';
export const MANUAL_CHAT_TRIGGER_NODE_TYPE = '@n8n/n8n-nodes-langchain.manualChatTrigger';
export const CHAT_TRIGGER_NODE_TYPE = '@n8n/n8n-nodes-langchain.chatTrigger';
export const AGENT_NODE_TYPE = '@n8n/n8n-nodes-langchain.agent';
export const OPEN_AI_NODE_TYPE = '@n8n/n8n-nodes-langchain.openAi';
export const OPEN_AI_NODE_MESSAGE_ASSISTANT_TYPE =
'@n8n/n8n-nodes-langchain.openAi.assistant.message';
export const OPEN_AI_ASSISTANT_NODE_TYPE = '@n8n/n8n-nodes-langchain.openAiAssistant';
export const BASIC_CHAIN_NODE_TYPE = '@n8n/n8n-nodes-langchain.chainLlm';
export const QA_CHAIN_NODE_TYPE = '@n8n/n8n-nodes-langchain.chainRetrievalQa';

View file

@ -2,11 +2,13 @@ import type { INodeType, INodeTypeDescription } from 'n8n-workflow';
import { imageFields, imageOperations } from './ImageDescription';
import { textFields, textOperations } from './TextDescription';
import { chatFields, chatOperations } from './ChatDescription';
import { oldVersionNotice } from '../../utils/descriptions';
export class OpenAi implements INodeType {
description: INodeTypeDescription = {
displayName: 'OpenAI',
name: 'openAi',
hidden: true,
icon: 'file:openAi.svg',
group: ['transform'],
version: [1, 1.1],
@ -28,13 +30,7 @@ export class OpenAi implements INodeType {
baseURL: 'https://api.openai.com',
},
properties: [
{
displayName:
'For more advanced uses, consider using an <a data-action="openSelectiveNodeCreator" data-action-parameter-creatorview="AI">advanced AI</a> node',
name: 'noticeAdvanceAi',
type: 'notice',
default: '',
},
oldVersionNotice,
{
displayName: 'Resource',
name: 'resource',

View file

@ -33,6 +33,7 @@ export {
fileTypeFromMimeType,
assert,
removeCircularRefs,
updateDisplayOptions,
} from './utils';
export {
isINodeProperties,

View file

@ -1,7 +1,9 @@
import FormData from 'form-data';
import type { BinaryFileType, JsonObject } from './Interfaces';
import type { BinaryFileType, IDisplayOptions, INodeProperties, JsonObject } from './Interfaces';
import { ApplicationError } from './errors/application.error';
import { merge } from 'lodash';
const readStreamClasses = new Set(['ReadStream', 'Readable', 'ReadableStream']);
// NOTE: BigInt.prototype.toJSON is not available, which causes JSON.stringify to throw an error
@ -165,3 +167,15 @@ export const removeCircularRefs = (obj: JsonObject, seen = new Set()) => {
}
});
};
/**
 * Returns copies of the given node properties with `displayOptions`
 * deep-merged into each property's own display options (the shared
 * options win on conflicting keys). Inputs are not mutated.
 */
export function updateDisplayOptions(
	displayOptions: IDisplayOptions,
	properties: INodeProperties[],
) {
	return properties.map((nodeProperty) => ({
		...nodeProperty,
		// lodash merge into a fresh object so neither argument is modified.
		displayOptions: merge({}, nodeProperty.displayOptions, displayOptions),
	}));
}

View file

@ -226,6 +226,9 @@ importers:
epub2:
specifier: 3.0.1
version: 3.0.1(ts-toolbelt@9.6.0)
form-data:
specifier: 4.0.0
version: 4.0.0
html-to-text:
specifier: 9.0.5
version: 9.0.5