n8n/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/ChainLlm.node.ts

Ignoring revisions in .git-blame-ignore-revs. Click here to bypass and see the normal blame view.

595 lines
16 KiB
TypeScript
Raw Normal View History

ci: Introduce `no-plain-errors` lint rule for BE packages (no-changelog) (#7961) ## Summary Require `ApplicationError` or its child classes instead of plain `Error` in BE packages. This ensures the error will be normalized when reported to Sentry, if applicable. Follow-up to: https://github.com/n8n-io/n8n/pulls?q=is%3Apr+is%3Aclosed+applicationerror ... #### How to test the change: 1. ... ## Issues fixed Include links to Github issue or Community forum post or **Linear ticket**: > Important in order to close automatically and provide context to reviewers ... ## Review / Merge checklist - [ ] PR title and summary are descriptive. **Remember, the title automatically goes into the changelog. Use `(no-changelog)` otherwise.** ([conventions](https://github.com/n8n-io/n8n/blob/master/.github/pull_request_title_conventions.md)) - [ ] [Docs updated](https://github.com/n8n-io/n8n-docs) or follow-up ticket created. - [ ] Tests included. > A bug is not considered fixed, unless a test is added to prevent it from happening again. A feature is not complete without tests. > > *(internal)* You can use Slack commands to trigger [e2e tests](https://www.notion.so/n8n/How-to-use-Test-Instances-d65f49dfc51f441ea44367fb6f67eb0a?pvs=4#a39f9e5ba64a48b58a71d81c837e8227) or [deploy test instance](https://www.notion.so/n8n/How-to-use-Test-Instances-d65f49dfc51f441ea44367fb6f67eb0a?pvs=4#f6a177d32bde4b57ae2da0b8e454bfce) or [deploy early access version on Cloud](https://www.notion.so/n8n/Cloudbot-3dbe779836004972b7057bc989526998?pvs=4#fef2d36ab02247e1a0f65a74f6fb534e).
2023-12-08 03:51:49 -08:00
import { ApplicationError, NodeConnectionType, NodeOperationError } from 'n8n-workflow';
import type {
IBinaryData,
IDataObject,
IExecuteFunctions,
INodeExecutionData,
INodeType,
INodeTypeDescription,
} from 'n8n-workflow';
import type { BaseLanguageModel } from '@langchain/core/language_models/base';
import {
AIMessagePromptTemplate,
PromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
ChatPromptTemplate,
} from '@langchain/core/prompts';
import type { BaseOutputParser } from '@langchain/core/output_parsers';
import { CombiningOutputParser } from 'langchain/output_parsers';
import { LLMChain } from 'langchain/chains';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { HumanMessage } from '@langchain/core/messages';
import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { getTemplateNoticeField } from '../../../utils/sharedFields';
import {
getOptionalOutputParsers,
getPromptInputByType,
isChatInstance,
} from '../../../utils/helpers';
import { getTracingConfig } from '../../../utils/tracing';
interface MessagesTemplate {
type: string;
message: string;
messageType: 'text' | 'imageBinary' | 'imageUrl';
binaryImageDataKey?: string;
imageUrl?: string;
imageDetail?: 'auto' | 'low' | 'high';
}
async function getImageMessage(
context: IExecuteFunctions,
itemIndex: number,
message: MessagesTemplate,
) {
if (message.messageType !== 'imageBinary' && message.messageType !== 'imageUrl') {
// eslint-disable-next-line n8n-nodes-base/node-execute-block-wrong-error-thrown
throw new NodeOperationError(
context.getNode(),
'Invalid message type. Only imageBinary and imageUrl are supported',
);
}
const detail = message.imageDetail === 'auto' ? undefined : message.imageDetail;
if (message.messageType === 'imageUrl' && message.imageUrl) {
return new HumanMessage({
content: [
{
type: 'image_url',
image_url: {
url: message.imageUrl,
detail,
},
},
],
});
}
const binaryDataKey = message.binaryImageDataKey ?? 'data';
const inputData = context.getInputData()[itemIndex];
const binaryData = inputData.binary?.[binaryDataKey] as IBinaryData;
if (!binaryData) {
throw new NodeOperationError(context.getNode(), 'No binary data set.');
}
const bufferData = await context.helpers.getBinaryDataBuffer(itemIndex, binaryDataKey);
const model = (await context.getInputConnectionData(
NodeConnectionType.AiLanguageModel,
0,
)) as BaseLanguageModel;
const dataURI = `data:image/jpeg;base64,${bufferData.toString('base64')}`;
const directUriModels = [ChatGoogleGenerativeAI, ChatOllama];
const imageUrl = directUriModels.some((i) => model instanceof i)
? dataURI
: { url: dataURI, detail };
return new HumanMessage({
content: [
{
type: 'image_url',
image_url: imageUrl,
},
],
});
}
async function getChainPromptTemplate(
context: IExecuteFunctions,
itemIndex: number,
llm: BaseLanguageModel | BaseChatModel,
messages?: MessagesTemplate[],
formatInstructions?: string,
query?: string,
) {
const queryTemplate = new PromptTemplate({
template: `{query}${formatInstructions ? '\n{formatInstructions}' : ''}`,
inputVariables: ['query'],
partialVariables: formatInstructions ? { formatInstructions } : undefined,
});
if (isChatInstance(llm)) {
const parsedMessages = await Promise.all(
(messages ?? []).map(async (message) => {
const messageClass = [
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
].find((m) => m.lc_name() === message.type);
if (!messageClass) {
// eslint-disable-next-line n8n-nodes-base/node-execute-block-wrong-error-thrown
ci: Introduce `no-plain-errors` lint rule for BE packages (no-changelog) (#7961) ## Summary Require `ApplicationError` or its child classes instead of plain `Error` in BE packages. This ensures the error will be normalized when reported to Sentry, if applicable. Follow-up to: https://github.com/n8n-io/n8n/pulls?q=is%3Apr+is%3Aclosed+applicationerror ... #### How to test the change: 1. ... ## Issues fixed Include links to Github issue or Community forum post or **Linear ticket**: > Important in order to close automatically and provide context to reviewers ... ## Review / Merge checklist - [ ] PR title and summary are descriptive. **Remember, the title automatically goes into the changelog. Use `(no-changelog)` otherwise.** ([conventions](https://github.com/n8n-io/n8n/blob/master/.github/pull_request_title_conventions.md)) - [ ] [Docs updated](https://github.com/n8n-io/n8n-docs) or follow-up ticket created. - [ ] Tests included. > A bug is not considered fixed, unless a test is added to prevent it from happening again. A feature is not complete without tests. > > *(internal)* You can use Slack commands to trigger [e2e tests](https://www.notion.so/n8n/How-to-use-Test-Instances-d65f49dfc51f441ea44367fb6f67eb0a?pvs=4#a39f9e5ba64a48b58a71d81c837e8227) or [deploy test instance](https://www.notion.so/n8n/How-to-use-Test-Instances-d65f49dfc51f441ea44367fb6f67eb0a?pvs=4#f6a177d32bde4b57ae2da0b8e454bfce) or [deploy early access version on Cloud](https://www.notion.so/n8n/Cloudbot-3dbe779836004972b7057bc989526998?pvs=4#fef2d36ab02247e1a0f65a74f6fb534e).
2023-12-08 03:51:49 -08:00
throw new ApplicationError('Invalid message type', {
extra: { messageType: message.type },
});
}
if (messageClass === HumanMessagePromptTemplate && message.messageType !== 'text') {
const test = await getImageMessage(context, itemIndex, message);
return test;
}
const res = messageClass.fromTemplate(
// Since we're using the message as template, we need to escape any curly braces
// so LangChain doesn't try to parse them as variables
(message.message || '').replace(/[{}]/g, (match) => match + match),
);
return res;
}),
);
const lastMessage = parsedMessages[parsedMessages.length - 1];
// If the last message is a human message and it has an array of content, we need to add the query to the last message
if (lastMessage instanceof HumanMessage && Array.isArray(lastMessage.content)) {
const humanMessage = new HumanMessagePromptTemplate(queryTemplate);
const test = await humanMessage.format({ query });
lastMessage.content.push({ text: test.content.toString(), type: 'text' });
} else {
parsedMessages.push(new HumanMessagePromptTemplate(queryTemplate));
}
return ChatPromptTemplate.fromMessages(parsedMessages);
}
return queryTemplate;
}
async function createSimpleLLMChain(
context: IExecuteFunctions,
llm: BaseLanguageModel,
query: string,
prompt: ChatPromptTemplate | PromptTemplate,
): Promise<string[]> {
const chain = new LLMChain({
llm,
prompt,
}).withConfig(getTracingConfig(context));
const response = (await chain.invoke({
query,
signal: context.getExecutionCancelSignal(),
})) as string[];
return Array.isArray(response) ? response : [response];
}
async function getChain(
context: IExecuteFunctions,
itemIndex: number,
query: string,
llm: BaseLanguageModel,
outputParsers: BaseOutputParser[],
messages?: MessagesTemplate[],
): Promise<unknown[]> {
const chatTemplate: ChatPromptTemplate | PromptTemplate = await getChainPromptTemplate(
context,
itemIndex,
llm,
messages,
undefined,
query,
);
// If there are no output parsers, create a simple LLM chain and execute the query
if (!outputParsers.length) {
return await createSimpleLLMChain(context, llm, query, chatTemplate);
}
// If there's only one output parser, use it; otherwise, create a combined output parser
const combinedOutputParser =
outputParsers.length === 1 ? outputParsers[0] : new CombiningOutputParser(...outputParsers);
const formatInstructions = combinedOutputParser.getFormatInstructions();
// Create a prompt template incorporating the format instructions and query
const prompt = await getChainPromptTemplate(
context,
itemIndex,
llm,
messages,
formatInstructions,
query,
);
const chain = prompt.pipe(llm).pipe(combinedOutputParser);
const response = (await chain.withConfig(getTracingConfig(context)).invoke({ query })) as
| string
| string[];
return Array.isArray(response) ? response : [response];
}
feat: AI nodes usability fixes + Summarization Chain V2 (#7949) Fixes: - Refactor connection snapping when dragging and enable it also for non-main connection types - Fix propagation of errors from sub-nodes - Fix chat scrolling when sending/receiving messages - Prevent empty chat messages - Fix sub-node selected styles - Fix output names text overflow Usability improvements: - Auto-add manual chat trigger for agents & chain nodes - Various labels and description updates - Make the output parser input optional for Basic LLM Chain - Summarization Chain V2 with a simplified document loader & text chunking mode #### How to test the change: Example workflow showcasing different operation mode of the new summarization chain: [Summarization_V2.json](https://github.com/n8n-io/n8n/files/13599901/Summarization_V2.json) ## Issues fixed Include links to Github issue or Community forum post or **Linear ticket**: > Important in order to close automatically and provide context to reviewers - https://www.notion.so/n8n/David-Langchain-Posthog-notes-7a9294938420403095f4508f1a21d31d - https://linear.app/n8n/issue/N8N-7070/ux-fixes-batch - https://linear.app/n8n/issue/N8N-7071/ai-sub-node-bugs ## Review / Merge checklist - [x] PR title and summary are descriptive. **Remember, the title automatically goes into the changelog. Use `(no-changelog)` otherwise.** ([conventions](https://github.com/n8n-io/n8n/blob/master/.github/pull_request_title_conventions.md)) - [x] [Docs updated](https://github.com/n8n-io/n8n-docs) or follow-up ticket created. - [ ] Tests included. > A bug is not considered fixed, unless a test is added to prevent it from happening again. A feature is not complete without tests. > > *(internal)* You can use Slack commands to trigger [e2e tests](https://www.notion.so/n8n/How-to-use-Test-Instances-d65f49dfc51f441ea44367fb6f67eb0a?pvs=4#a39f9e5ba64a48b58a71d81c837e8227) or [deploy test instance](https://www.notion.so/n8n/How-to-use-Test-Instances-d65f49dfc51f441ea44367fb6f67eb0a?pvs=4#f6a177d32bde4b57ae2da0b8e454bfce) or [deploy early access version on Cloud](https://www.notion.so/n8n/Cloudbot-3dbe779836004972b7057bc989526998?pvs=4#fef2d36ab02247e1a0f65a74f6fb534e). --------- Signed-off-by: Oleg Ivaniv <me@olegivaniv.com> Co-authored-by: Elias Meire <elias@meire.dev>
2023-12-08 04:42:32 -08:00
function getInputs(parameters: IDataObject) {
const hasOutputParser = parameters?.hasOutputParser;
const inputs = [
{ displayName: '', type: NodeConnectionType.Main },
{
displayName: 'Model',
maxConnections: 1,
type: NodeConnectionType.AiLanguageModel,
required: true,
},
];
// If `hasOutputParser` is undefined it must be version 1.3 or earlier so we
feat: AI nodes usability fixes + Summarization Chain V2 (#7949) Fixes: - Refactor connection snapping when dragging and enable it also for non-main connection types - Fix propagation of errors from sub-nodes - Fix chat scrolling when sending/receiving messages - Prevent empty chat messages - Fix sub-node selected styles - Fix output names text overflow Usability improvements: - Auto-add manual chat trigger for agents & chain nodes - Various labels and description updates - Make the output parser input optional for Basic LLM Chain - Summarization Chain V2 with a simplified document loader & text chunking mode #### How to test the change: Example workflow showcasing different operation mode of the new summarization chain: [Summarization_V2.json](https://github.com/n8n-io/n8n/files/13599901/Summarization_V2.json) ## Issues fixed Include links to Github issue or Community forum post or **Linear ticket**: > Important in order to close automatically and provide context to reviewers - https://www.notion.so/n8n/David-Langchain-Posthog-notes-7a9294938420403095f4508f1a21d31d - https://linear.app/n8n/issue/N8N-7070/ux-fixes-batch - https://linear.app/n8n/issue/N8N-7071/ai-sub-node-bugs ## Review / Merge checklist - [x] PR title and summary are descriptive. **Remember, the title automatically goes into the changelog. Use `(no-changelog)` otherwise.** ([conventions](https://github.com/n8n-io/n8n/blob/master/.github/pull_request_title_conventions.md)) - [x] [Docs updated](https://github.com/n8n-io/n8n-docs) or follow-up ticket created. - [ ] Tests included. > A bug is not considered fixed, unless a test is added to prevent it from happening again. A feature is not complete without tests. > > *(internal)* You can use Slack commands to trigger [e2e tests](https://www.notion.so/n8n/How-to-use-Test-Instances-d65f49dfc51f441ea44367fb6f67eb0a?pvs=4#a39f9e5ba64a48b58a71d81c837e8227) or [deploy test instance](https://www.notion.so/n8n/How-to-use-Test-Instances-d65f49dfc51f441ea44367fb6f67eb0a?pvs=4#f6a177d32bde4b57ae2da0b8e454bfce) or [deploy early access version on Cloud](https://www.notion.so/n8n/Cloudbot-3dbe779836004972b7057bc989526998?pvs=4#fef2d36ab02247e1a0f65a74f6fb534e). --------- Signed-off-by: Oleg Ivaniv <me@olegivaniv.com> Co-authored-by: Elias Meire <elias@meire.dev>
2023-12-08 04:42:32 -08:00
// always add the output parser input
if (hasOutputParser === undefined || hasOutputParser === true) {
inputs.push({ displayName: 'Output Parser', type: NodeConnectionType.AiOutputParser });
}
return inputs;
}
export class ChainLlm implements INodeType {
description: INodeTypeDescription = {
displayName: 'Basic LLM Chain',
name: 'chainLlm',
icon: 'fa:link',
group: ['transform'],
version: [1, 1.1, 1.2, 1.3, 1.4],
description: 'A simple chain to prompt a large language model',
defaults: {
name: 'Basic LLM Chain',
color: '#909298',
},
codex: {
alias: ['LangChain'],
categories: ['AI'],
subcategories: {
AI: ['Chains'],
},
resources: {
primaryDocumentation: [
{
url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.chainllm/',
},
],
},
},
feat: AI nodes usability fixes + Summarization Chain V2 (#7949) Fixes: - Refactor connection snapping when dragging and enable it also for non-main connection types - Fix propagation of errors from sub-nodes - Fix chat scrolling when sending/receiving messages - Prevent empty chat messages - Fix sub-node selected styles - Fix output names text overflow Usability improvements: - Auto-add manual chat trigger for agents & chain nodes - Various labels and description updates - Make the output parser input optional for Basic LLM Chain - Summarization Chain V2 with a simplified document loader & text chunking mode #### How to test the change: Example workflow showcasing different operation mode of the new summarization chain: [Summarization_V2.json](https://github.com/n8n-io/n8n/files/13599901/Summarization_V2.json) ## Issues fixed Include links to Github issue or Community forum post or **Linear ticket**: > Important in order to close automatically and provide context to reviewers - https://www.notion.so/n8n/David-Langchain-Posthog-notes-7a9294938420403095f4508f1a21d31d - https://linear.app/n8n/issue/N8N-7070/ux-fixes-batch - https://linear.app/n8n/issue/N8N-7071/ai-sub-node-bugs ## Review / Merge checklist - [x] PR title and summary are descriptive. **Remember, the title automatically goes into the changelog. Use `(no-changelog)` otherwise.** ([conventions](https://github.com/n8n-io/n8n/blob/master/.github/pull_request_title_conventions.md)) - [x] [Docs updated](https://github.com/n8n-io/n8n-docs) or follow-up ticket created. - [ ] Tests included. > A bug is not considered fixed, unless a test is added to prevent it from happening again. A feature is not complete without tests. > > *(internal)* You can use Slack commands to trigger [e2e tests](https://www.notion.so/n8n/How-to-use-Test-Instances-d65f49dfc51f441ea44367fb6f67eb0a?pvs=4#a39f9e5ba64a48b58a71d81c837e8227) or [deploy test instance](https://www.notion.so/n8n/How-to-use-Test-Instances-d65f49dfc51f441ea44367fb6f67eb0a?pvs=4#f6a177d32bde4b57ae2da0b8e454bfce) or [deploy early access version on Cloud](https://www.notion.so/n8n/Cloudbot-3dbe779836004972b7057bc989526998?pvs=4#fef2d36ab02247e1a0f65a74f6fb534e). --------- Signed-off-by: Oleg Ivaniv <me@olegivaniv.com> Co-authored-by: Elias Meire <elias@meire.dev>
2023-12-08 04:42:32 -08:00
inputs: `={{ ((parameter) => { ${getInputs.toString()}; return getInputs(parameter) })($parameter) }}`,
outputs: [NodeConnectionType.Main],
credentials: [],
properties: [
feat: AI nodes usability fixes + Summarization Chain V2 (#7949) Fixes: - Refactor connection snapping when dragging and enable it also for non-main connection types - Fix propagation of errors from sub-nodes - Fix chat scrolling when sending/receiving messages - Prevent empty chat messages - Fix sub-node selected styles - Fix output names text overflow Usability improvements: - Auto-add manual chat trigger for agents & chain nodes - Various labels and description updates - Make the output parser input optional for Basic LLM Chain - Summarization Chain V2 with a simplified document loader & text chunking mode #### How to test the change: Example workflow showcasing different operation mode of the new summarization chain: [Summarization_V2.json](https://github.com/n8n-io/n8n/files/13599901/Summarization_V2.json) ## Issues fixed Include links to Github issue or Community forum post or **Linear ticket**: > Important in order to close automatically and provide context to reviewers - https://www.notion.so/n8n/David-Langchain-Posthog-notes-7a9294938420403095f4508f1a21d31d - https://linear.app/n8n/issue/N8N-7070/ux-fixes-batch - https://linear.app/n8n/issue/N8N-7071/ai-sub-node-bugs ## Review / Merge checklist - [x] PR title and summary are descriptive. **Remember, the title automatically goes into the changelog. Use `(no-changelog)` otherwise.** ([conventions](https://github.com/n8n-io/n8n/blob/master/.github/pull_request_title_conventions.md)) - [x] [Docs updated](https://github.com/n8n-io/n8n-docs) or follow-up ticket created. - [ ] Tests included. > A bug is not considered fixed, unless a test is added to prevent it from happening again. A feature is not complete without tests. > > *(internal)* You can use Slack commands to trigger [e2e tests](https://www.notion.so/n8n/How-to-use-Test-Instances-d65f49dfc51f441ea44367fb6f67eb0a?pvs=4#a39f9e5ba64a48b58a71d81c837e8227) or [deploy test instance](https://www.notion.so/n8n/How-to-use-Test-Instances-d65f49dfc51f441ea44367fb6f67eb0a?pvs=4#f6a177d32bde4b57ae2da0b8e454bfce) or [deploy early access version on Cloud](https://www.notion.so/n8n/Cloudbot-3dbe779836004972b7057bc989526998?pvs=4#fef2d36ab02247e1a0f65a74f6fb534e). --------- Signed-off-by: Oleg Ivaniv <me@olegivaniv.com> Co-authored-by: Elias Meire <elias@meire.dev>
2023-12-08 04:42:32 -08:00
getTemplateNoticeField(1978),
{
displayName: 'Prompt',
name: 'prompt',
type: 'string',
required: true,
default: '={{ $json.input }}',
displayOptions: {
show: {
'@version': [1],
},
},
},
{
displayName: 'Prompt',
name: 'prompt',
type: 'string',
required: true,
default: '={{ $json.chat_input }}',
displayOptions: {
show: {
feat: AI nodes usability fixes + Summarization Chain V2 (#7949) Fixes: - Refactor connection snapping when dragging and enable it also for non-main connection types - Fix propagation of errors from sub-nodes - Fix chat scrolling when sending/receiving messages - Prevent empty chat messages - Fix sub-node selected styles - Fix output names text overflow Usability improvements: - Auto-add manual chat trigger for agents & chain nodes - Various labels and description updates - Make the output parser input optional for Basic LLM Chain - Summarization Chain V2 with a simplified document loader & text chunking mode #### How to test the change: Example workflow showcasing different operation mode of the new summarization chain: [Summarization_V2.json](https://github.com/n8n-io/n8n/files/13599901/Summarization_V2.json) ## Issues fixed Include links to Github issue or Community forum post or **Linear ticket**: > Important in order to close automatically and provide context to reviewers - https://www.notion.so/n8n/David-Langchain-Posthog-notes-7a9294938420403095f4508f1a21d31d - https://linear.app/n8n/issue/N8N-7070/ux-fixes-batch - https://linear.app/n8n/issue/N8N-7071/ai-sub-node-bugs ## Review / Merge checklist - [x] PR title and summary are descriptive. **Remember, the title automatically goes into the changelog. Use `(no-changelog)` otherwise.** ([conventions](https://github.com/n8n-io/n8n/blob/master/.github/pull_request_title_conventions.md)) - [x] [Docs updated](https://github.com/n8n-io/n8n-docs) or follow-up ticket created. - [ ] Tests included. > A bug is not considered fixed, unless a test is added to prevent it from happening again. A feature is not complete without tests. > > *(internal)* You can use Slack commands to trigger [e2e tests](https://www.notion.so/n8n/How-to-use-Test-Instances-d65f49dfc51f441ea44367fb6f67eb0a?pvs=4#a39f9e5ba64a48b58a71d81c837e8227) or [deploy test instance](https://www.notion.so/n8n/How-to-use-Test-Instances-d65f49dfc51f441ea44367fb6f67eb0a?pvs=4#f6a177d32bde4b57ae2da0b8e454bfce) or [deploy early access version on Cloud](https://www.notion.so/n8n/Cloudbot-3dbe779836004972b7057bc989526998?pvs=4#fef2d36ab02247e1a0f65a74f6fb534e). --------- Signed-off-by: Oleg Ivaniv <me@olegivaniv.com> Co-authored-by: Elias Meire <elias@meire.dev>
2023-12-08 04:42:32 -08:00
'@version': [1.1, 1.2],
},
},
},
{
displayName: 'Prompt',
name: 'prompt',
type: 'string',
required: true,
default: '={{ $json.chatInput }}',
displayOptions: {
show: {
'@version': [1.3],
},
},
},
{
displayName: 'Prompt',
name: 'promptType',
type: 'options',
options: [
{
// eslint-disable-next-line n8n-nodes-base/node-param-display-name-miscased
name: 'Take from previous node automatically',
value: 'auto',
description: 'Looks for an input field called chatInput',
},
{
// eslint-disable-next-line n8n-nodes-base/node-param-display-name-miscased
name: 'Define below',
value: 'define',
description:
'Use an expression to reference data in previous nodes or enter static text',
},
],
displayOptions: {
hide: {
'@version': [1, 1.1, 1.2, 1.3],
},
},
default: 'auto',
},
{
displayName: 'Text',
name: 'text',
type: 'string',
required: true,
default: '',
placeholder: 'e.g. Hello, how can you help me?',
typeOptions: {
rows: 2,
},
displayOptions: {
show: {
promptType: ['define'],
},
},
},
{
displayName: 'Require Specific Output Format',
name: 'hasOutputParser',
type: 'boolean',
default: false,
noDataExpression: true,
displayOptions: {
hide: {
'@version': [1, 1.1, 1.3],
},
},
},
{
displayName: 'Chat Messages (if Using a Chat Model)',
name: 'messages',
type: 'fixedCollection',
typeOptions: {
multipleValues: true,
},
default: {},
placeholder: 'Add prompt',
options: [
{
name: 'messageValues',
displayName: 'Prompt',
values: [
{
displayName: 'Type Name or ID',
name: 'type',
type: 'options',
options: [
{
name: 'AI',
value: AIMessagePromptTemplate.lc_name(),
},
{
name: 'System',
value: SystemMessagePromptTemplate.lc_name(),
},
{
name: 'User',
value: HumanMessagePromptTemplate.lc_name(),
},
],
default: SystemMessagePromptTemplate.lc_name(),
},
{
displayName: 'Message Type',
name: 'messageType',
type: 'options',
displayOptions: {
show: {
type: [HumanMessagePromptTemplate.lc_name()],
},
},
options: [
{
name: 'Text',
value: 'text',
description: 'Simple text message',
},
{
name: 'Image (Binary)',
value: 'imageBinary',
description: 'Process the binary input from the previous node',
},
{
name: 'Image (URL)',
value: 'imageUrl',
description: 'Process the image from the specified URL',
},
],
default: 'text',
},
{
displayName: 'Image Data Field Name',
name: 'binaryImageDataKey',
type: 'string',
default: 'data',
required: true,
description:
'The name of the field in the chains input that contains the binary image file to be processed',
displayOptions: {
show: {
messageType: ['imageBinary'],
},
},
},
{
displayName: 'Image URL',
name: 'imageUrl',
type: 'string',
default: '',
required: true,
description: 'URL to the image to be processed',
displayOptions: {
show: {
messageType: ['imageUrl'],
},
},
},
{
displayName: 'Image Details',
description:
'Control how the model processes the image and generates its textual understanding',
name: 'imageDetail',
type: 'options',
displayOptions: {
show: {
type: [HumanMessagePromptTemplate.lc_name()],
messageType: ['imageBinary', 'imageUrl'],
},
},
options: [
{
name: 'Auto',
value: 'auto',
description:
'Model will use the auto setting which will look at the image input size and decide if it should use the low or high setting',
},
{
name: 'Low',
value: 'low',
description:
'The model will receive a low-res 512px x 512px version of the image, and represent the image with a budget of 65 tokens. This allows the API to return faster responses and consume fewer input tokens for use cases that do not require high detail.',
},
{
name: 'High',
value: 'high',
description:
'Allows the model to see the low res image and then creates detailed crops of input images as 512px squares based on the input image size. Each of the detailed crops uses twice the token budget (65 tokens) for a total of 129 tokens.',
},
],
default: 'auto',
},
{
displayName: 'Message',
name: 'message',
type: 'string',
required: true,
displayOptions: {
hide: {
messageType: ['imageBinary', 'imageUrl'],
},
},
default: '',
},
],
},
],
},
feat: AI nodes usability fixes + Summarization Chain V2 (#7949) Fixes: - Refactor connection snapping when dragging and enable it also for non-main connection types - Fix propagation of errors from sub-nodes - Fix chat scrolling when sending/receiving messages - Prevent empty chat messages - Fix sub-node selected styles - Fix output names text overflow Usability improvements: - Auto-add manual chat trigger for agents & chain nodes - Various labels and description updates - Make the output parser input optional for Basic LLM Chain - Summarization Chain V2 with a simplified document loader & text chunking mode #### How to test the change: Example workflow showcasing different operation mode of the new summarization chain: [Summarization_V2.json](https://github.com/n8n-io/n8n/files/13599901/Summarization_V2.json) ## Issues fixed Include links to Github issue or Community forum post or **Linear ticket**: > Important in order to close automatically and provide context to reviewers - https://www.notion.so/n8n/David-Langchain-Posthog-notes-7a9294938420403095f4508f1a21d31d - https://linear.app/n8n/issue/N8N-7070/ux-fixes-batch - https://linear.app/n8n/issue/N8N-7071/ai-sub-node-bugs ## Review / Merge checklist - [x] PR title and summary are descriptive. **Remember, the title automatically goes into the changelog. Use `(no-changelog)` otherwise.** ([conventions](https://github.com/n8n-io/n8n/blob/master/.github/pull_request_title_conventions.md)) - [x] [Docs updated](https://github.com/n8n-io/n8n-docs) or follow-up ticket created. - [ ] Tests included. > A bug is not considered fixed, unless a test is added to prevent it from happening again. A feature is not complete without tests. > > *(internal)* You can use Slack commands to trigger [e2e tests](https://www.notion.so/n8n/How-to-use-Test-Instances-d65f49dfc51f441ea44367fb6f67eb0a?pvs=4#a39f9e5ba64a48b58a71d81c837e8227) or [deploy test instance](https://www.notion.so/n8n/How-to-use-Test-Instances-d65f49dfc51f441ea44367fb6f67eb0a?pvs=4#f6a177d32bde4b57ae2da0b8e454bfce) or [deploy early access version on Cloud](https://www.notion.so/n8n/Cloudbot-3dbe779836004972b7057bc989526998?pvs=4#fef2d36ab02247e1a0f65a74f6fb534e). --------- Signed-off-by: Oleg Ivaniv <me@olegivaniv.com> Co-authored-by: Elias Meire <elias@meire.dev>
2023-12-08 04:42:32 -08:00
{
displayName: `Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='${NodeConnectionType.AiOutputParser}'>output parser</a> on the canvas to specify the output format you require`,
name: 'notice',
type: 'notice',
default: '',
displayOptions: {
show: {
hasOutputParser: [true],
},
},
},
],
};
async execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {
this.logger.verbose('Executing LLM Chain');
const items = this.getInputData();
const returnData: INodeExecutionData[] = [];
const llm = (await this.getInputConnectionData(
NodeConnectionType.AiLanguageModel,
0,
)) as BaseLanguageModel;
const outputParsers = await getOptionalOutputParsers(this);
for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
try {
let prompt: string;
if (this.getNode().typeVersion <= 1.3) {
prompt = this.getNodeParameter('prompt', itemIndex) as string;
} else {
prompt = getPromptInputByType({
ctx: this,
i: itemIndex,
inputKey: 'text',
promptTypeKey: 'promptType',
});
}
const messages = this.getNodeParameter(
'messages.messageValues',
itemIndex,
[],
) as MessagesTemplate[];
if (prompt === undefined) {
throw new NodeOperationError(this.getNode(), "The 'prompt' parameter is empty.");
}
const responses = await getChain(this, itemIndex, prompt, llm, outputParsers, messages);
responses.forEach((response) => {
let data: IDataObject;
if (typeof response === 'string') {
data = {
response: {
text: response.trim(),
},
};
} else if (Array.isArray(response)) {
data = {
data: response,
};
} else if (response instanceof Object) {
data = response as IDataObject;
} else {
data = {
response: {
text: response,
},
};
}
returnData.push({
json: data,
});
});
} catch (error) {
if (this.continueOnFail()) {
returnData.push({ json: { error: error.message }, pairedItem: { item: itemIndex } });
continue;
}
throw error;
}
}
return [returnData];
}
}