WIP: Agent V2 using LangGraph

Oleg Ivaniv 2024-12-10 07:54:16 +01:00
parent 5fc3bf631d
commit f28fda5f72
29 changed files with 965 additions and 456 deletions


@@ -1,438 +1,38 @@
import { NodeConnectionType, NodeOperationError } from 'n8n-workflow';
import type {
INodeInputConfiguration,
INodeInputFilter,
IExecuteFunctions,
INodeExecutionData,
INodeType,
INodeTypeDescription,
INodeProperties,
} from 'n8n-workflow';
import type { INodeTypeBaseDescription } from 'n8n-workflow';
import { VersionedNodeType } from 'n8n-workflow';
import { conversationalAgentProperties } from './agents/ConversationalAgent/description';
import { conversationalAgentExecute } from './agents/ConversationalAgent/execute';
import { openAiFunctionsAgentProperties } from './agents/OpenAiFunctionsAgent/description';
import { openAiFunctionsAgentExecute } from './agents/OpenAiFunctionsAgent/execute';
import { planAndExecuteAgentProperties } from './agents/PlanAndExecuteAgent/description';
import { planAndExecuteAgentExecute } from './agents/PlanAndExecuteAgent/execute';
import { reActAgentAgentProperties } from './agents/ReActAgent/description';
import { reActAgentAgentExecute } from './agents/ReActAgent/execute';
import { sqlAgentAgentProperties } from './agents/SqlAgent/description';
import { sqlAgentAgentExecute } from './agents/SqlAgent/execute';
import { toolsAgentProperties } from './agents/ToolsAgent/description';
import { toolsAgentExecute } from './agents/ToolsAgent/execute';
import { promptTypeOptions, textFromPreviousNode, textInput } from '../../../utils/descriptions';
import { AgentV1 } from './v1/AgentV1.node';
import { AgentV2 } from './v2/AgentV2.node';
// Function used in the inputs expression to figure out which inputs to
// display based on the agent type
function getInputs(
agent: 'toolsAgent' | 'conversationalAgent' | 'openAiFunctionsAgent' | 'reActAgent' | 'sqlAgent',
hasOutputParser?: boolean,
): Array<NodeConnectionType | INodeInputConfiguration> {
interface SpecialInput {
type: NodeConnectionType;
filter?: INodeInputFilter;
required?: boolean;
}
const getInputData = (
inputs: SpecialInput[],
): Array<NodeConnectionType | INodeInputConfiguration> => {
const displayNames: { [key: string]: string } = {
[NodeConnectionType.AiLanguageModel]: 'Model',
[NodeConnectionType.AiMemory]: 'Memory',
[NodeConnectionType.AiTool]: 'Tool',
[NodeConnectionType.AiOutputParser]: 'Output Parser',
};
return inputs.map(({ type, filter }) => {
const isModelType = type === NodeConnectionType.AiLanguageModel;
let displayName = type in displayNames ? displayNames[type] : undefined;
if (
isModelType &&
['openAiFunctionsAgent', 'toolsAgent', 'conversationalAgent'].includes(agent)
) {
displayName = 'Chat Model';
}
const input: INodeInputConfiguration = {
type,
displayName,
required: isModelType,
maxConnections: [NodeConnectionType.AiLanguageModel, NodeConnectionType.AiMemory].includes(
type as NodeConnectionType,
)
? 1
: undefined,
};
if (filter) {
input.filter = filter;
}
return input;
});
};
let specialInputs: SpecialInput[] = [];
if (agent === 'conversationalAgent') {
specialInputs = [
{
type: NodeConnectionType.AiLanguageModel,
filter: {
nodes: [
'@n8n/n8n-nodes-langchain.lmChatAnthropic',
'@n8n/n8n-nodes-langchain.lmChatAwsBedrock',
'@n8n/n8n-nodes-langchain.lmChatGroq',
'@n8n/n8n-nodes-langchain.lmChatOllama',
'@n8n/n8n-nodes-langchain.lmChatOpenAi',
'@n8n/n8n-nodes-langchain.lmChatGoogleGemini',
'@n8n/n8n-nodes-langchain.lmChatGoogleVertex',
'@n8n/n8n-nodes-langchain.lmChatMistralCloud',
'@n8n/n8n-nodes-langchain.lmChatAzureOpenAi',
],
},
},
{
type: NodeConnectionType.AiMemory,
},
{
type: NodeConnectionType.AiTool,
},
{
type: NodeConnectionType.AiOutputParser,
},
];
} else if (agent === 'toolsAgent') {
specialInputs = [
{
type: NodeConnectionType.AiLanguageModel,
filter: {
nodes: [
'@n8n/n8n-nodes-langchain.lmChatAnthropic',
'@n8n/n8n-nodes-langchain.lmChatAzureOpenAi',
'@n8n/n8n-nodes-langchain.lmChatAwsBedrock',
'@n8n/n8n-nodes-langchain.lmChatMistralCloud',
'@n8n/n8n-nodes-langchain.lmChatOllama',
'@n8n/n8n-nodes-langchain.lmChatOpenAi',
'@n8n/n8n-nodes-langchain.lmChatGroq',
'@n8n/n8n-nodes-langchain.lmChatGoogleVertex',
'@n8n/n8n-nodes-langchain.lmChatGoogleGemini',
],
},
},
{
type: NodeConnectionType.AiMemory,
},
{
type: NodeConnectionType.AiTool,
required: true,
},
{
type: NodeConnectionType.AiOutputParser,
},
];
} else if (agent === 'openAiFunctionsAgent') {
specialInputs = [
{
type: NodeConnectionType.AiLanguageModel,
filter: {
nodes: [
'@n8n/n8n-nodes-langchain.lmChatOpenAi',
'@n8n/n8n-nodes-langchain.lmChatAzureOpenAi',
],
},
},
{
type: NodeConnectionType.AiMemory,
},
{
type: NodeConnectionType.AiTool,
required: true,
},
{
type: NodeConnectionType.AiOutputParser,
},
];
} else if (agent === 'reActAgent') {
specialInputs = [
{
type: NodeConnectionType.AiLanguageModel,
},
{
type: NodeConnectionType.AiTool,
},
{
type: NodeConnectionType.AiOutputParser,
},
];
} else if (agent === 'sqlAgent') {
specialInputs = [
{
type: NodeConnectionType.AiLanguageModel,
},
{
type: NodeConnectionType.AiMemory,
},
];
} else if (agent === 'planAndExecuteAgent') {
specialInputs = [
{
type: NodeConnectionType.AiLanguageModel,
},
{
type: NodeConnectionType.AiTool,
},
{
type: NodeConnectionType.AiOutputParser,
},
];
}
if (hasOutputParser === false) {
specialInputs = specialInputs.filter(
(input) => input.type !== NodeConnectionType.AiOutputParser,
);
}
return [NodeConnectionType.Main, ...getInputData(specialInputs)];
}
const agentTypeProperty: INodeProperties = {
displayName: 'Agent',
const baseDescription: INodeTypeBaseDescription = {
displayName: 'AI Agent',
description: 'Generates an action plan and executes it. Can use external tools.',
name: 'agent',
type: 'options',
noDataExpression: true,
// eslint-disable-next-line n8n-nodes-base/node-param-options-type-unsorted-items
options: [
{
name: 'Tools Agent',
value: 'toolsAgent',
description:
'Utilizes structured tool schemas for precise and reliable tool selection and execution. Recommended for complex tasks requiring accurate and consistent tool usage, but only usable with models that support tool calling.',
icon: 'fa:robot',
group: ['transform'],
codex: {
alias: ['LangChain', 'Chat', 'Conversational', 'Plan and Execute', 'ReAct', 'Tools'],
categories: ['AI'],
subcategories: {
AI: ['Agents', 'Root Nodes'],
},
{
name: 'Conversational Agent',
value: 'conversationalAgent',
description:
'Describes tools in the system prompt and parses JSON responses for tool calls. More flexible but potentially less reliable than the Tools Agent. Suitable for simpler interactions or with models not supporting structured schemas.',
resources: {
primaryDocumentation: [
{
url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.agent/',
},
],
},
{
name: 'OpenAI Functions Agent',
value: 'openAiFunctionsAgent',
description:
"Leverages OpenAI's function calling capabilities to precisely select and execute tools. Excellent for tasks requiring structured outputs when working with OpenAI models.",
},
{
name: 'Plan and Execute Agent',
value: 'planAndExecuteAgent',
description:
'Creates a high-level plan for complex tasks and then executes each step. Suitable for multi-stage problems or when a strategic approach is needed.',
},
{
name: 'ReAct Agent',
value: 'reActAgent',
description:
'Combines reasoning and action in an iterative process. Effective for tasks that require careful analysis and step-by-step problem-solving.',
},
{
name: 'SQL Agent',
value: 'sqlAgent',
description:
'Specializes in interacting with SQL databases. Ideal for data analysis tasks, generating queries, or extracting insights from structured data.',
},
],
default: '',
},
defaultVersion: 2,
};
export class Agent implements INodeType {
description: INodeTypeDescription = {
displayName: 'AI Agent',
name: 'agent',
icon: 'fa:robot',
iconColor: 'black',
group: ['transform'],
version: [1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
description: 'Generates an action plan and executes it. Can use external tools.',
subtitle:
"={{ { toolsAgent: 'Tools Agent', conversationalAgent: 'Conversational Agent', openAiFunctionsAgent: 'OpenAI Functions Agent', reActAgent: 'ReAct Agent', sqlAgent: 'SQL Agent', planAndExecuteAgent: 'Plan and Execute Agent' }[$parameter.agent] }}",
defaults: {
name: 'AI Agent',
color: '#404040',
},
codex: {
alias: ['LangChain', 'Chat', 'Conversational', 'Plan and Execute', 'ReAct', 'Tools'],
categories: ['AI'],
subcategories: {
AI: ['Agents', 'Root Nodes'],
},
resources: {
primaryDocumentation: [
{
url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.agent/',
},
],
},
},
inputs: `={{
((agent, hasOutputParser) => {
${getInputs.toString()};
return getInputs(agent, hasOutputParser)
})($parameter.agent, $parameter.hasOutputParser === undefined || $parameter.hasOutputParser === true)
}}`,
outputs: [NodeConnectionType.Main],
credentials: [
{
// eslint-disable-next-line n8n-nodes-base/node-class-description-credentials-name-unsuffixed
name: 'mySql',
required: true,
testedBy: 'mysqlConnectionTest',
displayOptions: {
show: {
agent: ['sqlAgent'],
'/dataSource': ['mysql'],
},
},
},
{
name: 'postgres',
required: true,
displayOptions: {
show: {
agent: ['sqlAgent'],
'/dataSource': ['postgres'],
},
},
},
],
properties: [
{
displayName:
'Tip: Get a feel for agents with our quick <a href="https://docs.n8n.io/advanced-ai/intro-tutorial/" target="_blank">tutorial</a> or see an <a href="/templates/1954" target="_blank">example</a> of how this node works',
name: 'notice_tip',
type: 'notice',
default: '',
displayOptions: {
show: {
agent: ['conversationalAgent', 'toolsAgent'],
},
},
},
// Make Conversational Agent the default agent for versions 1.5 and below
{
...agentTypeProperty,
options: agentTypeProperty?.options?.filter(
(o) => 'value' in o && o.value !== 'toolsAgent',
),
displayOptions: { show: { '@version': [{ _cnd: { lte: 1.5 } }] } },
default: 'conversationalAgent',
},
// Make Tools Agent the default agent for versions 1.6 and above
{
...agentTypeProperty,
displayOptions: { show: { '@version': [{ _cnd: { gte: 1.6 } }] } },
default: 'toolsAgent',
},
{
...promptTypeOptions,
displayOptions: {
hide: {
'@version': [{ _cnd: { lte: 1.2 } }],
agent: ['sqlAgent'],
},
},
},
{
...textFromPreviousNode,
displayOptions: {
show: { promptType: ['auto'], '@version': [{ _cnd: { gte: 1.7 } }] },
// SQL Agent has data source and credentials parameters so we need to include this input there manually
// to preserve the order
hide: {
agent: ['sqlAgent'],
},
},
},
{
...textInput,
displayOptions: {
show: {
promptType: ['define'],
},
hide: {
agent: ['sqlAgent'],
},
},
},
{
displayName: 'For more reliable structured output parsing, consider using the Tools agent',
name: 'notice',
type: 'notice',
default: '',
displayOptions: {
show: {
hasOutputParser: [true],
agent: [
'conversationalAgent',
'reActAgent',
'planAndExecuteAgent',
'openAiFunctionsAgent',
],
},
},
},
{
displayName: 'Require Specific Output Format',
name: 'hasOutputParser',
type: 'boolean',
default: false,
noDataExpression: true,
displayOptions: {
hide: {
'@version': [{ _cnd: { lte: 1.2 } }],
agent: ['sqlAgent'],
},
},
},
{
displayName: `Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='${NodeConnectionType.AiOutputParser}'>output parser</a> on the canvas to specify the output format you require`,
name: 'notice',
type: 'notice',
default: '',
displayOptions: {
show: {
hasOutputParser: [true],
agent: ['toolsAgent'],
},
},
},
...toolsAgentProperties,
...conversationalAgentProperties,
...openAiFunctionsAgentProperties,
...reActAgentAgentProperties,
...sqlAgentAgentProperties,
...planAndExecuteAgentProperties,
],
};
async execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {
const agentType = this.getNodeParameter('agent', 0, '') as string;
const nodeVersion = this.getNode().typeVersion;
if (agentType === 'conversationalAgent') {
return await conversationalAgentExecute.call(this, nodeVersion);
} else if (agentType === 'toolsAgent') {
return await toolsAgentExecute.call(this);
} else if (agentType === 'openAiFunctionsAgent') {
return await openAiFunctionsAgentExecute.call(this, nodeVersion);
} else if (agentType === 'reActAgent') {
return await reActAgentAgentExecute.call(this, nodeVersion);
} else if (agentType === 'sqlAgent') {
return await sqlAgentAgentExecute.call(this);
} else if (agentType === 'planAndExecuteAgent') {
return await planAndExecuteAgentExecute.call(this, nodeVersion);
}
throw new NodeOperationError(this.getNode(), `The agent type "${agentType}" is not supported`);
export class Agent extends VersionedNodeType {
constructor() {
const nodeVersions = {
1: new AgentV1(baseDescription),
2: new AgentV2(baseDescription),
};
super(nodeVersions, baseDescription);
}
}
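
For context, VersionedNodeType resolves the concrete implementation from a node's typeVersion and falls back to defaultVersion (2 here) for newly added nodes. A minimal usage sketch, assuming the getNodeType(version?) accessor that n8n-workflow's VersionedNodeType normally exposes:

import { Agent } from './Agent.node';

const agent = new Agent();

// Workflows saved with typeVersion 1 keep the classic multi-agent implementation...
const v1Node = agent.getNodeType(1); // AgentV1
// ...while nodes added from now on resolve to the LangGraph-based implementation,
// because baseDescription.defaultVersion === 2.
const v2Node = agent.getNodeType(); // AgentV2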


@@ -0,0 +1,445 @@
import { NodeConnectionType, NodeOperationError } from 'n8n-workflow';
import type {
INodeInputConfiguration,
INodeInputFilter,
IExecuteFunctions,
INodeExecutionData,
INodeType,
INodeTypeDescription,
INodeProperties,
INodeTypeBaseDescription,
} from 'n8n-workflow';
import { conversationalAgentProperties } from './agents/ConversationalAgent/description';
import { conversationalAgentExecute } from './agents/ConversationalAgent/execute';
import { openAiFunctionsAgentProperties } from './agents/OpenAiFunctionsAgent/description';
import { openAiFunctionsAgentExecute } from './agents/OpenAiFunctionsAgent/execute';
import { planAndExecuteAgentProperties } from './agents/PlanAndExecuteAgent/description';
import { planAndExecuteAgentExecute } from './agents/PlanAndExecuteAgent/execute';
import { reActAgentAgentProperties } from './agents/ReActAgent/description';
import { reActAgentAgentExecute } from './agents/ReActAgent/execute';
import { sqlAgentAgentProperties } from './agents/SqlAgent/description';
import { sqlAgentAgentExecute } from './agents/SqlAgent/execute';
import { toolsAgentProperties } from './agents/ToolsAgent/description';
import { toolsAgentExecute } from './agents/ToolsAgent/execute';
import { promptTypeOptions, textFromPreviousNode, textInput } from '../../../../utils/descriptions';
// Function used in the inputs expression to figure out which inputs to
// display based on the agent type
function getInputs(
agent: 'toolsAgent' | 'conversationalAgent' | 'openAiFunctionsAgent' | 'reActAgent' | 'sqlAgent',
hasOutputParser?: boolean,
): Array<NodeConnectionType | INodeInputConfiguration> {
interface SpecialInput {
type: NodeConnectionType;
filter?: INodeInputFilter;
required?: boolean;
}
const getInputData = (
inputs: SpecialInput[],
): Array<NodeConnectionType | INodeInputConfiguration> => {
const displayNames: { [key: string]: string } = {
[NodeConnectionType.AiLanguageModel]: 'Model',
[NodeConnectionType.AiMemory]: 'Memory',
[NodeConnectionType.AiTool]: 'Tool',
[NodeConnectionType.AiOutputParser]: 'Output Parser',
};
return inputs.map(({ type, filter }) => {
const isModelType = type === NodeConnectionType.AiLanguageModel;
let displayName = type in displayNames ? displayNames[type] : undefined;
if (
isModelType &&
['openAiFunctionsAgent', 'toolsAgent', 'conversationalAgent'].includes(agent)
) {
displayName = 'Chat Model';
}
const input: INodeInputConfiguration = {
type,
displayName,
required: isModelType,
maxConnections: [NodeConnectionType.AiLanguageModel, NodeConnectionType.AiMemory].includes(
type as NodeConnectionType,
)
? 1
: undefined,
};
if (filter) {
input.filter = filter;
}
return input;
});
};
let specialInputs: SpecialInput[] = [];
if (agent === 'conversationalAgent') {
specialInputs = [
{
type: NodeConnectionType.AiLanguageModel,
filter: {
nodes: [
'@n8n/n8n-nodes-langchain.lmChatAnthropic',
'@n8n/n8n-nodes-langchain.lmChatAwsBedrock',
'@n8n/n8n-nodes-langchain.lmChatGroq',
'@n8n/n8n-nodes-langchain.lmChatOllama',
'@n8n/n8n-nodes-langchain.lmChatOpenAi',
'@n8n/n8n-nodes-langchain.lmChatGoogleGemini',
'@n8n/n8n-nodes-langchain.lmChatGoogleVertex',
'@n8n/n8n-nodes-langchain.lmChatMistralCloud',
'@n8n/n8n-nodes-langchain.lmChatAzureOpenAi',
],
},
},
{
type: NodeConnectionType.AiMemory,
},
{
type: NodeConnectionType.AiTool,
},
{
type: NodeConnectionType.AiOutputParser,
},
];
} else if (agent === 'toolsAgent') {
specialInputs = [
{
type: NodeConnectionType.AiLanguageModel,
filter: {
nodes: [
'@n8n/n8n-nodes-langchain.lmChatAnthropic',
'@n8n/n8n-nodes-langchain.lmChatAzureOpenAi',
'@n8n/n8n-nodes-langchain.lmChatAwsBedrock',
'@n8n/n8n-nodes-langchain.lmChatMistralCloud',
'@n8n/n8n-nodes-langchain.lmChatOllama',
'@n8n/n8n-nodes-langchain.lmChatOpenAi',
'@n8n/n8n-nodes-langchain.lmChatGroq',
'@n8n/n8n-nodes-langchain.lmChatGoogleVertex',
'@n8n/n8n-nodes-langchain.lmChatGoogleGemini',
],
},
},
{
type: NodeConnectionType.AiMemory,
},
{
type: NodeConnectionType.AiTool,
required: true,
},
{
type: NodeConnectionType.AiOutputParser,
},
];
} else if (agent === 'openAiFunctionsAgent') {
specialInputs = [
{
type: NodeConnectionType.AiLanguageModel,
filter: {
nodes: [
'@n8n/n8n-nodes-langchain.lmChatOpenAi',
'@n8n/n8n-nodes-langchain.lmChatAzureOpenAi',
],
},
},
{
type: NodeConnectionType.AiMemory,
},
{
type: NodeConnectionType.AiTool,
required: true,
},
{
type: NodeConnectionType.AiOutputParser,
},
];
} else if (agent === 'reActAgent') {
specialInputs = [
{
type: NodeConnectionType.AiLanguageModel,
},
{
type: NodeConnectionType.AiTool,
},
{
type: NodeConnectionType.AiOutputParser,
},
];
} else if (agent === 'sqlAgent') {
specialInputs = [
{
type: NodeConnectionType.AiLanguageModel,
},
{
type: NodeConnectionType.AiMemory,
},
];
} else if (agent === 'planAndExecuteAgent') {
specialInputs = [
{
type: NodeConnectionType.AiLanguageModel,
},
{
type: NodeConnectionType.AiTool,
},
{
type: NodeConnectionType.AiOutputParser,
},
];
}
if (hasOutputParser === false) {
specialInputs = specialInputs.filter(
(input) => input.type !== NodeConnectionType.AiOutputParser,
);
}
return [NodeConnectionType.Main, ...getInputData(specialInputs)];
}
const agentTypeProperty: INodeProperties = {
displayName: 'Agent',
name: 'agent',
type: 'options',
noDataExpression: true,
// eslint-disable-next-line n8n-nodes-base/node-param-options-type-unsorted-items
options: [
{
name: 'Tools Agent',
value: 'toolsAgent',
description:
'Utilizes structured tool schemas for precise and reliable tool selection and execution. Recommended for complex tasks requiring accurate and consistent tool usage, but only usable with models that support tool calling.',
},
{
name: 'Conversational Agent',
value: 'conversationalAgent',
description:
'Describes tools in the system prompt and parses JSON responses for tool calls. More flexible but potentially less reliable than the Tools Agent. Suitable for simpler interactions or with models not supporting structured schemas.',
},
{
name: 'OpenAI Functions Agent',
value: 'openAiFunctionsAgent',
description:
"Leverages OpenAI's function calling capabilities to precisely select and execute tools. Excellent for tasks requiring structured outputs when working with OpenAI models.",
},
{
name: 'Plan and Execute Agent',
value: 'planAndExecuteAgent',
description:
'Creates a high-level plan for complex tasks and then executes each step. Suitable for multi-stage problems or when a strategic approach is needed.',
},
{
name: 'ReAct Agent',
value: 'reActAgent',
description:
'Combines reasoning and action in an iterative process. Effective for tasks that require careful analysis and step-by-step problem-solving.',
},
{
name: 'SQL Agent',
value: 'sqlAgent',
description:
'Specializes in interacting with SQL databases. Ideal for data analysis tasks, generating queries, or extracting insights from structured data.',
},
],
default: '',
};
const versionDescription: INodeTypeDescription = {
displayName: 'AI Agent',
name: 'agent',
icon: 'fa:robot',
iconColor: 'black',
group: ['transform'],
version: [1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
description: 'Generates an action plan and executes it. Can use external tools.',
subtitle:
"={{ { toolsAgent: 'Tools Agent', conversationalAgent: 'Conversational Agent', openAiFunctionsAgent: 'OpenAI Functions Agent', reActAgent: 'ReAct Agent', sqlAgent: 'SQL Agent', planAndExecuteAgent: 'Plan and Execute Agent' }[$parameter.agent] }}",
defaults: {
name: 'AI Agent',
color: '#404040',
},
codex: {
alias: ['LangChain', 'Chat', 'Conversational', 'Plan and Execute', 'ReAct', 'Tools'],
categories: ['AI'],
subcategories: {
AI: ['Agents', 'Root Nodes'],
},
resources: {
primaryDocumentation: [
{
url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.agent/',
},
],
},
},
inputs: `={{
((agent, hasOutputParser) => {
${getInputs.toString()};
return getInputs(agent, hasOutputParser)
})($parameter.agent, $parameter.hasOutputParser === undefined || $parameter.hasOutputParser === true)
}}`,
outputs: [NodeConnectionType.Main],
credentials: [
{
// eslint-disable-next-line n8n-nodes-base/node-class-description-credentials-name-unsuffixed
name: 'mySql',
required: true,
testedBy: 'mysqlConnectionTest',
displayOptions: {
show: {
agent: ['sqlAgent'],
'/dataSource': ['mysql'],
},
},
},
{
name: 'postgres',
required: true,
displayOptions: {
show: {
agent: ['sqlAgent'],
'/dataSource': ['postgres'],
},
},
},
],
properties: [
{
displayName:
'Tip: Get a feel for agents with our quick <a href="https://docs.n8n.io/advanced-ai/intro-tutorial/" target="_blank">tutorial</a> or see an <a href="/templates/1954" target="_blank">example</a> of how this node works',
name: 'notice_tip',
type: 'notice',
default: '',
displayOptions: {
show: {
agent: ['conversationalAgent', 'toolsAgent'],
},
},
},
// Make Conversational Agent the default agent for versions 1.5 and below
{
...agentTypeProperty,
options: agentTypeProperty?.options?.filter((o) => 'value' in o && o.value !== 'toolsAgent'),
displayOptions: { show: { '@version': [{ _cnd: { lte: 1.5 } }] } },
default: 'conversationalAgent',
},
// Make Tools Agent the default agent for versions 1.6 and above
{
...agentTypeProperty,
displayOptions: { show: { '@version': [{ _cnd: { gte: 1.6 } }] } },
default: 'toolsAgent',
},
{
...promptTypeOptions,
displayOptions: {
hide: {
'@version': [{ _cnd: { lte: 1.2 } }],
agent: ['sqlAgent'],
},
},
},
{
...textFromPreviousNode,
displayOptions: {
show: { promptType: ['auto'], '@version': [{ _cnd: { gte: 1.7 } }] },
// SQL Agent has data source and credentials parameters so we need to include this input there manually
// to preserve the order
hide: {
agent: ['sqlAgent'],
},
},
},
{
...textInput,
displayOptions: {
show: {
promptType: ['define'],
},
hide: {
agent: ['sqlAgent'],
},
},
},
{
displayName: 'For more reliable structured output parsing, consider using the Tools agent',
name: 'notice',
type: 'notice',
default: '',
displayOptions: {
show: {
hasOutputParser: [true],
agent: [
'conversationalAgent',
'reActAgent',
'planAndExecuteAgent',
'openAiFunctionsAgent',
],
},
},
},
{
displayName: 'Require Specific Output Format',
name: 'hasOutputParser',
type: 'boolean',
default: false,
noDataExpression: true,
displayOptions: {
hide: {
'@version': [{ _cnd: { lte: 1.2 } }],
agent: ['sqlAgent'],
},
},
},
{
displayName: `Connect an <a data-action='openSelectiveNodeCreator' data-action-parameter-connectiontype='${NodeConnectionType.AiOutputParser}'>output parser</a> on the canvas to specify the output format you require`,
name: 'notice',
type: 'notice',
default: '',
displayOptions: {
show: {
hasOutputParser: [true],
agent: ['toolsAgent'],
},
},
},
...toolsAgentProperties,
...conversationalAgentProperties,
...openAiFunctionsAgentProperties,
...reActAgentAgentProperties,
...sqlAgentAgentProperties,
...planAndExecuteAgentProperties,
],
};
export class AgentV1 implements INodeType {
description: INodeTypeDescription;
constructor(baseDescription: INodeTypeBaseDescription) {
this.description = {
...baseDescription,
...versionDescription,
};
}
async execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {
const agentType = this.getNodeParameter('agent', 0, '') as string;
const nodeVersion = this.getNode().typeVersion;
if (agentType === 'conversationalAgent') {
return await conversationalAgentExecute.call(this, nodeVersion);
} else if (agentType === 'toolsAgent') {
return await toolsAgentExecute.call(this);
} else if (agentType === 'openAiFunctionsAgent') {
return await openAiFunctionsAgentExecute.call(this, nodeVersion);
} else if (agentType === 'reActAgent') {
return await reActAgentAgentExecute.call(this, nodeVersion);
} else if (agentType === 'sqlAgent') {
return await sqlAgentAgentExecute.call(this);
} else if (agentType === 'planAndExecuteAgent') {
return await planAndExecuteAgentExecute.call(this, nodeVersion);
}
throw new NodeOperationError(this.getNode(), `The agent type "${agentType}" is not supported`);
}
}
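
A note on the inputs expression above: getInputs is embedded via getInputs.toString() and evaluated as an n8n expression per node, and getInputData destructures only type and filter, so the required: true flag declared on the AiTool special input is not forwarded. As a worked example (shape inferred from the function above, filter list abbreviated), the Tools Agent with an output parser enabled resolves to:

// Illustrative evaluation only: getInputs as defined above, for the Tools Agent.
const toolsAgentInputs = getInputs('toolsAgent', true);
// [
//   NodeConnectionType.Main,
//   { type: NodeConnectionType.AiLanguageModel, displayName: 'Chat Model', required: true, maxConnections: 1, filter: { nodes: [/* chat model nodes */] } },
//   { type: NodeConnectionType.AiMemory, displayName: 'Memory', required: false, maxConnections: 1 },
//   { type: NodeConnectionType.AiTool, displayName: 'Tool', required: false },
//   { type: NodeConnectionType.AiOutputParser, displayName: 'Output Parser', required: false },
// ]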


@@ -10,10 +10,10 @@ import {
isChatInstance,
getPromptInputByType,
getConnectedTools,
} from '../../../../../utils/helpers';
import { getOptionalOutputParsers } from '../../../../../utils/output_parsers/N8nOutputParser';
import { throwIfToolSchema } from '../../../../../utils/schemaParsing';
import { getTracingConfig } from '../../../../../utils/tracing';
} from '../../../../../../utils/helpers';
import { getOptionalOutputParsers } from '../../../../../../utils/output_parsers/N8nOutputParser';
import { throwIfToolSchema } from '../../../../../../utils/schemaParsing';
import { getTracingConfig } from '../../../../../../utils/tracing';
import { checkForStructuredTools, extractParsedOutput } from '../utils';
export async function conversationalAgentExecute(


@@ -12,9 +12,9 @@ import {
NodeOperationError,
} from 'n8n-workflow';
import { getConnectedTools, getPromptInputByType } from '../../../../../utils/helpers';
import { getOptionalOutputParsers } from '../../../../../utils/output_parsers/N8nOutputParser';
import { getTracingConfig } from '../../../../../utils/tracing';
import { getConnectedTools, getPromptInputByType } from '../../../../../../utils/helpers';
import { getOptionalOutputParsers } from '../../../../../../utils/output_parsers/N8nOutputParser';
import { getTracingConfig } from '../../../../../../utils/tracing';
import { extractParsedOutput } from '../utils';
export async function openAiFunctionsAgentExecute(


@@ -10,10 +10,10 @@ import {
NodeOperationError,
} from 'n8n-workflow';
import { getConnectedTools, getPromptInputByType } from '../../../../../utils/helpers';
import { getOptionalOutputParsers } from '../../../../../utils/output_parsers/N8nOutputParser';
import { throwIfToolSchema } from '../../../../../utils/schemaParsing';
import { getTracingConfig } from '../../../../../utils/tracing';
import { getConnectedTools, getPromptInputByType } from '../../../../../../utils/helpers';
import { getOptionalOutputParsers } from '../../../../../../utils/output_parsers/N8nOutputParser';
import { throwIfToolSchema } from '../../../../../../utils/schemaParsing';
import { getTracingConfig } from '../../../../../../utils/tracing';
import { checkForStructuredTools, extractParsedOutput } from '../utils';
export async function planAndExecuteAgentExecute(


@@ -15,10 +15,10 @@ import {
getConnectedTools,
getPromptInputByType,
isChatInstance,
} from '../../../../../utils/helpers';
import { getOptionalOutputParsers } from '../../../../../utils/output_parsers/N8nOutputParser';
import { throwIfToolSchema } from '../../../../../utils/schemaParsing';
import { getTracingConfig } from '../../../../../utils/tracing';
} from '../../../../../../utils/helpers';
import { getOptionalOutputParsers } from '../../../../../../utils/output_parsers/N8nOutputParser';
import { throwIfToolSchema } from '../../../../../../utils/schemaParsing';
import { getTracingConfig } from '../../../../../../utils/tracing';
import { checkForStructuredTools, extractParsedOutput } from '../utils';
export async function reActAgentAgentExecute(


@@ -5,7 +5,7 @@ import {
promptTypeOptions,
textFromPreviousNode,
textInput,
} from '../../../../../utils/descriptions';
} from '../../../../../../utils/descriptions';
const dataSourceOptions: INodeProperties = {
displayName: 'Data Source',


@@ -16,8 +16,8 @@ import { getMysqlDataSource } from './other/handlers/mysql';
import { getPostgresDataSource } from './other/handlers/postgres';
import { getSqliteDataSource } from './other/handlers/sqlite';
import { SQL_PREFIX, SQL_SUFFIX } from './other/prompts';
import { getPromptInputByType, serializeChatHistory } from '../../../../../utils/helpers';
import { getTracingConfig } from '../../../../../utils/tracing';
import { getPromptInputByType, serializeChatHistory } from '../../../../../../utils/helpers';
import { getTracingConfig } from '../../../../../../utils/tracing';
const parseTablesString = (tablesString: string) =>
tablesString


@@ -19,11 +19,11 @@ import {
isChatInstance,
getPromptInputByType,
getConnectedTools,
} from '../../../../../utils/helpers';
} from '../../../../../../utils/helpers';
import {
getOptionalOutputParsers,
type N8nOutputParser,
} from '../../../../../utils/output_parsers/N8nOutputParser';
} from '../../../../../../utils/output_parsers/N8nOutputParser';
function getOutputParserSchema(outputParser: N8nOutputParser): ZodObject<any, any, any, any> {
const schema =


@@ -1,7 +1,7 @@
import type { z } from 'zod';
import type { BaseOutputParser } from '@langchain/core/output_parsers';
import type { DynamicStructuredTool, Tool } from 'langchain/tools';
import { NodeOperationError, type IExecuteFunctions, type INode } from 'n8n-workflow';
import type { z } from 'zod';
type ZodObjectAny = z.ZodObject<any, any, any, any>;


@@ -0,0 +1,424 @@
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import {
HumanMessage,
SystemMessage,
type AIMessage,
type BaseMessage,
trimMessages,
} from '@langchain/core/messages';
import type { RunnableConfig } from '@langchain/core/runnables';
import { tool } from '@langchain/core/tools';
import { StateGraph, Annotation, END, MemorySaver } from '@langchain/langgraph';
import { ToolNode } from '@langchain/langgraph/prebuilt';
import type { BaseChatMemory } from 'langchain/memory';
import type { DynamicStructuredTool, Tool } from 'langchain/tools';
import type {
IExecuteFunctions,
INodeType,
INodeTypeDescription,
INodeTypeBaseDescription,
INodeExecutionData,
} from 'n8n-workflow';
import { NodeConnectionType, NodeOperationError } from 'n8n-workflow';
import { v4 as uuidv4 } from 'uuid';
import { z } from 'zod';
import { promptTypeOptions, textFromPreviousNode, textInput } from '../../../../utils/descriptions';
import { getConnectedTools, getPromptInputByType, isChatInstance } from '../../../../utils/helpers';
const versionDescription: INodeTypeDescription = {
displayName: 'AI Agent',
name: 'agent',
icon: 'fa:robot',
group: ['transform'],
version: 2,
description: 'Generates an action plan and executes it. Can use external tools.',
defaults: {
name: 'AI Agent',
color: '#404040',
},
// eslint-disable-next-line @typescript-eslint/restrict-template-expressions
inputs: [
NodeConnectionType.Main,
{
type: NodeConnectionType.AiLanguageModel,
displayName: 'Chat Model',
required: true,
maxConnections: 1,
filter: {
nodes: [
'@n8n/n8n-nodes-langchain.lmChatAnthropic',
'@n8n/n8n-nodes-langchain.lmChatAzureOpenAi',
'@n8n/n8n-nodes-langchain.lmChatAwsBedrock',
'@n8n/n8n-nodes-langchain.lmChatMistralCloud',
'@n8n/n8n-nodes-langchain.lmChatOllama',
'@n8n/n8n-nodes-langchain.lmChatOpenAi',
'@n8n/n8n-nodes-langchain.lmChatGroq',
'@n8n/n8n-nodes-langchain.lmChatGoogleVertex',
'@n8n/n8n-nodes-langchain.lmChatGoogleGemini',
],
},
},
{
type: NodeConnectionType.AiMemory,
displayName: 'Memory',
required: false,
maxConnections: 1,
},
{
type: NodeConnectionType.AiTool,
displayName: 'Tools',
required: false,
},
],
outputs: [NodeConnectionType.Main],
properties: [
promptTypeOptions,
{ ...textInput, displayOptions: { show: { promptType: ['define'] } } },
{ ...textFromPreviousNode, displayOptions: { show: { promptType: ['auto'] } } },
{
displayName: 'System Message',
name: 'systemMessage',
type: 'string',
default: 'You are a helpful assistant',
description: 'The message that will be sent to the agent before the conversation starts',
typeOptions: {
rows: 10,
},
},
{
displayName: 'Conversation Routes',
name: 'routes',
placeholder: 'Add Route',
type: 'fixedCollection',
typeOptions: {
multipleValues: true,
sortable: true,
},
default: {},
options: [
{
name: 'values',
displayName: 'Route',
values: [
{
displayName: 'Name',
name: 'name',
type: 'string',
default: '',
placeholder: 'e.g. Technical Support',
description: 'Name of this conversation route',
},
{
displayName: 'Description',
name: 'description',
type: 'string',
default: '',
placeholder: 'e.g. Handle technical questions about our product',
description: 'Description of when this route should be used',
},
{
displayName: 'Conditions',
name: 'conditions',
placeholder: 'Add Condition',
type: 'fixedCollection',
typeOptions: {
multipleValues: true,
},
default: {},
options: [
{
name: 'conditions',
displayName: 'Conditions',
values: [
{
displayName: 'Value',
name: 'value',
type: 'string',
default: '={{ $json.text }}',
description: 'The value to evaluate',
},
{
displayName: 'Operation',
name: 'operation',
type: 'options',
noDataExpression: true,
options: [
{
name: 'Contains',
value: 'contains',
},
{
name: 'Ends With',
value: 'endsWith',
},
{
name: 'Equals',
value: 'equals',
},
{
name: 'Not Contains',
value: 'notContains',
},
{
name: 'Not Equals',
value: 'notEquals',
},
{
name: 'Regex',
value: 'regex',
},
{
name: 'Starts With',
value: 'startsWith',
},
],
default: 'contains',
},
{
displayName: 'Match',
name: 'match',
type: 'string',
default: '',
placeholder: 'e.g. technical',
description: 'The value to match against',
},
],
},
],
},
],
},
],
},
{
displayName: 'Options',
name: 'options',
type: 'collection',
default: {},
placeholder: 'Add Option',
options: [
{
displayName: 'Max Iterations',
name: 'maxIterations',
type: 'number',
default: 10,
description: 'The maximum number of iterations the agent will run before stopping',
},
{
displayName: 'Return Intermediate Steps',
name: 'returnIntermediateSteps',
type: 'boolean',
default: false,
description: 'Whether or not the output should include intermediate steps the agent took',
},
{
displayName: 'Automatically Passthrough Binary Images',
name: 'passthroughBinaryImages',
type: 'boolean',
default: true,
description:
'Whether or not binary images should be automatically passed through to the agent as image type messages',
},
],
},
],
};
export class AgentV2 implements INodeType {
description: INodeTypeDescription;
constructor(baseDescription: INodeTypeBaseDescription) {
this.description = {
...baseDescription,
...versionDescription,
};
}
async execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {
const model = (await this.getInputConnectionData(
NodeConnectionType.AiLanguageModel,
0,
)) as BaseChatModel;
if (!isChatInstance(model) || !model.bindTools) {
throw new NodeOperationError(
this.getNode(),
'LangGraph Agent requires a Chat Model that supports tool calling',
);
}
const memory = (await this.getInputConnectionData(NodeConnectionType.AiMemory, 0)) as
| BaseChatMemory
| undefined;
const tools = (await getConnectedTools(this, true, false)) as Array<
DynamicStructuredTool | Tool
>;
const responseSchema = z.object({
regular_answer: z.string().describe('Regular answer'),
pirate_voice_answer: z.string().describe('Answer as pirate'),
});
const finalResponseTool = tool(async () => 'mocked value', {
name: 'format_final_response',
description: 'Always respond to the user using this tool.',
schema: responseSchema,
});
const StateAnnotation = Annotation.Root({
input: Annotation<{
prompt: string;
workflowName?: string;
}>({
reducer: (x, y) => y ?? x ?? { prompt: '' },
}),
messages: Annotation<BaseMessage[]>({
reducer: (x, y) => x.concat(y),
}),
parsedOutput: Annotation<Record<string, unknown>>({
reducer: (x, y) => y ?? x ?? { output: {} },
}),
});
const toolNode = new ToolNode(tools);
const modelWithTools = model.bindTools(tools);
const stateModifier = async (messages: BaseMessage[]): Promise<BaseMessage[]> => {
return await trimMessages(messages, {
tokenCounter: (msgs) => msgs.length, // counts messages, not tokens
maxTokens: 10, // with the counter above this keeps only the last 10 messages; adjust as needed
strategy: 'last',
startOn: 'human',
includeSystem: false,
allowPartial: false,
});
};
const memorySaver = new MemorySaver();
async function shouldContinue(state: typeof StateAnnotation.State) {
const { messages } = state;
const lastMessage = messages[messages.length - 1] as AIMessage;
if (!lastMessage.tool_calls || lastMessage.tool_calls.length === 0) {
if (memory) {
const processedMessages = await stateModifier(messages);
const lastHumanMsg = processedMessages.find((m) => m.getType() === 'human');
const lastAIMsg = processedMessages.find((m) => m.getType() === 'ai');
if (lastHumanMsg && lastAIMsg) {
await memory.saveContext(
{ input: lastHumanMsg.content },
{ output: lastAIMsg.content },
);
}
}
return END;
}
return 'tools';
}
async function callModel(state: typeof StateAnnotation.State, config?: RunnableConfig) {
// console.log('Calling model with state: ', state)
const messages = state.messages;
const response = await modelWithTools.invoke(messages, config);
// console.log('Response from model: ', JSON.stringify(response, null, 2))
// We return a list, because this will get added to the existing list
return { messages: [response] };
}
const workflow = new StateGraph(StateAnnotation)
.addNode('agent', callModel)
// .addNode('formatResponse', formatResponse)
.addNode('tools', toolNode)
.addEdge('__start__', 'agent')
.addConditionalEdges(
// First, we define the start node. We use `agent`.
// This means these are the edges taken after the `agent` node is called.
'agent',
// Next, we pass in the function that will determine which node is called next.
shouldContinue,
// We supply a map of possible response values to the conditional edge
// to make it possible to draw a visualization of the graph.
{
[END]: END,
tools: 'tools',
// formatResponse: 'formatResponse',
},
)
// We now add a normal edge from `tools` to `agent`.
// This means that after `tools` is called, `agent` node is called next.
.addEdge('tools', 'agent');
const app = workflow.compile({
checkpointer: memorySaver,
});
const returnData: INodeExecutionData[] = [];
const items = this.getInputData();
for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
try {
const systemMessage = this.getNodeParameter('systemMessage', itemIndex, '') as string;
const options = this.getNodeParameter('options', itemIndex, {}) as {
maxIterations?: number;
returnIntermediateSteps?: boolean;
};
const input = getPromptInputByType({
ctx: this,
i: itemIndex,
inputKey: 'text',
promptTypeKey: 'promptType',
});
if (input === undefined) {
throw new NodeOperationError(this.getNode(), 'The text parameter is empty.');
}
const chatHistory = (await memory?.chatHistory.getMessages()) ?? [];
const threadId = uuidv4();
const processedHistory = await stateModifier(chatHistory);
const response = await app.invoke(
{
messages: [
new SystemMessage(systemMessage),
...processedHistory,
new HumanMessage(input),
],
},
{
configurable: {
thread_id: threadId,
maxIterations: options.maxIterations ?? 10,
},
},
);
// Get the final AI message from the response
const messages = response.messages ?? [];
const lastAIMessage = messages[messages.length - 1] as AIMessage;
returnData.push({
json: {
output: lastAIMessage?.content ?? '',
threadId,
},
});
} catch (error) {
if (this.continueOnFail()) {
returnData.push({
json: { error: error.message },
pairedItem: { item: itemIndex },
});
continue;
}
throw error;
}
}
return [returnData];
}
}
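
For comparison, the hand-wired StateGraph above implements the same model-to-tools loop that LangGraph also ships as a prebuilt helper. A minimal sketch of that equivalent, assuming createReactAgent from @langchain/langgraph/prebuilt and reusing model, tools, and input as obtained in the execute method above:

import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { HumanMessage, SystemMessage } from '@langchain/core/messages';
import { createReactAgent } from '@langchain/langgraph/prebuilt';
import type { Tool } from 'langchain/tools';

// Assumed to be resolved the same way AgentV2.execute does above.
declare const model: BaseChatModel;
declare const tools: Tool[];
declare const input: string;

// Prebuilt equivalent of the agent/tools graph wired up manually in AgentV2.
const reactAgent = createReactAgent({ llm: model, tools });

const result = await reactAgent.invoke(
	{ messages: [new SystemMessage('You are a helpful assistant'), new HumanMessage(input)] },
	// recursionLimit is LangGraph's own iteration cap; the maxIterations value passed
	// under `configurable` above is not consumed by LangGraph itself.
	{ recursionLimit: 10 },
);

// Final AI message, matching the `output` field AgentV2 pushes per item.
const output = result.messages[result.messages.length - 1].content;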

View file

@@ -145,6 +145,7 @@
"@langchain/google-genai": "0.1.4",
"@langchain/google-vertexai": "0.1.3",
"@langchain/groq": "0.1.2",
"@langchain/langgraph": "^0.2.27",
"@langchain/mistralai": "0.2.0",
"@langchain/ollama": "0.1.2",
"@langchain/openai": "0.3.14",


@@ -29,6 +29,7 @@ const versionDescription: INodeTypeDescription = {
},
inputs: [NodeConnectionType.Main],
outputs: [NodeConnectionType.Main],
usableAsTool: true,
properties: [
{
displayName: 'Mode',

View file

@@ -471,6 +471,9 @@ importers:
'@langchain/groq':
specifier: 0.1.2
version: 0.1.2(@langchain/core@0.3.19(openai@4.73.1(encoding@0.1.13)(zod@3.23.8)))(encoding@0.1.13)
'@langchain/langgraph':
specifier: ^0.2.27
version: 0.2.27(@langchain/core@0.3.19(openai@4.73.1(encoding@0.1.13)(zod@3.23.8)))
'@langchain/mistralai':
specifier: 0.2.0
version: 0.2.0(@langchain/core@0.3.19(openai@4.73.1(encoding@0.1.13)(zod@3.23.8)))
@@ -3585,6 +3588,21 @@ packages:
peerDependencies:
'@langchain/core': '>=0.2.21 <0.4.0'
'@langchain/langgraph-checkpoint@0.0.13':
resolution: {integrity: sha512-amdmBcNT8a9xP2VwcEWxqArng4gtRDcnVyVI4DsQIo1Aaz8e8+hH17zSwrUF3pt1pIYztngIfYnBOim31mtKMg==}
engines: {node: '>=18'}
peerDependencies:
'@langchain/core': '>=0.2.31 <0.4.0'
'@langchain/langgraph-sdk@0.0.31':
resolution: {integrity: sha512-oYZWoC3x7vH9bAL1Y30XjtuWnic1j3knXD4BbldsY0chFLxwIT5i6/GMThNy3Oiwb4SB+c6gvaSuxBNDkp7dkw==}
'@langchain/langgraph@0.2.27':
resolution: {integrity: sha512-7+PlVXlNpswzXzZp/k8O99YBN3zBkUdusfyxISkZ/gdXz1p5RySQEpKQ4EVIZnzBrZ98zZ3FArj4OWOgeF0EeA==}
engines: {node: '>=18'}
peerDependencies:
'@langchain/core': '>=0.2.36 <0.3.0 || >=0.3.9 < 0.4.0'
'@langchain/mistralai@0.2.0':
resolution: {integrity: sha512-VdfbKZopAuSXf/vlXbriGWLK3c7j5s47DoB3S31xpprY2BMSKZZiX9vE9TsgxMfAPuIDPIYcfgU7p1upvTYt8g==}
engines: {node: '>=18'}
@@ -14755,6 +14773,26 @@ snapshots:
- encoding
- supports-color
'@langchain/langgraph-checkpoint@0.0.13(@langchain/core@0.3.19(openai@4.73.1(encoding@0.1.13)(zod@3.23.8)))':
dependencies:
'@langchain/core': 0.3.19(openai@4.73.1(encoding@0.1.13)(zod@3.23.8))
uuid: 10.0.0
'@langchain/langgraph-sdk@0.0.31':
dependencies:
'@types/json-schema': 7.0.15
p-queue: 6.6.2
p-retry: 4.6.2
uuid: 9.0.1
'@langchain/langgraph@0.2.27(@langchain/core@0.3.19(openai@4.73.1(encoding@0.1.13)(zod@3.23.8)))':
dependencies:
'@langchain/core': 0.3.19(openai@4.73.1(encoding@0.1.13)(zod@3.23.8))
'@langchain/langgraph-checkpoint': 0.0.13(@langchain/core@0.3.19(openai@4.73.1(encoding@0.1.13)(zod@3.23.8)))
'@langchain/langgraph-sdk': 0.0.31
uuid: 10.0.0
zod: 3.23.8
'@langchain/mistralai@0.2.0(@langchain/core@0.3.19(openai@4.73.1(encoding@0.1.13)(zod@3.23.8)))':
dependencies:
'@langchain/core': 0.3.19(openai@4.73.1(encoding@0.1.13)(zod@3.23.8))
@@ -19228,7 +19266,7 @@ snapshots:
eslint-import-resolver-node@0.3.9:
dependencies:
debug: 3.2.7(supports-color@5.5.0)
debug: 3.2.7(supports-color@8.1.1)
is-core-module: 2.13.1
resolve: 1.22.8
transitivePeerDependencies:
@@ -19253,7 +19291,7 @@ snapshots:
eslint-module-utils@2.8.0(@typescript-eslint/parser@7.2.0(eslint@8.57.0)(typescript@5.7.2))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.6.1(@typescript-eslint/parser@7.2.0(eslint@8.57.0)(typescript@5.7.2))(eslint-plugin-import@2.29.1)(eslint@8.57.0))(eslint@8.57.0):
dependencies:
debug: 3.2.7(supports-color@5.5.0)
debug: 3.2.7(supports-color@8.1.1)
optionalDependencies:
'@typescript-eslint/parser': 7.2.0(eslint@8.57.0)(typescript@5.7.2)
eslint: 8.57.0
@@ -19273,7 +19311,7 @@ snapshots:
array.prototype.findlastindex: 1.2.3
array.prototype.flat: 1.3.2
array.prototype.flatmap: 1.3.2
debug: 3.2.7(supports-color@5.5.0)
debug: 3.2.7(supports-color@8.1.1)
doctrine: 2.1.0
eslint: 8.57.0
eslint-import-resolver-node: 0.3.9
@@ -20052,7 +20090,7 @@ snapshots:
array-parallel: 0.1.3
array-series: 0.1.5
cross-spawn: 4.0.2
debug: 3.2.7(supports-color@5.5.0)
debug: 3.2.7(supports-color@8.1.1)
transitivePeerDependencies:
- supports-color
@@ -22955,7 +22993,7 @@ snapshots:
pdf-parse@1.1.1:
dependencies:
debug: 3.2.7(supports-color@5.5.0)
debug: 3.2.7(supports-color@8.1.1)
node-ensure: 0.0.0
transitivePeerDependencies:
- supports-color
@@ -23764,7 +23802,7 @@ snapshots:
rhea@1.0.24:
dependencies:
debug: 3.2.7(supports-color@5.5.0)
debug: 3.2.7(supports-color@8.1.1)
transitivePeerDependencies:
- supports-color