fix(editor): Fix run index input for RunData view in sub-nodes (#11538)

oleg 2024-11-05 16:47:45 +01:00 committed by GitHub
parent dfd785bc08
commit 434d31ce92
6 changed files with 516 additions and 90 deletions


@@ -44,6 +44,7 @@ import {
   openNode,
   getConnectionBySourceAndTarget,
 } from '../composables/workflow';
+import { NDV, WorkflowPage } from '../pages';
 import { createMockNodeExecutionData, runMockWorkflowExecution } from '../utils';

 describe('Langchain Integration', () => {
@@ -232,95 +233,96 @@ describe('Langchain Integration', () => {
     const inputMessage = 'Hello!';
     const outputMessage = 'Hi there! How can I assist you today?';
+    const runData = [
+      createMockNodeExecutionData(MANUAL_CHAT_TRIGGER_NODE_NAME, {
+        jsonData: {
+          main: { input: inputMessage },
+        },
+      }),
+      createMockNodeExecutionData(AI_LANGUAGE_MODEL_OPENAI_CHAT_MODEL_NODE_NAME, {
+        jsonData: {
+          ai_languageModel: {
+            response: {
+              generations: [
+                {
+                  text: `{
+                    "action": "Final Answer",
+                    "action_input": "${outputMessage}"
+                  }`,
+                  message: {
+                    lc: 1,
+                    type: 'constructor',
+                    id: ['langchain', 'schema', 'AIMessage'],
+                    kwargs: {
+                      content: `{
+                        "action": "Final Answer",
+                        "action_input": "${outputMessage}"
+                      }`,
+                      additional_kwargs: {},
+                    },
+                  },
+                  generationInfo: { finish_reason: 'stop' },
+                },
+              ],
+              llmOutput: {
+                tokenUsage: {
+                  completionTokens: 26,
+                  promptTokens: 519,
+                  totalTokens: 545,
+                },
+              },
+            },
+          },
+        },
+        metadata: {
+          subRun: [{ node: AI_LANGUAGE_MODEL_OPENAI_CHAT_MODEL_NODE_NAME, runIndex: 0 }],
+        },
+        inputOverride: {
+          ai_languageModel: [
+            [
+              {
+                json: {
+                  messages: [
+                    {
+                      lc: 1,
+                      type: 'constructor',
+                      id: ['langchain', 'schema', 'SystemMessage'],
+                      kwargs: {
+                        content:
+                          'Assistant is a large language model trained by OpenAI.\n\nAssistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n\nAssistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n\nOverall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist. However, above all else, all responses must adhere to the format of RESPONSE FORMAT INSTRUCTIONS.',
+                        additional_kwargs: {},
+                      },
+                    },
+                    {
+                      lc: 1,
+                      type: 'constructor',
+                      id: ['langchain', 'schema', 'HumanMessage'],
+                      kwargs: {
+                        content:
+                          'TOOLS\n------\nAssistant can ask the user to use tools to look up information that may be helpful in answering the users original question. The tools the human can use are:\n\n\n\nRESPONSE FORMAT INSTRUCTIONS\n----------------------------\n\nOutput a JSON markdown code snippet containing a valid JSON object in one of two formats:\n\n**Option 1:**\nUse this if you want the human to use a tool.\nMarkdown code snippet formatted in the following schema:\n\n```json\n{\n "action": string, // The action to take. Must be one of []\n "action_input": string // The input to the action. May be a stringified object.\n}\n```\n\n**Option #2:**\nUse this if you want to respond directly and conversationally to the human. Markdown code snippet formatted in the following schema:\n\n```json\n{\n "action": "Final Answer",\n "action_input": string // You should put what you want to return to use here and make sure to use valid json newline characters.\n}\n```\n\nFor both options, remember to always include the surrounding markdown code snippet delimiters (begin with "```json" and end with "```")!\n\n\nUSER\'S INPUT\n--------------------\nHere is the user\'s input (remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):\n\nHello!',
+                        additional_kwargs: {},
+                      },
+                    },
+                  ],
+                  options: { stop: ['Observation:'], promptIndex: 0 },
+                },
+              },
+            ],
+          ],
+        },
+      }),
+      createMockNodeExecutionData(AGENT_NODE_NAME, {
+        jsonData: {
+          main: { output: 'Hi there! How can I assist you today?' },
+        },
+      }),
+    ];
     runMockWorkflowExecution({
       trigger: () => {
         sendManualChatMessage(inputMessage);
       },
-      runData: [
-        createMockNodeExecutionData(MANUAL_CHAT_TRIGGER_NODE_NAME, {
-          jsonData: {
-            main: { input: inputMessage },
-          },
-        }),
-        createMockNodeExecutionData(AI_LANGUAGE_MODEL_OPENAI_CHAT_MODEL_NODE_NAME, {
-          jsonData: {
-            ai_languageModel: {
-              response: {
-                generations: [
-                  {
-                    text: `{
-                      "action": "Final Answer",
-                      "action_input": "${outputMessage}"
-                    }`,
-                    message: {
-                      lc: 1,
-                      type: 'constructor',
-                      id: ['langchain', 'schema', 'AIMessage'],
-                      kwargs: {
-                        content: `{
-                          "action": "Final Answer",
-                          "action_input": "${outputMessage}"
-                        }`,
-                        additional_kwargs: {},
-                      },
-                    },
-                    generationInfo: { finish_reason: 'stop' },
-                  },
-                ],
-                llmOutput: {
-                  tokenUsage: {
-                    completionTokens: 26,
-                    promptTokens: 519,
-                    totalTokens: 545,
-                  },
-                },
-              },
-            },
-          },
-          metadata: {
-            subRun: [{ node: AI_LANGUAGE_MODEL_OPENAI_CHAT_MODEL_NODE_NAME, runIndex: 0 }],
-          },
-          inputOverride: {
-            ai_languageModel: [
-              [
-                {
-                  json: {
-                    messages: [
-                      {
-                        lc: 1,
-                        type: 'constructor',
-                        id: ['langchain', 'schema', 'SystemMessage'],
-                        kwargs: {
-                          content:
-                            'Assistant is a large language model trained by OpenAI.\n\nAssistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n\nAssistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n\nOverall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist. However, above all else, all responses must adhere to the format of RESPONSE FORMAT INSTRUCTIONS.',
-                          additional_kwargs: {},
-                        },
-                      },
-                      {
-                        lc: 1,
-                        type: 'constructor',
-                        id: ['langchain', 'schema', 'HumanMessage'],
-                        kwargs: {
-                          content:
-                            'TOOLS\n------\nAssistant can ask the user to use tools to look up information that may be helpful in answering the users original question. The tools the human can use are:\n\n\n\nRESPONSE FORMAT INSTRUCTIONS\n----------------------------\n\nOutput a JSON markdown code snippet containing a valid JSON object in one of two formats:\n\n**Option 1:**\nUse this if you want the human to use a tool.\nMarkdown code snippet formatted in the following schema:\n\n```json\n{\n "action": string, // The action to take. Must be one of []\n "action_input": string // The input to the action. May be a stringified object.\n}\n```\n\n**Option #2:**\nUse this if you want to respond directly and conversationally to the human. Markdown code snippet formatted in the following schema:\n\n```json\n{\n "action": "Final Answer",\n "action_input": string // You should put what you want to return to use here and make sure to use valid json newline characters.\n}\n```\n\nFor both options, remember to always include the surrounding markdown code snippet delimiters (begin with "```json" and end with "```")!\n\n\nUSER\'S INPUT\n--------------------\nHere is the user\'s input (remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):\n\nHello!',
-                          additional_kwargs: {},
-                        },
-                      },
-                    ],
-                    options: { stop: ['Observation:'], promptIndex: 0 },
-                  },
-                },
-              ],
-            ],
-          },
-        }),
-        createMockNodeExecutionData(AGENT_NODE_NAME, {
-          jsonData: {
-            main: { output: 'Hi there! How can I assist you today?' },
-          },
-        }),
-      ],
+      runData,
       lastNodeExecuted: AGENT_NODE_NAME,
     });
@@ -357,4 +359,56 @@ describe('Langchain Integration', () => {
     getConnectionBySourceAndTarget(CHAT_TRIGGER_NODE_DISPLAY_NAME, AGENT_NODE_NAME).should('exist');
     getNodes().should('have.length', 3);
   });
+  it('should render runItems for sub-nodes and allow switching between them', () => {
+    const workflowPage = new WorkflowPage();
+    const ndv = new NDV();
+
+    cy.visit(workflowPage.url);
+    cy.createFixtureWorkflow('In_memory_vector_store_fake_embeddings.json');
+    workflowPage.actions.zoomToFit();
+
+    workflowPage.actions.executeNode('Populate VS');
+    cy.get('[data-label="25 items"]').should('exist');
+
+    const assertInputOutputText = (text: string, assertion: 'exist' | 'not.exist') => {
+      ndv.getters.outputPanel().contains(text).should(assertion);
+      ndv.getters.inputPanel().contains(text).should(assertion);
+    };
+
+    workflowPage.actions.openNode('Character Text Splitter');
+    ndv.getters.outputRunSelector().should('exist');
+    ndv.getters.inputRunSelector().should('exist');
+    ndv.getters.inputRunSelector().find('input').should('include.value', '3 of 3');
+    ndv.getters.outputRunSelector().find('input').should('include.value', '3 of 3');
+    assertInputOutputText('Kyiv', 'exist');
+    assertInputOutputText('Berlin', 'not.exist');
+    assertInputOutputText('Prague', 'not.exist');
+
+    ndv.actions.changeOutputRunSelector('2 of 3');
+    assertInputOutputText('Berlin', 'exist');
+    assertInputOutputText('Kyiv', 'not.exist');
+    assertInputOutputText('Prague', 'not.exist');
+
+    ndv.actions.changeOutputRunSelector('1 of 3');
+    assertInputOutputText('Prague', 'exist');
+    assertInputOutputText('Berlin', 'not.exist');
+    assertInputOutputText('Kyiv', 'not.exist');
+
+    ndv.actions.toggleInputRunLinking();
+    ndv.actions.changeOutputRunSelector('2 of 3');
+    ndv.getters.inputRunSelector().find('input').should('include.value', '1 of 3');
+    ndv.getters.outputRunSelector().find('input').should('include.value', '2 of 3');
+    ndv.getters.inputPanel().contains('Prague').should('exist');
+    ndv.getters.inputPanel().contains('Berlin').should('not.exist');
+    ndv.getters.outputPanel().contains('Berlin').should('exist');
+    ndv.getters.outputPanel().contains('Prague').should('not.exist');
+
+    ndv.actions.toggleInputRunLinking();
+    ndv.getters.inputRunSelector().find('input').should('include.value', '1 of 3');
+    ndv.getters.outputRunSelector().find('input').should('include.value', '1 of 3');
+    assertInputOutputText('Prague', 'exist');
+    assertInputOutputText('Berlin', 'not.exist');
+    assertInputOutputText('Kyiv', 'not.exist');
+  });
 });

File diff suppressed because one or more lines are too long


@@ -16,7 +16,7 @@ export function createMockNodeExecutionData(
   return {
     [name]: {
       startTime: new Date().getTime(),
-      executionTime: 0,
+      executionTime: 1,
       executionStatus,
       data: jsonData
         ? Object.keys(jsonData).reduce((acc, key) => {
@@ -33,6 +33,7 @@
           }, {} as ITaskDataConnections)
         : data,
       source: [null],
+      inputOverride,
       ...rest,
     },
   };
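Not part of the commit — a minimal usage sketch of the mock builder after this change. The node name and payloads are hypothetical, and the returned shape is inferred from the hunk above rather than taken from the util's documented contract:

import { createMockNodeExecutionData } from '../utils';

// Hypothetical call: jsonData is folded into ITaskDataConnections entries,
// and the inputOverride option is now forwarded onto the task data verbatim.
const mock = createMockNodeExecutionData('Character Text Splitter', {
  jsonData: { main: { chunk: 'Kyiv' } },
  inputOverride: { main: [[{ json: { text: 'Kyiv is the capital of Ukraine.' } }]] },
});

// Approximate result (assumption based on the diff above):
// {
//   'Character Text Splitter': {
//     startTime: <Date.now()>,
//     executionTime: 1, // was 0; a non-zero duration gives every mocked run a real time window
//     executionStatus: 'success',
//     data: { main: [[{ json: { chunk: 'Kyiv' } }]] },
//     source: [null],
//     inputOverride: { main: [[{ json: { text: 'Kyiv is the capital of Ukraine.' } }]] },
//   },
// }

The executionTime bump from 0 to 1 matters for the last file in this commit: sub-node runs are matched against startTime < rootNodeEndTime, and a zero-length window would match nothing.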


@@ -179,7 +179,7 @@ export default defineComponent({
     rootNode(): string {
       const workflow = this.workflow;
-      const rootNodes = workflow.getChildNodes(this.activeNode?.name ?? '', 'ALL_NON_MAIN');
+      const rootNodes = workflow.getChildNodes(this.activeNode?.name ?? '', 'ALL');
       return rootNodes[0];
     },
@@ -342,7 +342,7 @@ export default defineComponent({
       :node="currentNode"
       :nodes="isMappingMode ? rootNodesParents : parentNodes"
       :workflow="workflow"
-      :run-index="runIndex"
+      :run-index="isMappingMode ? 0 : runIndex"
       :linked-runs="linkedRuns"
       :can-link-runs="!mappedNode && canLinkRuns"
       :too-much-data-title="$locale.baseText('ndv.input.tooMuchData.title')"
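The :run-index change above is easy to misread in template syntax; here is a hypothetical distillation of it as a computed property. The ref names and defaults are scaffolding; only the pin-to-first-run rule comes from the diff:

import { computed, ref } from 'vue';

// Scaffolding for illustration; in the component these come from props/state.
const isMappingMode = ref(false); // true while the user is dragging data into a parameter
const runIndex = ref(2); // the run currently selected in the NDV run selector

// While mapping, the input panel is pinned to the first run so the data being
// mapped from stays stable; otherwise it follows the user-selected run index.
const inputPanelRunIndex = computed(() => (isMappingMode.value ? 0 : runIndex.value));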


@@ -154,6 +154,29 @@ const parentNode = computed(() => {
 });

 const inputNodeName = computed<string | undefined>(() => {
+  const nodeOutputs =
+    activeNode.value && activeNodeType.value
+      ? NodeHelpers.getNodeOutputs(props.workflowObject, activeNode.value, activeNodeType.value)
+      : [];
+
+  const nonMainOutputs = nodeOutputs.filter((output) => {
+    if (typeof output === 'string') return output !== NodeConnectionType.Main;
+    return output.type !== NodeConnectionType.Main;
+  });
+
+  const isSubNode = nonMainOutputs.length > 0;
+
+  if (isSubNode && activeNode.value) {
+    // For sub-nodes, we need to get their connected output node to determine the input
+    // because sub-nodes use specialized outputs (e.g. NodeConnectionType.AiTool)
+    // instead of the standard Main output type
+    const connectedOutputNode = props.workflowObject.getChildNodes(
+      activeNode.value.name,
+      'ALL_NON_MAIN',
+    )?.[0];
+
+    return connectedOutputNode;
+  }
+
   return selectedInput.value || parentNode.value;
 });
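The sub-node check added above also reads well in isolation. A self-contained sketch, assuming NodeConnectionType and INodeOutputConfiguration come from n8n-workflow as used in the diff (the helper name is hypothetical):

import { NodeConnectionType } from 'n8n-workflow';
import type { INodeOutputConfiguration } from 'n8n-workflow';

// A node's outputs are either bare connection-type strings or config objects.
type OutputSpec = NodeConnectionType | INodeOutputConfiguration;

// A node counts as a sub-node when at least one output is non-Main,
// e.g. an ai_languageModel or ai_tool output feeding into a root node.
function isSubNode(outputs: OutputSpec[]): boolean {
  return outputs.some((output) =>
    typeof output === 'string'
      ? output !== NodeConnectionType.Main
      : output.type !== NodeConnectionType.Main,
  );
}

For such a node, the input panel resolves its displayed input from the first child reached over non-main connections, i.e. the root node the sub-node feeds.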


@@ -164,8 +164,9 @@ const aiData = computed<AIResult[]>(() => {
   const result: AIResult[] = [];
   const connectedSubNodes = props.workflow.getParentNodes(props.node.name, 'ALL_NON_MAIN');
   const rootNodeResult = workflowsStore.getWorkflowResultDataByNodeName(props.node.name);
-  const rootNodeStartTime = rootNodeResult?.[0]?.startTime ?? 0;
-  const rootNodeEndTime = rootNodeStartTime + (rootNodeResult?.[0]?.executionTime ?? 0);
+  const rootNodeStartTime = rootNodeResult?.[props.runIndex ?? 0]?.startTime ?? 0;
+  const rootNodeEndTime =
+    rootNodeStartTime + (rootNodeResult?.[props.runIndex ?? 0]?.executionTime ?? 0);

   connectedSubNodes.forEach((nodeName) => {
     const nodeRunData = workflowsStore.getWorkflowResultDataByNodeName(nodeName) ?? [];
@@ -193,7 +194,7 @@ const aiData = computed<AIResult[]>(() => {
   const currentNodeResult = result.filter((r) => {
     const startTime = r.data?.metadata?.startTime ?? 0;
-    return startTime >= rootNodeStartTime && startTime <= rootNodeEndTime;
+    return startTime >= rootNodeStartTime && startTime < rootNodeEndTime;
   });

   return currentNodeResult;
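Taken together, the two hunks above change how sub-node runs are attributed to a root run: the time window is now derived from the selected runIndex instead of always run 0, and its upper bound is exclusive. A standalone sketch of the rule, with hypothetical function and type names; the comparisons mirror the diff:

interface RunStamp {
  startTime: number; // epoch ms when the run started
  executionTime: number; // how long the run took, in ms
}

// A sub-node run belongs to root run `runIndex` when its start time falls in
// the half-open window [rootStart, rootStart + executionTime).
function subRunsForRootRun(
  rootRuns: RunStamp[],
  subRuns: RunStamp[],
  runIndex: number,
): RunStamp[] {
  const root = rootRuns[runIndex];
  if (!root) return [];
  // Exclusive upper bound: a run starting exactly when the next root run
  // begins is attributed to that next run, not to this one.
  const windowEnd = root.startTime + root.executionTime;
  return subRuns.filter((run) => run.startTime >= root.startTime && run.startTime < windowEnd);
}

Previously the window was always computed from run 0, so switching runs in the RunData view kept showing the first run's sub-node executions; that is the behavior the new Cypress test in the first file pins down.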