fix(editor): Fix error rendering and indexing of LLM sub-node outputs (#10688)

oleg 2024-09-11 16:17:13 +02:00 committed by GitHub
parent 5c47a5f691
commit 50459bacab
4 changed files with 69 additions and 31 deletions

View file

@@ -20,7 +20,7 @@ type TokensUsageParser = (llmOutput: LLMResult['llmOutput']) => {
   totalTokens: number;
 };
-type LastInput = {
+type RunDetail = {
   index: number;
   messages: BaseMessage[] | string[] | string;
   options: SerializedSecret | SerializedNotImplemented | SerializedFields;
@@ -38,11 +38,13 @@ export class N8nLlmTracing extends BaseCallbackHandler {
   completionTokensEstimate = 0;
-  lastInput: LastInput = {
-    index: 0,
-    messages: [],
-    options: {},
-  };
+  /**
+   * A map to associate LLM run IDs to run details.
+   * Key: Unique identifier for each LLM run (run ID)
+   * Value: RunDetails object
+   *
+   */
+  runsMap: Record<string, RunDetail> = {};
   options = {
     // Default(OpenAI format) parser
@@ -83,7 +85,11 @@ export class N8nLlmTracing extends BaseCallbackHandler {
     return encodedListLength.reduce((acc, curr) => acc + curr, 0);
   }
-  async handleLLMEnd(output: LLMResult) {
+  async handleLLMEnd(output: LLMResult, runId: string) {
+    // The fallback should never happen since handleLLMStart should always set the run details
+    // but just in case, we set the index to the length of the runsMap
+    const runDetails = this.runsMap[runId] ?? { index: Object.keys(this.runsMap).length };
     output.generations = output.generations.map((gen) =>
       gen.map((g) => pick(g, ['text', 'generationInfo'])),
     );
@@ -120,47 +126,43 @@ export class N8nLlmTracing extends BaseCallbackHandler {
     }
     const parsedMessages =
-      typeof this.lastInput.messages === 'string'
-        ? this.lastInput.messages
-        : this.lastInput.messages.map((message) => {
+      typeof runDetails.messages === 'string'
+        ? runDetails.messages
+        : runDetails.messages.map((message) => {
           if (typeof message === 'string') return message;
           if (typeof message?.toJSON === 'function') return message.toJSON();
           return message;
         });
-    this.executionFunctions.addOutputData(this.connectionType, this.lastInput.index, [
+    this.executionFunctions.addOutputData(this.connectionType, runDetails.index, [
       [{ json: { ...response } }],
     ]);
     void logAiEvent(this.executionFunctions, 'n8n.ai.llm.generated', {
       messages: parsedMessages,
-      options: this.lastInput.options,
+      options: runDetails.options,
       response,
     });
   }
-  async handleLLMStart(llm: Serialized, prompts: string[]) {
+  async handleLLMStart(llm: Serialized, prompts: string[], runId: string) {
     const estimatedTokens = await this.estimateTokensFromStringList(prompts);
     const options = llm.type === 'constructor' ? llm.kwargs : llm;
-    const { index } = this.executionFunctions.addInputData(
-      this.connectionType,
-      [
-        [
-          {
-            json: {
-              messages: prompts,
-              estimatedTokens,
-              options,
-            },
-          },
-        ],
-      ],
-      this.lastInput.index + 1,
-    );
+    const { index } = this.executionFunctions.addInputData(this.connectionType, [
+      [
+        {
+          json: {
+            messages: prompts,
+            estimatedTokens,
+            options,
+          },
+        },
+      ],
+    ]);
-    // Save the last input for later use when processing `handleLLMEnd` event
-    this.lastInput = {
+    // Save the run details for later use when processing `handleLLMEnd` event
+    this.runsMap[runId] = {
       index,
       options,
       messages: prompts,
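The heart of the tracing fix is replacing the single lastInput field, which concurrent LLM runs would overwrite, with a runsMap keyed by the callback's runId, so each handleLLMEnd pairs its output with the input of the same run. Below is a minimal sketch of the pattern under simplified types (the class, the map-size stand-in for addInputData's index, and the signatures are illustrative, not n8n's actual API):

// Sketch: runId-keyed run tracking (simplified types; not n8n's actual API).
type RunDetail = {
  index: number; // in n8n this comes from addInputData and is reused by addOutputData
  messages: string[];
  options: Record<string, unknown>;
};

class TracingSketch {
  private runsMap: Record<string, RunDetail> = {};

  handleLLMStart(prompts: string[], options: Record<string, unknown>, runId: string): void {
    const index = Object.keys(this.runsMap).length; // stand-in for the index addInputData returns
    // Keyed by runId, a second run starting before the first ends no longer
    // clobbers the first run's bookkeeping, which is what broke output indexing.
    this.runsMap[runId] = { index, messages: prompts, options };
  }

  handleLLMEnd(runId: string): RunDetail {
    // Mirrors the commit's fallback: if start never recorded this run,
    // derive an index from the map size instead of failing.
    return (
      this.runsMap[runId] ?? { index: Object.keys(this.runsMap).length, messages: [], options: {} }
    );
  }
}

// Two interleaved runs each resolve to their own input index:
const tracer = new TracingSketch();
tracer.handleLLMStart(['prompt A'], {}, 'run-a');
tracer.handleLLMStart(['prompt B'], {}, 'run-b');
console.log(tracer.handleLLMEnd('run-a').index); // 0
console.log(tracer.handleLLMEnd('run-b').index); // 1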

View file

@@ -56,6 +56,7 @@ import { useWorkflowsStore } from '@/stores/workflows.store';
 import { useNDVStore } from '@/stores/ndv.store';
 import { useNodeTypesStore } from '@/stores/nodeTypes.store';
 import { useNodeHelpers } from '@/composables/useNodeHelpers';
+import { useNodeType } from '@/composables/useNodeType';
 import { useToast } from '@/composables/useToast';
 import { isEqual, isObject } from 'lodash-es';
 import { useExternalHooks } from '@/composables/useExternalHooks';
@@ -171,12 +172,16 @@ export default defineComponent({
       runIndex: props.runIndex,
       displayMode: ndvStore.getPanelDisplayMode(props.paneType),
     });

+    const { isSubNodeType } = useNodeType({
+      node,
+    });
+
     return {
       ...useToast(),
       externalHooks,
       nodeHelpers,
       pinnedData,
+      isSubNodeType,
     };
   },
   data() {
@@ -308,6 +313,12 @@ export default defineComponent({
       if (!this.node) {
         return null;
       }
+
+      // If the node is a sub-node, we need to get the parent node error to check for input errors
+      if (this.isSubNodeType && this.paneType === 'input') {
+        const parentNode = this.workflow.getChildNodes(this.node?.name ?? '', 'ALL_NON_MAIN')[0];
+        return this.workflowRunData?.[parentNode]?.[this.runIndex]?.error as NodeError;
+      }
       return this.workflowRunData?.[this.node?.name]?.[this.runIndex]?.error as NodeError;
     },
     hasRunError(): boolean {
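Why the input pane looks at another node's run data: a failing sub-node (an LLM attached to an agent, say) surfaces its error on the root node it feeds into, so the sub-node's own runData entry may carry none. A condensed sketch of the lookup with simplified types (resolveRootNode stands in for workflow.getChildNodes(name, 'ALL_NON_MAIN')[0]; none of these names are n8n's API):

// Sketch: pick which node's error a pane should show (simplified shapes).
type RunError = { message: string };
type WorkflowRunData = Record<string, Array<{ error?: RunError }>>;

function paneError(
  runData: WorkflowRunData,
  nodeName: string,
  runIndex: number,
  isSubNodeType: boolean,
  paneType: 'input' | 'output',
  // Stand-in for workflow.getChildNodes(nodeName, 'ALL_NON_MAIN')[0]: for a
  // sub-node, the node reached over a non-main connection is its root node.
  resolveRootNode: (subNodeName: string) => string | undefined,
): RunError | undefined {
  if (isSubNodeType && paneType === 'input') {
    const rootNode = resolveRootNode(nodeName);
    if (rootNode) return runData[rootNode]?.[runIndex]?.error;
  }
  return runData[nodeName]?.[runIndex]?.error;
}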

View file

@@ -9,10 +9,12 @@ import hljs from 'highlight.js/lib/core';
 import { useClipboard } from '@/composables/useClipboard';
 import { useI18n } from '@/composables/useI18n';
 import { useToast } from '@/composables/useToast';
-import { NodeConnectionType, type IDataObject } from 'n8n-workflow';
+import { NodeConnectionType } from 'n8n-workflow';
+import type { NodeError, IDataObject } from 'n8n-workflow';

 const props = defineProps<{
   runData: IAiDataContent;
+  error?: NodeError;
 }>();

 const i18n = useI18n();
@@ -149,7 +151,7 @@ onMounted(() => {
   <p :class="$style.blockTitle">{{ capitalize(runData.inOut) }}</p>
   <!-- @click.stop to prevent event from bubbling to blockHeader and toggling expanded state when clicking on rawSwitch -->
   <el-switch
-    v-if="contentParsed"
+    v-if="contentParsed && !error"
     v-model="isShowRaw"
     :class="$style.rawSwitch"
     active-text="RAW JSON"
@@ -162,8 +164,10 @@ onMounted(() => {
     [$style.blockContentExpanded]: isExpanded,
   }"
 >
+  <NodeErrorView v-if="error" :error="error" :class="$style.error" />
   <div
     v-for="({ parsedContent, raw }, index) in parsedRun"
+    v-else
     :key="index"
     :class="$style.contentText"
     :data-content-type="parsedContent?.type"
@@ -299,4 +303,7 @@ onMounted(() => {
   padding: 0;
   color: var(--color-text-base);
 }
+.error {
+  padding: var(--spacing-s) 0;
+}
 </style>
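The template change reduces to two display rules: an error prop takes priority over the parsed content, and the RAW JSON toggle only renders when there is parsed content and no error. The same rules as a pure function, just to make the truth table explicit (a sketch; the component itself keeps this inline in v-if directives):

// Sketch of the template's display rules, extracted for clarity.
type BlockViewState = {
  showErrorView: boolean; // NodeErrorView replaces the content list
  showRawSwitch: boolean; // mirrors v-if="contentParsed && !error"
};

function blockViewState(contentParsed: boolean, hasError: boolean): BlockViewState {
  return {
    showErrorView: hasError,
    showRawSwitch: contentParsed && !hasError,
  };
}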

View file

@@ -2,7 +2,12 @@
 import type { IAiData, IAiDataContent } from '@/Interface';
 import { useNodeTypesStore } from '@/stores/nodeTypes.store';
 import { useWorkflowsStore } from '@/stores/workflows.store';
-import type { INodeExecutionData, INodeTypeDescription, NodeConnectionType } from 'n8n-workflow';
+import type {
+  INodeExecutionData,
+  INodeTypeDescription,
+  NodeConnectionType,
+  NodeError,
+} from 'n8n-workflow';
 import { computed } from 'vue';
 import NodeIcon from '@/components/NodeIcon.vue';
 import AiRunContentBlock from './AiRunContentBlock.vue';
@@ -85,6 +90,16 @@ const runMeta = computed(() => {
   }
   return extractRunMeta(outputRun.value);
 });
+
+const executionRunData = computed(() => {
+  return workflowsStore.getWorkflowExecution?.data?.resultData?.runData;
+});
+
+const outputError = computed(() => {
+  return executionRunData.value?.[props.inputData.node]?.[props.inputData.runIndex]?.error as
+    | NodeError
+    | undefined;
+});
 </script>
 <template>
@@ -155,7 +170,10 @@ const runMeta = computed(() => {
     </header>
     <main v-for="(run, index) in props.inputData.data" :key="index" :class="$style.content">
-      <AiRunContentBlock :run-data="run" />
+      <AiRunContentBlock
+        :run-data="run"
+        :error="run.inOut === 'output' ? outputError : undefined"
+      />
     </main>
   </div>
 </template>
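Note the guard on the new prop: the error is attached only to blocks whose inOut is 'output', so the input block of the same run never duplicates it. A sketch of the lookup plus the guard under simplified store shapes (getNodeRunError and errorForBlock are illustrative helpers, not n8n's API):

// Sketch: resolve a node's execution error and attach it to output blocks only.
type RunError = { message: string };
type ExecutionRunData = Record<string, Array<{ error?: RunError }>>;

function getNodeRunError(
  runData: ExecutionRunData | undefined,
  nodeName: string,
  runIndex: number,
): RunError | undefined {
  return runData?.[nodeName]?.[runIndex]?.error;
}

function errorForBlock(inOut: 'input' | 'output', outputError?: RunError): RunError | undefined {
  // Mirrors :error="run.inOut === 'output' ? outputError : undefined"
  return inOut === 'output' ? outputError : undefined;
}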