feat(core): Add error reporting for workflow evaluation (no-changelog) (#12838)
Co-authored-by: oleg <me@olegivaniv.com>
This commit is contained in:
parent 17acf70591 · commit 02573b46e0
@@ -1,4 +1,4 @@
-import type { ExecutionError } from 'n8n-workflow/src';
+import type { ExecutionError } from 'n8n-workflow';

 import {
 	closeManualChatModal,
@@ -1,7 +1,7 @@
 import type { BaseLanguageModel } from '@langchain/core/language_models/base';
 import { FakeLLM, FakeListChatModel } from '@langchain/core/utils/testing';
 import get from 'lodash/get';
-import type { IDataObject, IExecuteFunctions } from 'n8n-workflow/src';
+import type { IDataObject, IExecuteFunctions } from 'n8n-workflow';

 import { makeZodSchemaFromAttributes } from '../helpers';
 import { InformationExtractor } from '../InformationExtractor.node';
@@ -19,6 +19,7 @@ import * as CrashJournal from '@/crash-journal';
 import * as Db from '@/db';
 import { getDataDeduplicationService } from '@/deduplication';
 import { DeprecationService } from '@/deprecation/deprecation.service';
+import { TestRunnerService } from '@/evaluation.ee/test-runner/test-runner.service.ee';
 import { MessageEventBus } from '@/eventbus/message-event-bus/message-event-bus';
 import { TelemetryEventRelay } from '@/events/relays/telemetry.event-relay';
 import { initExpressionEvaluator } from '@/expression-evaluator';
@@ -259,6 +260,10 @@ export abstract class BaseCommand extends Command {
 		Container.get(WorkflowHistoryManager).init();
 	}

+	async cleanupTestRunner() {
+		await Container.get(TestRunnerService).cleanupIncompleteRuns();
+	}
+
 	async finally(error: Error | undefined) {
 		if (inTest || this.id === 'start') return;
 		if (Db.connectionState.connected) {
@@ -222,6 +222,11 @@ export class Start extends BaseCommand {
 		this.initWorkflowHistory();
 		this.logger.debug('Workflow history init complete');

+		if (!isMultiMainEnabled) {
+			await this.cleanupTestRunner();
+			this.logger.debug('Test runner cleanup complete');
+		}
+
 		if (!this.globalConfig.endpoints.disableUi) {
 			await this.generateStaticAssets();
 		}
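Together with the base-command hunk above, this wires interrupted-run cleanup into single-main startup. A minimal sketch of the resulting call chain, with the class plumbing elided (names taken from the hunks in this diff; the multi-main rationale is an assumption):

// Sketch only: the real code lives across BaseCommand, Start, and TestRunnerService.
// Start.run() -> BaseCommand.cleanupTestRunner() -> TestRunnerService.cleanupIncompleteRuns()
//   -> TestRunRepository.markAllIncompleteAsFailed()
//   -> status 'error', errorCode 'INTERRUPTED' for every 'new'/'running' test run
async function startupCleanup(
	isMultiMainEnabled: boolean,
	testRunner: { cleanupIncompleteRuns(): Promise<void> },
) {
	// Multi-main setups skip this, presumably because another main instance may still own a run.
	if (!isMultiMainEnabled) {
		await testRunner.cleanupIncompleteRuns();
	}
}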
@@ -1,4 +1,5 @@
 import { Column, Entity, ManyToOne, OneToOne } from '@n8n/typeorm';
+import type { IDataObject } from 'n8n-workflow';

 import {
 	datetimeColumnType,
@@ -7,9 +8,19 @@ import {
 } from '@/databases/entities/abstract-entity';
 import type { ExecutionEntity } from '@/databases/entities/execution-entity';
 import { TestRun } from '@/databases/entities/test-run.ee';
+import type { TestCaseExecutionErrorCode } from '@/evaluation.ee/test-runner/errors.ee';

 export type TestCaseRunMetrics = Record<string, number | boolean>;

+export type TestCaseExecutionStatus =
+	| 'new' // Test case execution was created and added to the test run, but has not been started yet
+	| 'running' // Workflow under test is running
+	| 'evaluation_running' // Evaluation workflow is running
+	| 'success' // Both workflows have completed successfully
+	| 'error' // An error occurred during the execution of the workflow under test or the evaluation workflow
+	| 'warning' // There were warnings during the execution of the workflow under test or the evaluation workflow. Used only to signal possible issues to the user, not to indicate a failure.
+	| 'cancelled';
+
 /**
  * This entity represents the linking between the test runs and individual executions.
  * It stores the status, links to past, new, and evaluation executions, and metrics produced by individual evaluation workflow executions
@@ -49,7 +60,7 @@ export class TestCaseExecution extends WithStringId {
 	evaluationExecutionId: string | null;

 	@Column()
-	status: 'new' | 'running' | 'evaluation_running' | 'success' | 'error' | 'cancelled';
+	status: TestCaseExecutionStatus;

 	@Column({ type: datetimeColumnType, nullable: true })
 	runAt: Date | null;
@@ -58,10 +69,10 @@ export class TestCaseExecution extends WithStringId {
 	completedAt: Date | null;

 	@Column('varchar', { nullable: true })
-	errorCode: string | null;
+	errorCode: TestCaseExecutionErrorCode | null;

 	@Column(jsonColumnType, { nullable: true })
-	errorDetails: Record<string, unknown>;
+	errorDetails: IDataObject | null;

 	@Column(jsonColumnType, { nullable: true })
 	metrics: TestCaseRunMetrics;
@@ -1,13 +1,17 @@
-import { Column, Entity, Index, ManyToOne, RelationId } from '@n8n/typeorm';
+import { Column, Entity, Index, ManyToOne, OneToMany, RelationId } from '@n8n/typeorm';
+import type { IDataObject } from 'n8n-workflow';

 import {
 	datetimeColumnType,
 	jsonColumnType,
 	WithTimestampsAndStringId,
 } from '@/databases/entities/abstract-entity';
+import type { TestCaseExecution } from '@/databases/entities/test-case-execution.ee';
 import { TestDefinition } from '@/databases/entities/test-definition.ee';
+import type { TestRunFinalResult } from '@/databases/repositories/test-run.repository.ee';
+import type { TestRunErrorCode } from '@/evaluation.ee/test-runner/errors.ee';

-type TestRunStatus = 'new' | 'running' | 'completed' | 'error' | 'cancelled';
+export type TestRunStatus = 'new' | 'running' | 'completed' | 'error' | 'cancelled';

 export type AggregatedTestRunMetrics = Record<string, number | boolean>;
@@ -54,4 +58,26 @@ export class TestRun extends WithTimestampsAndStringId {
 	 */
 	@Column('integer', { nullable: true })
 	failedCases: number;
+
+	/**
+	 * This will contain the error code if the test run failed.
+	 * This is used for test-run-level errors, not for individual test case errors.
+	 */
+	@Column('varchar', { nullable: true, length: 255 })
+	errorCode: TestRunErrorCode | null;
+
+	/**
+	 * Optional details about the error that happened during the test run
+	 */
+	@Column(jsonColumnType, { nullable: true })
+	errorDetails: IDataObject | null;
+
+	@OneToMany('TestCaseExecution', 'testRun')
+	testCaseExecutions: TestCaseExecution[];
+
+	/**
+	 * Calculated property to determine the final result of the test run
+	 * depending on the statuses of test case executions
+	 */
+	finalResult?: TestRunFinalResult | null;
 }
1737715421462-AddErrorColumnsToTestRuns.ts (new file)
@@ -0,0 +1,24 @@
import type { MigrationContext, ReversibleMigration } from '@/databases/types';

// We have to use a raw-query migration instead of the schemaBuilder helpers,
// because the TypeORM schema builder implements addColumns as a table recreate on sqlite,
// which causes weird issues with the migration
export class AddErrorColumnsToTestRuns1737715421462 implements ReversibleMigration {
	async up({ escape, runQuery }: MigrationContext) {
		const tableName = escape.tableName('test_run');
		const errorCodeColumnName = escape.columnName('errorCode');
		const errorDetailsColumnName = escape.columnName('errorDetails');

		await runQuery(`ALTER TABLE ${tableName} ADD COLUMN ${errorCodeColumnName} VARCHAR(255);`);
		await runQuery(`ALTER TABLE ${tableName} ADD COLUMN ${errorDetailsColumnName} TEXT;`);
	}

	async down({ escape, runQuery }: MigrationContext) {
		const tableName = escape.tableName('test_run');
		const errorCodeColumnName = escape.columnName('errorCode');
		const errorDetailsColumnName = escape.columnName('errorDetails');

		await runQuery(`ALTER TABLE ${tableName} DROP COLUMN ${errorCodeColumnName};`);
		await runQuery(`ALTER TABLE ${tableName} DROP COLUMN ${errorDetailsColumnName};`);
	}
}
@@ -78,6 +78,7 @@ import { AddMockedNodesColumnToTestDefinition1733133775640 } from '../common/173
 import { AddManagedColumnToCredentialsTable1734479635324 } from '../common/1734479635324-AddManagedColumnToCredentialsTable';
 import { AddStatsColumnsToTestRun1736172058779 } from '../common/1736172058779-AddStatsColumnsToTestRun';
 import { CreateTestCaseExecutionTable1736947513045 } from '../common/1736947513045-CreateTestCaseExecutionTable';
+import { AddErrorColumnsToTestRuns1737715421462 } from '../common/1737715421462-AddErrorColumnsToTestRuns';

 export const mysqlMigrations: Migration[] = [
 	InitialMigration1588157391238,
@@ -158,4 +159,5 @@ export const mysqlMigrations: Migration[] = [
 	AddProjectIcons1729607673469,
 	AddStatsColumnsToTestRun1736172058779,
 	CreateTestCaseExecutionTable1736947513045,
+	AddErrorColumnsToTestRuns1737715421462,
 ];
@@ -78,6 +78,7 @@ import { AddMockedNodesColumnToTestDefinition1733133775640 } from '../common/173
 import { AddManagedColumnToCredentialsTable1734479635324 } from '../common/1734479635324-AddManagedColumnToCredentialsTable';
 import { AddStatsColumnsToTestRun1736172058779 } from '../common/1736172058779-AddStatsColumnsToTestRun';
 import { CreateTestCaseExecutionTable1736947513045 } from '../common/1736947513045-CreateTestCaseExecutionTable';
+import { AddErrorColumnsToTestRuns1737715421462 } from '../common/1737715421462-AddErrorColumnsToTestRuns';

 export const postgresMigrations: Migration[] = [
 	InitialMigration1587669153312,
@@ -158,4 +159,5 @@ export const postgresMigrations: Migration[] = [
 	AddProjectIcons1729607673469,
 	AddStatsColumnsToTestRun1736172058779,
 	CreateTestCaseExecutionTable1736947513045,
+	AddErrorColumnsToTestRuns1737715421462,
 ];
@@ -75,6 +75,7 @@ import { AddMockedNodesColumnToTestDefinition1733133775640 } from '../common/173
 import { AddManagedColumnToCredentialsTable1734479635324 } from '../common/1734479635324-AddManagedColumnToCredentialsTable';
 import { AddStatsColumnsToTestRun1736172058779 } from '../common/1736172058779-AddStatsColumnsToTestRun';
 import { CreateTestCaseExecutionTable1736947513045 } from '../common/1736947513045-CreateTestCaseExecutionTable';
+import { AddErrorColumnsToTestRuns1737715421462 } from '../common/1737715421462-AddErrorColumnsToTestRuns';

 const sqliteMigrations: Migration[] = [
 	InitialMigration1588102412422,
@@ -152,6 +153,7 @@ const sqliteMigrations: Migration[] = [
 	AddProjectIcons1729607673469,
 	AddStatsColumnsToTestRun1736172058779,
 	CreateTestCaseExecutionTable1736947513045,
+	AddErrorColumnsToTestRuns1737715421462,
 ];

 export { sqliteMigrations };
@@ -2,8 +2,35 @@ import { Service } from '@n8n/di';
 import type { EntityManager } from '@n8n/typeorm';
 import { DataSource, In, Not, Repository } from '@n8n/typeorm';
 import type { DeepPartial } from '@n8n/typeorm/common/DeepPartial';
+import type { IDataObject } from 'n8n-workflow';

 import { TestCaseExecution } from '@/databases/entities/test-case-execution.ee';
+import type { TestCaseExecutionErrorCode } from '@/evaluation.ee/test-runner/errors.ee';
+
+type StatusUpdateOptions = {
+	testRunId: string;
+	pastExecutionId: string;
+	trx?: EntityManager;
+};
+
+type MarkAsFailedOptions = StatusUpdateOptions & {
+	errorCode?: TestCaseExecutionErrorCode;
+	errorDetails?: IDataObject;
+};
+
+type MarkAsWarningOptions = MarkAsFailedOptions;
+
+type MarkAsRunningOptions = StatusUpdateOptions & {
+	executionId: string;
+};
+
+type MarkAsEvaluationRunningOptions = StatusUpdateOptions & {
+	evaluationExecutionId: string;
+};
+
+type MarkAsCompletedOptions = StatusUpdateOptions & {
+	metrics: Record<string, number>;
+};

 @Service()
 export class TestCaseExecutionRepository extends Repository<TestCaseExecution> {
@@ -27,8 +54,11 @@ export class TestCaseExecutionRepository extends Repository<TestCaseExecution> {
 		return await this.save(mappings);
 	}

-	async markAsRunning(testRunId: string, pastExecutionId: string, executionId: string) {
-		return await this.update(
+	async markAsRunning({ testRunId, pastExecutionId, executionId, trx }: MarkAsRunningOptions) {
+		trx = trx ?? this.manager;
+
+		return await trx.update(
+			TestCaseExecution,
 			{ testRun: { id: testRunId }, pastExecutionId },
 			{
 				status: 'running',
@@ -38,12 +68,16 @@ export class TestCaseExecutionRepository extends Repository<TestCaseExecution> {
 		);
 	}

-	async markAsEvaluationRunning(
-		testRunId: string,
-		pastExecutionId: string,
-		evaluationExecutionId: string,
-	) {
-		return await this.update(
+	async markAsEvaluationRunning({
+		testRunId,
+		pastExecutionId,
+		evaluationExecutionId,
+		trx,
+	}: MarkAsEvaluationRunningOptions) {
+		trx = trx ?? this.manager;
+
+		return await trx.update(
+			TestCaseExecution,
 			{ testRun: { id: testRunId }, pastExecutionId },
 			{
 				status: 'evaluation_running',
@@ -52,12 +86,7 @@ export class TestCaseExecutionRepository extends Repository<TestCaseExecution> {
 		);
 	}

-	async markAsCompleted(
-		testRunId: string,
-		pastExecutionId: string,
-		metrics: Record<string, number>,
-		trx?: EntityManager,
-	) {
+	async markAsCompleted({ testRunId, pastExecutionId, metrics, trx }: MarkAsCompletedOptions) {
 		trx = trx ?? this.manager;

 		return await trx.update(
@@ -84,7 +113,13 @@ export class TestCaseExecutionRepository extends Repository<TestCaseExecution> {
 		);
 	}

-	async markAsFailed(testRunId: string, pastExecutionId: string, trx?: EntityManager) {
+	async markAsFailed({
+		testRunId,
+		pastExecutionId,
+		errorCode,
+		errorDetails,
+		trx,
+	}: MarkAsFailedOptions) {
 		trx = trx ?? this.manager;

 		return await trx.update(
@@ -93,6 +128,25 @@ export class TestCaseExecutionRepository extends Repository<TestCaseExecution> {
 			{
 				status: 'error',
 				completedAt: new Date(),
+				errorCode,
+				errorDetails,
 			},
 		);
 	}
+
+	async markAsWarning({
+		testRunId,
+		pastExecutionId,
+		errorCode,
+		errorDetails,
+	}: MarkAsWarningOptions) {
+		return await this.update(
+			{ testRun: { id: testRunId }, pastExecutionId },
+			{
+				status: 'warning',
+				completedAt: new Date(),
+				errorCode,
+				errorDetails,
+			},
+		);
+	}
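With this refactor every status transition takes a single options object, so call sites name their arguments and can share a transaction. A hedged usage sketch (the repository instance, IDs, and error details are placeholders):

// Hypothetical call site: fail one test case inside an existing transaction.
await testCaseExecutionRepository.markAsFailed({
	testRunId: 'test-run-id',
	pastExecutionId: 'past-execution-id',
	errorCode: 'FAILED_TO_EXECUTE_WORKFLOW',
	errorDetails: { node: 'HTTP Request' }, // free-form IDataObject, surfaced in the UI tooltip
	trx, // optional EntityManager; falls back to this.manager when omitted
});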
@@ -1,11 +1,21 @@
 import { Service } from '@n8n/di';
 import type { EntityManager, FindManyOptions } from '@n8n/typeorm';
-import { DataSource, Repository } from '@n8n/typeorm';
+import { DataSource, In, Repository } from '@n8n/typeorm';
+import type { IDataObject } from 'n8n-workflow';

 import type { AggregatedTestRunMetrics } from '@/databases/entities/test-run.ee';
 import { TestRun } from '@/databases/entities/test-run.ee';
+import { NotFoundError } from '@/errors/response-errors/not-found.error';
+import type { TestRunErrorCode } from '@/evaluation.ee/test-runner/errors.ee';
+import { getTestRunFinalResult } from '@/evaluation.ee/test-runner/utils.ee';
 import type { ListQuery } from '@/requests';

+export type TestRunFinalResult = 'success' | 'error' | 'warning';
+
+export type TestRunSummary = TestRun & {
+	finalResult: TestRunFinalResult | null;
+};
+
 @Service()
 export class TestRunRepository extends Repository<TestRun> {
 	constructor(dataSource: DataSource) {
@@ -40,6 +50,21 @@ export class TestRunRepository extends Repository<TestRun> {
 		return await trx.update(TestRun, id, { status: 'cancelled' });
 	}

+	async markAsError(id: string, errorCode: TestRunErrorCode, errorDetails?: IDataObject) {
+		return await this.update(id, {
+			status: 'error',
+			errorCode,
+			errorDetails,
+		});
+	}
+
+	async markAllIncompleteAsFailed() {
+		return await this.update(
+			{ status: In(['new', 'running']) },
+			{ status: 'error', errorCode: 'INTERRUPTED' },
+		);
+	}
+
 	async incrementPassed(id: string, trx?: EntityManager) {
 		trx = trx ?? this.manager;
 		return await trx.increment(TestRun, { id }, 'passedCases', 1);
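markAsError persists run-level failures, while markAllIncompleteAsFailed backs the startup cleanup shown earlier. A sketch of how the service layer below uses them (simplified from the test-runner service changes later in this diff):

try {
	// ... execute the test run ...
} catch (e) {
	if (e instanceof TestRunError) {
		// Known failure: persist its code and context for the UI.
		await testRunRepository.markAsError(testRun.id, e.code, e.extra as IDataObject);
	} else {
		await testRunRepository.markAsError(testRun.id, 'UNKNOWN_ERROR');
		throw e;
	}
}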
@@ -51,9 +76,11 @@ export class TestRunRepository extends Repository<TestRun> {
 	}

 	async getMany(testDefinitionId: string, options: ListQuery.Options) {
+		// FIXME: optimize fetching final result of each test run
 		const findManyOptions: FindManyOptions<TestRun> = {
 			where: { testDefinition: { id: testDefinitionId } },
 			order: { createdAt: 'DESC' },
+			relations: ['testCaseExecutions'],
 		};

 		if (options?.take) {
@@ -61,6 +88,37 @@ export class TestRunRepository extends Repository<TestRun> {
 			findManyOptions.take = options.take;
 		}

-		return await this.find(findManyOptions);
+		const testRuns = await this.find(findManyOptions);
+
+		return testRuns.map(({ testCaseExecutions, ...testRun }) => {
+			const finalResult =
+				testRun.status === 'completed' ? getTestRunFinalResult(testCaseExecutions) : null;
+			return { ...testRun, finalResult };
+		});
+	}
+
+	/**
+	 * A test run summary is a TestRun with a final result.
+	 * The final result is calculated based on the statuses of all test case executions.
+	 * E.g. a test run is considered successful if all test case executions are successful,
+	 * and failed if at least one test case execution failed.
+	 */
+	async getTestRunSummaryById(
+		testDefinitionId: string,
+		testRunId: string,
+	): Promise<TestRunSummary> {
+		const testRun = await this.findOne({
+			where: { id: testRunId, testDefinition: { id: testDefinitionId } },
+			relations: ['testCaseExecutions'],
+		});
+
+		if (!testRun) {
+			throw new NotFoundError('Test run not found');
+		}
+
+		testRun.finalResult =
+			testRun.status === 'completed' ? getTestRunFinalResult(testRun.testCaseExecutions) : null;
+
+		return testRun as TestRunSummary;
+	}
 }
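A hedged read-path sketch for the new summary helpers (IDs are placeholders):

// List recent runs for a definition; finalResult is computed per run from its test case executions.
const runs = await testRunRepository.getMany('test-definition-id', { take: 10 });
for (const run of runs) {
	// finalResult is 'success' | 'warning' | 'error' for completed runs, otherwise null
	console.log(run.id, run.status, run.finalResult);
}

// Fetch a single run with the same enrichment; throws NotFoundError when missing.
const summary = await testRunRepository.getTestRunSummaryById('test-definition-id', 'test-run-id');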
@@ -126,13 +126,10 @@ export class TestDefinitionService {
 		);
 		const existingNodeIds = new Map(existingTestDefinition.workflow.nodes.map((n) => [n.id, n]));

-		attrs.mockedNodes.forEach((node) => {
-			if (!existingNodeIds.has(node.id) || (node.name && !existingNodeNames.has(node.name))) {
-				throw new BadRequestError(
-					`Pinned node not found in the workflow: ${node.id} (${node.name})`,
-				);
-			}
-		});
+		// If a node was previously mocked and has since been removed from the workflow, drop it from the mocked nodes
+		attrs.mockedNodes = attrs.mockedNodes.filter(
+			(node) => existingNodeIds.has(node.id) || (node.name && existingNodeNames.has(node.name)),
+		);

 		// Update the node names OR node ids if they are not provided
 		attrs.mockedNodes = attrs.mockedNodes.map((node) => {
@@ -13,29 +13,23 @@ describe('EvaluationMetrics', () => {
 		expect(aggregatedMetrics).toEqual({ metric1: 0.75, metric2: 0.1 });
 	});

-	test('should aggregate only numbers', () => {
+	test('should throw when a metric value is not a number', () => {
 		const testMetricNames = new Set(['metric1', 'metric2']);
 		const metrics = new EvaluationMetrics(testMetricNames);

-		metrics.addResults({ metric1: 1, metric2: 0 });
-		metrics.addResults({ metric1: '0.5', metric2: 0.2 });
-		metrics.addResults({ metric1: 'not a number', metric2: [1, 2, 3] });
-
-		const aggregatedUpMetrics = metrics.getAggregatedMetrics();
-
-		expect(aggregatedUpMetrics).toEqual({ metric1: 1, metric2: 0.1 });
+		expect(() => metrics.addResults({ metric1: 1, metric2: 0 })).not.toThrow();
+		expect(() => metrics.addResults({ metric1: '0.5', metric2: 0.2 })).toThrow('INVALID_METRICS');
+		expect(() => metrics.addResults({ metric1: 'not a number', metric2: [1, 2, 3] })).toThrow(
+			'INVALID_METRICS',
+		);
 	});

-	test('should handle missing values', () => {
+	test('should throw when values are missing', () => {
 		const testMetricNames = new Set(['metric1', 'metric2']);
 		const metrics = new EvaluationMetrics(testMetricNames);

-		metrics.addResults({ metric1: 1 });
-		metrics.addResults({ metric2: 0.2 });
-
-		const aggregatedMetrics = metrics.getAggregatedMetrics();
-
-		expect(aggregatedMetrics).toEqual({ metric1: 1, metric2: 0.2 });
+		expect(() => metrics.addResults({ metric1: 1 })).toThrow('METRICS_MISSING');
+		expect(() => metrics.addResults({ metric2: 0.2 })).toThrow('METRICS_MISSING');
 	});

 	test('should handle empty metrics', () => {
@@ -69,4 +63,19 @@ describe('EvaluationMetrics', () => {

 		expect(aggregatedMetrics).toEqual({ metric1: 0.75 });
 	});
+
+	test('should report info on added metrics', () => {
+		const testMetricNames = new Set(['metric1']);
+		const metrics = new EvaluationMetrics(testMetricNames);
+		let info;
+
+		expect(() => (info = metrics.addResults({ metric1: 1, metric2: 0 }))).not.toThrow();
+
+		expect(info).toBeDefined();
+		expect(info).toHaveProperty('unknownMetrics');
+		expect(info!.unknownMetrics).toEqual(new Set(['metric2']));
+
+		expect(info).toHaveProperty('addedMetrics');
+		expect(info!.addedMetrics).toEqual({ metric1: 1 });
+	});
 });
@@ -0,0 +1,42 @@
import { mock } from 'jest-mock-extended';

import type { TestCaseExecution } from '@/databases/entities/test-case-execution.ee';
import { getTestRunFinalResult } from '@/evaluation.ee/test-runner/utils.ee';

function mockTestCaseExecutions(statuses: Array<TestCaseExecution['status']>) {
	return statuses.map((status) => mock<TestCaseExecution>({ status }));
}

describe('getTestRunFinalResult', () => {
	test('should return success if all test cases are successful', () => {
		const result = getTestRunFinalResult(
			mockTestCaseExecutions(['success', 'success', 'success', 'success', 'success']),
		);

		expect(result).toEqual('success');
	});

	test('should return error if at least one test case errored', () => {
		const result = getTestRunFinalResult(
			mockTestCaseExecutions(['success', 'error', 'success', 'success', 'success']),
		);

		expect(result).toEqual('error');
	});

	test('should return warning if at least one test case has a warning', () => {
		const result = getTestRunFinalResult(
			mockTestCaseExecutions(['success', 'warning', 'success', 'success', 'success']),
		);

		expect(result).toEqual('warning');
	});

	test('should return error if there are errors and warnings', () => {
		const result = getTestRunFinalResult(
			mockTestCaseExecutions(['success', 'error', 'warning', 'success', 'success']),
		);

		expect(result).toEqual('error');
	});
});
@@ -0,0 +1,171 @@
{
	"startData": {},
	"resultData": {
		"runData": {
			"Manual Run": [
				{
					"hints": [],
					"startTime": 1731079118048,
					"executionTime": 0,
					"source": [],
					"executionStatus": "success",
					"data": {
						"main": [
							[
								{ "json": { "query": "First item" }, "pairedItem": { "item": 0 } },
								{ "json": { "query": "Second item" }, "pairedItem": { "item": 0 } },
								{ "json": { "query": "Third item" }, "pairedItem": { "item": 0 } }
							]
						]
					}
				}
			],
			"Set Attribute": [
				{
					"hints": [],
					"startTime": 1731079118049,
					"executionTime": 0,
					"source": [{ "previousNode": "Manual Run" }],
					"executionStatus": "success",
					"data": {
						"main": [
							[
								{ "json": { "foo": "bar" }, "pairedItem": { "item": 0 } },
								{ "json": { "foo": "bar" }, "pairedItem": { "item": 1 } },
								{ "json": { "foo": "bar" }, "pairedItem": { "item": 2 } }
							]
						]
					}
				}
			],
			"Code": [
				{
					"hints": [],
					"startTime": 1731079118049,
					"executionTime": 3,
					"source": [{ "previousNode": "Set Attribute" }],
					"executionStatus": "success",
					"data": {
						"main": [
							[
								{ "json": { "foo": "bar", "random": 0.6315509336851373 }, "pairedItem": { "item": 0 } },
								{ "json": { "foo": "bar", "random": 0.3336315687359024 }, "pairedItem": { "item": 1 } },
								{ "json": { "foo": "bar", "random": 0.4241870158917733 }, "pairedItem": { "item": 2 } }
							]
						]
					}
				}
			]
		},
		"pinData": {
			"Manual Run": [
				{ "json": { "query": "First item" }, "pairedItem": { "item": 0 } },
				{ "json": { "query": "Second item" }, "pairedItem": { "item": 0 } },
				{ "json": { "query": "Third item" }, "pairedItem": { "item": 0 } }
			]
		},
		"lastNodeExecuted": "Code"
	},
	"executionData": {
		"contextData": {},
		"nodeExecutionStack": [],
		"metadata": {},
		"waitingExecution": {},
		"waitingExecutionSource": {}
	}
}
@@ -4,6 +4,7 @@ import { readFileSync } from 'fs';
 import { mock, mockDeep } from 'jest-mock-extended';
 import type { ErrorReporter } from 'n8n-core';
 import type { ExecutionError, GenericValue, IRun } from 'n8n-workflow';
+import type { ITaskData } from 'n8n-workflow';
 import path from 'path';

 import type { ActiveExecutions } from '@/active-executions';
@@ -54,6 +55,12 @@ const executionDataJson = JSON.parse(
 	readFileSync(path.join(__dirname, './mock-data/execution-data.json'), { encoding: 'utf-8' }),
 );

+const executionDataRenamedNodesJson = JSON.parse(
+	readFileSync(path.join(__dirname, './mock-data/execution-data-renamed-nodes.json'), {
+		encoding: 'utf-8',
+	}),
+);
+
 const executionDataMultipleTriggersJson = JSON.parse(
 	readFileSync(path.join(__dirname, './mock-data/execution-data.multiple-triggers.json'), {
 		encoding: 'utf-8',
@@ -81,7 +88,7 @@ const executionMocks = [
 		workflowId: 'workflow-under-test-id',
 		status: 'success',
 		executionData: {
-			data: stringify(executionDataJson),
+			data: stringify(executionDataRenamedNodesJson),
 			workflowData: wfUnderTestRenamedNodesJson,
 		},
 	}),
@@ -91,7 +98,12 @@ function mockExecutionData() {
 	return mock<IRun>({
 		data: {
 			resultData: {
-				runData: {},
+				runData: {
+					'When clicking ‘Test workflow’': mock<ITaskData[]>(),
+				},
+				// error is an optional prop, but jest-mock-extended will mock it by default,
+				// which affects the code logic. So we need to explicitly set it to undefined.
+				error: undefined,
 			},
 		},
 	});
@@ -292,7 +304,7 @@ describe('TestRunnerService', () => {

 		activeExecutions.getPostExecutePromise
 			.calledWith('some-execution-id-4')
-			.mockResolvedValue(mockEvaluationExecutionData({ metric1: 0.5 }));
+			.mockResolvedValue(mockEvaluationExecutionData({ metric1: 0.5, metric2: 100 }));

 		await testRunnerService.runTest(
 			mock<User>(),
@@ -322,7 +334,7 @@ describe('TestRunnerService', () => {
 		// Check evaluation workflow was executed
 		expect(workflowRunner.run).toHaveBeenCalledWith(
 			expect.objectContaining({
-				executionMode: 'evaluation',
+				executionMode: 'integrated',
 				executionData: expect.objectContaining({
 					executionData: expect.objectContaining({
 						nodeExecutionStack: expect.arrayContaining([
@@ -343,7 +355,7 @@ describe('TestRunnerService', () => {
 		expect(testRunRepository.markAsCompleted).toHaveBeenCalledTimes(1);
 		expect(testRunRepository.markAsCompleted).toHaveBeenCalledWith('test-run-id', {
 			metric1: 0.75,
-			metric2: 0,
+			metric2: 50,
 		});

 		expect(testRunRepository.incrementPassed).toHaveBeenCalledTimes(2);
@@ -624,6 +636,7 @@ describe('TestRunnerService', () => {
 		const startNodesData = (testRunnerService as any).getStartNodesData(
 			wfMultipleTriggersJson,
 			executionDataMultipleTriggersJson,
+			wfMultipleTriggersJson, // Test case where workflow didn't change
 		);

 		expect(startNodesData).toEqual({
@@ -652,6 +665,7 @@ describe('TestRunnerService', () => {
 		const startNodesData = (testRunnerService as any).getStartNodesData(
 			wfMultipleTriggersJson,
 			executionDataMultipleTriggersJson2,
+			wfMultipleTriggersJson, // Test case where workflow didn't change
 		);

 		expect(startNodesData).toEqual({
@@ -662,6 +676,35 @@ describe('TestRunnerService', () => {
 		});
 	});

+	test('should properly choose trigger when it was renamed', async () => {
+		const testRunnerService = new TestRunnerService(
+			logger,
+			telemetry,
+			workflowRepository,
+			workflowRunner,
+			executionRepository,
+			activeExecutions,
+			testRunRepository,
+			testCaseExecutionRepository,
+			testMetricRepository,
+			mockNodeTypes,
+			errorReporter,
+		);
+
+		const startNodesData = (testRunnerService as any).getStartNodesData(
+			wfUnderTestRenamedNodesJson, // Test case where the workflow changed (nodes renamed)
+			executionDataJson,
+			wfUnderTestJson,
+		);
+
+		expect(startNodesData).toEqual({
+			startNodes: expect.arrayContaining([expect.objectContaining({ name: 'Set attribute' })]),
+			triggerToStartFrom: expect.objectContaining({
+				name: 'Manual Run',
+			}),
+		});
+	});
+
 	describe('Test Run cancellation', () => {
 		beforeAll(() => {
 			jest.useFakeTimers();
packages/cli/src/evaluation.ee/test-runner/errors.ee.ts (new file, +39)
@@ -0,0 +1,39 @@
import { ApplicationError } from 'n8n-workflow';

export type TestCaseExecutionErrorCode =
	| 'MOCKED_NODE_DOES_NOT_EXIST'
	| 'TRIGGER_NO_LONGER_EXISTS'
	| 'FAILED_TO_EXECUTE_WORKFLOW'
	| 'EVALUATION_WORKFLOW_DOES_NOT_EXIST'
	| 'FAILED_TO_EXECUTE_EVALUATION_WORKFLOW'
	| 'METRICS_MISSING'
	| 'UNKNOWN_METRICS'
	| 'INVALID_METRICS'
	| 'PAYLOAD_LIMIT_EXCEEDED'
	| 'UNKNOWN_ERROR';

export class TestCaseExecutionError extends ApplicationError {
	readonly code: TestCaseExecutionErrorCode;

	constructor(code: TestCaseExecutionErrorCode, extra: Record<string, unknown> = {}) {
		super('Test Case execution failed with code ' + code, { extra });

		this.code = code;
	}
}

export type TestRunErrorCode =
	| 'PAST_EXECUTIONS_NOT_FOUND'
	| 'EVALUATION_WORKFLOW_NOT_FOUND'
	| 'INTERRUPTED'
	| 'UNKNOWN_ERROR';

export class TestRunError extends ApplicationError {
	readonly code: TestRunErrorCode;

	constructor(code: TestRunErrorCode, extra: Record<string, unknown> = {}) {
		super('Test Run failed with code ' + code, { extra });

		this.code = code;
	}
}
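Both error classes pair a machine-readable code with free-form context that ends up in the new errorDetails columns. A hedged sketch of the intended throw/catch pattern (mirroring the service code later in this diff; identifiers are placeholders):

// Throw with context; `extra` is persisted as errorDetails and can drive UI tooltips.
throw new TestCaseExecutionError('INVALID_METRICS', { metricName: 'metric1', metricValue: '0.5' });

// Catch and persist (simplified):
try {
	// ... run one test case ...
} catch (e) {
	if (e instanceof TestCaseExecutionError) {
		await testCaseExecutionRepository.markAsFailed({
			testRunId,
			pastExecutionId,
			errorCode: e.code,
			errorDetails: e.extra as IDataObject,
		});
	}
}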
@@ -1,5 +1,15 @@
+import difference from 'lodash/difference';
 import type { IDataObject } from 'n8n-workflow';

+import { TestCaseExecutionError } from '@/evaluation.ee/test-runner/errors.ee';
+
+export interface EvaluationMetricsAddResultsInfo {
+	addedMetrics: Record<string, number>;
+	missingMetrics: Set<string>;
+	unknownMetrics: Set<string>;
+	incorrectTypeMetrics: Set<string>;
+}
+
 export class EvaluationMetrics {
 	private readonly rawMetricsByName = new Map<string, number[]>();
@@ -9,17 +19,41 @@ export class EvaluationMetrics {
 		}
 	}

-	addResults(result: IDataObject): Record<string, number> {
-		const addedMetrics: Record<string, number> = {};
+	addResults(result: IDataObject): EvaluationMetricsAddResultsInfo {
+		const addResultsInfo: EvaluationMetricsAddResultsInfo = {
+			addedMetrics: {},
+			missingMetrics: new Set<string>(),
+			unknownMetrics: new Set<string>(),
+			incorrectTypeMetrics: new Set<string>(),
+		};

 		for (const [metricName, metricValue] of Object.entries(result)) {
-			if (typeof metricValue === 'number' && this.metricNames.has(metricName)) {
-				addedMetrics[metricName] = metricValue;
-				this.rawMetricsByName.get(metricName)!.push(metricValue);
+			if (this.metricNames.has(metricName)) {
+				if (typeof metricValue === 'number') {
+					addResultsInfo.addedMetrics[metricName] = metricValue;
+					this.rawMetricsByName.get(metricName)!.push(metricValue);
+				} else {
+					throw new TestCaseExecutionError('INVALID_METRICS', {
+						metricName,
+						metricValue,
+					});
+				}
+			} else {
+				addResultsInfo.unknownMetrics.add(metricName);
 			}
 		}

-		return addedMetrics;
+		// Check that result contains all expected metrics
+		if (
+			difference(Array.from(this.metricNames), Object.keys(addResultsInfo.addedMetrics)).length > 0
+		) {
+			throw new TestCaseExecutionError('METRICS_MISSING', {
+				expectedMetrics: Array.from(this.metricNames).sort(),
+				receivedMetrics: Object.keys(addResultsInfo.addedMetrics).sort(),
+			});
+		}
+
+		return addResultsInfo;
 	}

 	getAggregatedMetrics() {
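A hedged usage sketch of the new addResults contract, consistent with the updated unit tests above (metric names are placeholders):

const metrics = new EvaluationMetrics(new Set(['accuracy']));

// Known numeric metrics are recorded; unknown ones are reported back rather than stored.
const info = metrics.addResults({ accuracy: 0.9, latency: 120 });
// info.addedMetrics   -> { accuracy: 0.9 }
// info.unknownMetrics -> Set { 'latency' }

// A known metric with a non-numeric value throws TestCaseExecutionError('INVALID_METRICS');
// a result that omits a known metric throws TestCaseExecutionError('METRICS_MISSING').

const aggregated = metrics.getAggregatedMetrics(); // e.g. { accuracy: 0.9 }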
@@ -24,6 +24,7 @@ import { TestMetricRepository } from '@/databases/repositories/test-metric.repos
 import { TestRunRepository } from '@/databases/repositories/test-run.repository.ee';
 import { WorkflowRepository } from '@/databases/repositories/workflow.repository';
 import * as Db from '@/db';
+import { TestCaseExecutionError, TestRunError } from '@/evaluation.ee/test-runner/errors.ee';
 import { NodeTypes } from '@/node-types';
 import { Telemetry } from '@/telemetry';
 import { getRunData } from '@/workflow-execute-additional-data';
@@ -67,12 +68,21 @@ export class TestRunnerService {
 		private readonly errorReporter: ErrorReporter,
 	) {}

+	/**
+	 * As the Test Runner does not have a recovery mechanism, it cannot resume test runs interrupted by a server restart.
+	 * All test runs left in an incomplete state will be marked as failed with the `INTERRUPTED` error code.
+	 */
+	async cleanupIncompleteRuns() {
+		await this.testRunRepository.markAllIncompleteAsFailed();
+	}
+
 	/**
 	 * Prepares the start nodes and trigger node data props for the `workflowRunner.run` method input.
 	 */
 	private getStartNodesData(
 		workflow: WorkflowEntity,
 		pastExecutionData: IRunExecutionData,
+		pastExecutionWorkflowData: IWorkflowBase,
 	): Pick<IWorkflowExecutionDataProcess, 'startNodes' | 'triggerToStartFrom'> {
 		// Create a new workflow instance to use the helper functions (getChildNodes)
 		const workflowInstance = new Workflow({
@@ -82,21 +92,38 @@ export class TestRunnerService {
 			nodeTypes: this.nodeTypes,
 		});

+		// Map node names to IDs for the workflow of the past execution
+		const pastWorkflowNodeIdByName = new Map(
+			pastExecutionWorkflowData.nodes.map((node) => [node.name, node.id]),
+		);
+
+		// Map node IDs to names for the up-to-date workflow
+		const workflowNodeNameById = new Map(workflow.nodes.map((node) => [node.id, node.name]));
+
 		// Determine the trigger node of the past execution
 		const pastExecutionTriggerNode = getPastExecutionTriggerNode(pastExecutionData);
 		assert(pastExecutionTriggerNode, 'Could not find the trigger node of the past execution');

+		const pastExecutionTriggerNodeId = pastWorkflowNodeIdByName.get(pastExecutionTriggerNode);
+		assert(pastExecutionTriggerNodeId, 'Could not find the trigger node ID of the past execution');
+
+		// Check the trigger is still present in the workflow
+		const triggerNode = workflowNodeNameById.get(pastExecutionTriggerNodeId);
+		if (!triggerNode) {
+			throw new TestCaseExecutionError('TRIGGER_NO_LONGER_EXISTS');
+		}
+
 		const triggerNodeData = pastExecutionData.resultData.runData[pastExecutionTriggerNode][0];
 		assert(triggerNodeData, 'Trigger node data not found');

 		const triggerToStartFrom = {
-			name: pastExecutionTriggerNode,
+			name: triggerNode,
 			data: triggerNodeData,
 		};

 		// Start nodes are the nodes that are connected to the trigger node
 		const startNodes = workflowInstance
-			.getChildNodes(pastExecutionTriggerNode, NodeConnectionType.Main, 1)
+			.getChildNodes(triggerNode, NodeConnectionType.Main, 1)
 			.map((nodeName) => ({
 				name: nodeName,
 				sourceData: { previousNode: pastExecutionTriggerNode },
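The two maps let the trigger lookup survive node renames: the execution-time name resolves to a stable node ID, which then resolves to the current name. A standalone sketch of that resolution step (types simplified; not the actual method):

function resolveCurrentName(
	nameAtExecutionTime: string,
	pastNodes: Array<{ id: string; name: string }>,
	currentNodes: Array<{ id: string; name: string }>,
): string | undefined {
	const idByPastName = new Map(pastNodes.map((n) => [n.name, n.id]));
	const nameById = new Map(currentNodes.map((n) => [n.id, n.name]));
	const id = idByPastName.get(nameAtExecutionTime);
	// undefined here is what triggers TestCaseExecutionError('TRIGGER_NO_LONGER_EXISTS') above
	return id ? nameById.get(id) : undefined;
}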
@@ -135,7 +162,7 @@ export class TestRunnerService {

 		// Prepare the data to run the workflow
 		const data: IWorkflowExecutionDataProcess = {
-			...this.getStartNodesData(workflow, pastExecutionData),
+			...this.getStartNodesData(workflow, pastExecutionData, pastExecutionWorkflowData),
 			executionMode: 'evaluation',
 			runData: {},
 			pinData,
@@ -154,11 +181,11 @@ export class TestRunnerService {
 		});

 		// Update status of the test run execution mapping
-		await this.testCaseExecutionRepository.markAsRunning(
-			metadata.testRunId,
-			metadata.pastExecutionId,
+		await this.testCaseExecutionRepository.markAsRunning({
+			testRunId: metadata.testRunId,
+			pastExecutionId: metadata.pastExecutionId,
 			executionId,
-		);
+		});

 		// Wait for the execution to finish
 		const executePromise = this.activeExecutions.getPostExecutePromise(executionId);
@@ -192,7 +219,7 @@ export class TestRunnerService {

 		// Prepare the data to run the evaluation workflow
 		const data = await getRunData(evaluationWorkflow, [evaluationInputData]);
-		data.executionMode = 'evaluation';
+		data.executionMode = 'integrated';

 		// Trigger the evaluation workflow
 		const executionId = await this.workflowRunner.run(data);
@@ -204,11 +231,11 @@ export class TestRunnerService {
 		});

 		// Update status of the test run execution mapping
-		await this.testCaseExecutionRepository.markAsEvaluationRunning(
-			metadata.testRunId,
-			metadata.pastExecutionId,
-			executionId,
-		);
+		await this.testCaseExecutionRepository.markAsEvaluationRunning({
+			testRunId: metadata.testRunId,
+			pastExecutionId: metadata.pastExecutionId,
+			evaluationExecutionId: executionId,
+		});

 		// Wait for the execution to finish
 		const executePromise = this.activeExecutions.getPostExecutePromise(executionId);
@@ -256,9 +283,6 @@ export class TestRunnerService {
 		const workflow = await this.workflowRepository.findById(test.workflowId);
 		assert(workflow, 'Workflow not found');

-		const evaluationWorkflow = await this.workflowRepository.findById(test.evaluationWorkflowId);
-		assert(evaluationWorkflow, 'Evaluation workflow not found');
-
 		// 0. Create new Test Run
 		const testRun = await this.testRunRepository.createTestRun(test.id);
 		assert(testRun, 'Unable to create a test run');
@@ -276,6 +300,12 @@ export class TestRunnerService {

 		const abortSignal = abortController.signal;
 		try {
+			// Get the evaluation workflow
+			const evaluationWorkflow = await this.workflowRepository.findById(test.evaluationWorkflowId);
+			if (!evaluationWorkflow) {
+				throw new TestRunError('EVALUATION_WORKFLOW_NOT_FOUND');
+			}
+
 			///
 			// 1. Make test cases from previous executions
 			///
@@ -294,6 +324,10 @@ export class TestRunnerService {

 			this.logger.debug('Found past executions', { count: pastExecutions.length });

+			if (pastExecutions.length === 0) {
+				throw new TestRunError('PAST_EXECUTIONS_NOT_FOUND');
+			}
+
 			// Add all past-execution mappings to the test run.
 			// This will be used to track the status of each test case and keep the connection between the test run and all related executions (past, current, and evaluation).
 			await this.testCaseExecutionRepository.createBatch(
@@ -365,20 +399,20 @@ export class TestRunnerService {
 				this.logger.debug('Test case execution finished', { pastExecutionId });

 				// In case of a permission check issue, the test case execution will be undefined.
-				// Skip them, increment the failed count and continue with the next test case
-				if (!testCaseExecution) {
+				// If that happens, or if the test case execution produced an error, mark the test case as failed.
+				if (!testCaseExecution || testCaseExecution.data.resultData.error) {
 					await Db.transaction(async (trx) => {
 						await this.testRunRepository.incrementFailed(testRun.id, trx);
-						await this.testCaseExecutionRepository.markAsFailed(testRun.id, pastExecutionId, trx);
+						await this.testCaseExecutionRepository.markAsFailed({
+							testRunId: testRun.id,
+							pastExecutionId,
+							errorCode: 'FAILED_TO_EXECUTE_WORKFLOW',
+							trx,
+						});
 					});
 					continue;
 				}

-				// Update status of the test case execution mapping entry in case of an error
-				if (testCaseExecution.data.resultData.error) {
-					await this.testCaseExecutionRepository.markAsFailed(testRun.id, pastExecutionId);
-				}
-
 				// Collect the results of the test case execution
 				const testCaseRunData = testCaseExecution.data.resultData.runData;

@@ -398,32 +432,67 @@ export class TestRunnerService {
 				this.logger.debug('Evaluation execution finished', { pastExecutionId });

 				// Extract the output of the last node executed in the evaluation workflow
-				const addedMetrics = metrics.addResults(this.extractEvaluationResult(evalExecution));
+				const { addedMetrics, unknownMetrics } = metrics.addResults(
+					this.extractEvaluationResult(evalExecution),
+				);

 				if (evalExecution.data.resultData.error) {
 					await Db.transaction(async (trx) => {
 						await this.testRunRepository.incrementFailed(testRun.id, trx);
-						await this.testCaseExecutionRepository.markAsFailed(testRun.id, pastExecutionId, trx);
+						await this.testCaseExecutionRepository.markAsFailed({
+							testRunId: testRun.id,
+							pastExecutionId,
+							errorCode: 'FAILED_TO_EXECUTE_EVALUATION_WORKFLOW',
+							trx,
+						});
 					});
 				} else {
 					await Db.transaction(async (trx) => {
 						await this.testRunRepository.incrementPassed(testRun.id, trx);
-						await this.testCaseExecutionRepository.markAsCompleted(
-							testRun.id,
-							pastExecutionId,
-							addedMetrics,
-							trx,
-						);
+
+						// Add warning if the evaluation workflow produced an unknown metric
+						if (unknownMetrics.size > 0) {
+							await this.testCaseExecutionRepository.markAsWarning({
+								testRunId: testRun.id,
+								pastExecutionId,
+								errorCode: 'UNKNOWN_METRICS',
+								errorDetails: { unknownMetrics: Array.from(unknownMetrics) },
+							});
+						} else {
+							await this.testCaseExecutionRepository.markAsCompleted({
+								testRunId: testRun.id,
+								pastExecutionId,
+								metrics: addedMetrics,
+								trx,
+							});
+						}
 					});
 				}
 			} catch (e) {
-				// In case of an unexpected error, increment the failed count and continue with the next test case
 				await Db.transaction(async (trx) => {
 					await this.testRunRepository.incrementFailed(testRun.id, trx);
-					await this.testCaseExecutionRepository.markAsFailed(testRun.id, pastExecutionId, trx);
-				});

-				this.errorReporter.error(e);
+					if (e instanceof TestCaseExecutionError) {
+						await this.testCaseExecutionRepository.markAsFailed({
+							testRunId: testRun.id,
+							pastExecutionId,
+							errorCode: e.code,
+							errorDetails: e.extra as IDataObject,
+							trx,
+						});
+					} else {
+						await this.testCaseExecutionRepository.markAsFailed({
+							testRunId: testRun.id,
+							pastExecutionId,
+							errorCode: 'UNKNOWN_ERROR',
+							trx,
+						});

+						// Report unexpected errors
+						this.errorReporter.error(e);
+					}
+				});
 			}
 		}

@@ -435,6 +504,7 @@ export class TestRunnerService {
 				});
 			} else {
 				const aggregatedMetrics = metrics.getAggregatedMetrics();
+
 				await this.testRunRepository.markAsCompleted(testRun.id, aggregatedMetrics);

 				this.logger.debug('Test run finished', { testId: test.id });
@@ -450,7 +520,10 @@ export class TestRunnerService {
 					await this.testRunRepository.markAsCancelled(testRun.id, trx);
 					await this.testCaseExecutionRepository.markAllPendingAsCancelled(testRun.id, trx);
 				});
+			} else if (e instanceof TestRunError) {
+				await this.testRunRepository.markAsError(testRun.id, e.code, e.extra as IDataObject);
 			} else {
+				await this.testRunRepository.markAsError(testRun.id, 'UNKNOWN_ERROR');
 				throw e;
 			}
 		} finally {
@@ -1,8 +1,11 @@
 import assert from 'assert';
 import type { IRunExecutionData, IPinData, IWorkflowBase } from 'n8n-workflow';

+import type { TestCaseExecution } from '@/databases/entities/test-case-execution.ee';
 import type { MockedNodeItem } from '@/databases/entities/test-definition.ee';
 import type { WorkflowEntity } from '@/databases/entities/workflow-entity';
+import type { TestRunFinalResult } from '@/databases/repositories/test-run.repository.ee';
+import { TestCaseExecutionError } from '@/evaluation.ee/test-runner/errors.ee';

 /**
  * Extracts the execution data from the past execution
@@ -41,6 +44,8 @@ export function createPinData(

 		if (nodeData?.[0]?.data?.main?.[0]) {
 			pinData[nodeName] = nodeData[0]?.data?.main?.[0];
+		} else {
+			throw new TestCaseExecutionError('MOCKED_NODE_DOES_NOT_EXIST');
 		}
 	}
 }
@@ -58,3 +63,31 @@ export function getPastExecutionTriggerNode(executionData: IRunExecutionData) {
 		return !data[0].source || data[0].source.length === 0 || data[0].source[0] === null;
 	});
 }
+
+/**
+ * Returns the final result of the test run based on the test case executions.
+ * The final result is the most severe status among all test case executions' statuses.
+ */
+export function getTestRunFinalResult(testCaseExecutions: TestCaseExecution[]): TestRunFinalResult {
+	// Priority of statuses: error > warning > success
+	const severityMap: Record<TestRunFinalResult, number> = {
+		error: 3,
+		warning: 2,
+		success: 1,
+	};
+
+	let finalResult: TestRunFinalResult = 'success';
+
+	for (const testCaseExecution of testCaseExecutions) {
+		if (['error', 'warning'].includes(testCaseExecution.status)) {
+			if (
+				testCaseExecution.status in severityMap &&
+				severityMap[testCaseExecution.status as TestRunFinalResult] > severityMap[finalResult]
+			) {
+				finalResult = testCaseExecution.status as TestRunFinalResult;
+			}
+		}
+	}
+
+	return finalResult;
+}
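A usage sketch of the severity rule, matching the unit tests added earlier in this diff (asStatuses is a hypothetical helper that builds a TestCaseExecution[] with the given statuses; the tests use jest-mock-extended for this):

getTestRunFinalResult(asStatuses(['success', 'success']));          // 'success'
getTestRunFinalResult(asStatuses(['success', 'warning']));          // 'warning'
getTestRunFinalResult(asStatuses(['warning', 'error', 'success'])); // 'error'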
@@ -73,9 +73,11 @@ export class TestRunsController {

 	@Get('/:testDefinitionId/runs/:id')
 	async getOne(req: TestRunsRequest.GetOne) {
+		const { testDefinitionId, id } = req.params;
+
 		await this.getTestDefinition(req);

-		return await this.getTestRun(req);
+		return await this.testRunRepository.getTestRunSummaryById(testDefinitionId, id);
 	}

 	@Get('/:testDefinitionId/runs/:id/cases')
@@ -43,12 +43,15 @@ export interface UpdateTestResponse {
 export interface TestRunRecord {
 	id: string;
 	testDefinitionId: string;
-	status: 'new' | 'running' | 'completed' | 'error' | 'cancelled';
+	status: 'new' | 'running' | 'completed' | 'error' | 'cancelled' | 'warning' | 'success';
 	metrics?: Record<string, number>;
 	createdAt: string;
 	updatedAt: string;
 	runAt: string;
 	completedAt: string;
+	errorCode?: string;
+	errorDetails?: Record<string, unknown>;
+	finalResult?: 'success' | 'error' | 'warning';
 }

 interface GetTestRunParams {
@@ -22,6 +22,17 @@ const locale = useI18n();
 const navigateToRunDetail = (run: TestRunRecord) => emit('getRunDetail', run);
 const selectedRows = ref<TestRunRecord[]>([]);

+// Combine the test run status and finalResult to get the final displayed status
+const runSummaries = computed(() => {
+	return props.runs.map(({ status, finalResult, ...run }) => {
+		if (status === 'completed' && finalResult) {
+			return { ...run, status: finalResult };
+		}
+
+		return { ...run, status };
+	});
+});
+
 const metrics = computed(() => {
 	return props.runs.reduce((acc, run) => {
 		const metricKeys = Object.keys(run.metrics ?? {});
@@ -29,6 +40,19 @@ const metrics = computed(() => {
 	}, [] as string[]);
 });

+const getErrorTooltipLinkRoute = (row: TestRunRecord) => {
+	if (row.errorCode === 'EVALUATION_WORKFLOW_NOT_FOUND') {
+		return {
+			name: VIEWS.TEST_DEFINITION_EDIT,
+			params: {
+				testId: row.testDefinitionId,
+			},
+		};
+	}
+
+	return undefined;
+};
+
 const columns = computed((): Array<TestTableColumn<TestRunRecord>> => {
 	return [
 		{
@@ -51,15 +75,17 @@ const columns = computed((): Array<TestTableColumn<TestRunRecord>> => {
 				{ text: locale.baseText('testDefinition.listRuns.status.error'), value: 'error' },
 				{ text: locale.baseText('testDefinition.listRuns.status.cancelled'), value: 'cancelled' },
 			],
+			errorRoute: getErrorTooltipLinkRoute,
 			filterMethod: (value: string, row: TestRunRecord) => row.status === value,
 		},
 		{
 			prop: 'date',
 			label: locale.baseText('testDefinition.listRuns.runDate'),
 			sortable: true,
-			formatter: (row: TestRunRecord) => convertToDisplayDate(new Date(row.runAt).getTime()),
+			formatter: (row: TestRunRecord) =>
+				convertToDisplayDate(new Date(row.runAt ?? row.createdAt).getTime()),
 			sortMethod: (a: TestRunRecord, b: TestRunRecord) =>
-				new Date(a.runAt).getTime() - new Date(b.runAt).getTime(),
+				new Date(a.runAt ?? a.createdAt).getTime() - new Date(b.runAt ?? b.createdAt).getTime(),
 		},

 		...metrics.value.map((metric) => ({
@@ -104,7 +130,7 @@ async function deleteRuns() {
 		</n8n-button>
 	</div>
 	<TestTableBase
-		:data="runs"
+		:data="runSummaries"
 		:columns="columns"
 		selectable
 		@row-click="navigateToRunDetail"
@@ -1,5 +1,4 @@
 <script setup lang="ts" generic="T">
-import { useI18n } from '@/composables/useI18n';
 import type { TestTableColumn } from './TestTableBase.vue';
 import { useRouter } from 'vue-router';

@@ -12,35 +11,7 @@ defineEmits<{
 	click: [];
 }>();

-const locale = useI18n();
 const router = useRouter();
-interface WithStatus {
-	status: string;
-}
-
-function hasStatus(row: unknown): row is WithStatus {
-	return typeof row === 'object' && row !== null && 'status' in row;
-}
-
-const statusThemeMap: Record<string, string> = {
-	new: 'default',
-	running: 'warning',
-	evaluation_running: 'warning',
-	completed: 'success',
-	error: 'danger',
-	success: 'success',
-	cancelled: 'default',
-};
-
-const statusLabelMap: Record<string, string> = {
-	new: locale.baseText('testDefinition.listRuns.status.new'),
-	running: locale.baseText('testDefinition.listRuns.status.running'),
-	evaluation_running: locale.baseText('testDefinition.listRuns.status.evaluating'),
-	completed: locale.baseText('testDefinition.listRuns.status.completed'),
-	error: locale.baseText('testDefinition.listRuns.status.error'),
-	success: locale.baseText('testDefinition.listRuns.status.success'),
-	cancelled: locale.baseText('testDefinition.listRuns.status.cancelled'),
-};
-
 function hasProperty(row: unknown, prop: string): row is Record<string, unknown> {
 	return typeof row === 'object' && row !== null && prop in row;
@@ -64,14 +35,6 @@ const getCellContent = (column: TestTableColumn<T>, row: T) => {
 		</router-link>
 	</div>

-	<N8nBadge
-		v-else-if="column.prop === 'status' && hasStatus(row)"
-		:theme="statusThemeMap[row.status]"
-		class="mr-4xs"
-	>
-		{{ statusLabelMap[row.status] }}
-	</N8nBadge>
-
 	<div v-else>
 		{{ getCellContent(column, row) }}
 	</div>
@@ -0,0 +1,104 @@
<script setup lang="ts" generic="T">
import N8nTooltip from 'n8n-design-system/components/N8nTooltip';
import type { BaseTextKey } from '@/plugins/i18n';
import type { TestTableColumn } from '@/components/TestDefinition/shared/TestTableBase.vue';
import { useI18n } from '@/composables/useI18n';
import { useRouter } from 'vue-router';

defineProps<{
	column: TestTableColumn<T>;
	row: T & { status: string };
}>();

const locale = useI18n();
const router = useRouter();

interface WithError {
	errorCode: string;
}

function hasError(row: unknown): row is WithError {
	return typeof row === 'object' && row !== null && 'errorCode' in row;
}

const errorTooltipMap: Record<string, BaseTextKey> = {
	// Test case errors
	MOCKED_NODE_DOES_NOT_EXIST: 'testDefinition.runDetail.error.mockedNodeMissing',
	FAILED_TO_EXECUTE_EVALUATION_WORKFLOW: 'testDefinition.runDetail.error.evaluationFailed',
	FAILED_TO_EXECUTE_WORKFLOW: 'testDefinition.runDetail.error.executionFailed',
	TRIGGER_NO_LONGER_EXISTS: 'testDefinition.runDetail.error.triggerNoLongerExists',
	METRICS_MISSING: 'testDefinition.runDetail.error.metricsMissing',
	UNKNOWN_METRICS: 'testDefinition.runDetail.error.unknownMetrics',
	INVALID_METRICS: 'testDefinition.runDetail.error.invalidMetrics',

	// Test run errors
	PAST_EXECUTIONS_NOT_FOUND: 'testDefinition.listRuns.error.noPastExecutions',
	EVALUATION_WORKFLOW_NOT_FOUND: 'testDefinition.listRuns.error.evaluationWorkflowNotFound',
};

// FIXME: move status logic to a parent component
const statusThemeMap: Record<string, string> = {
	new: 'default',
	running: 'warning',
	evaluation_running: 'warning',
	completed: 'success',
	error: 'danger',
	success: 'success',
	warning: 'warning',
	cancelled: 'default',
};

const statusLabelMap: Record<string, string> = {
	new: locale.baseText('testDefinition.listRuns.status.new'),
	running: locale.baseText('testDefinition.listRuns.status.running'),
	evaluation_running: locale.baseText('testDefinition.listRuns.status.evaluating'),
	completed: locale.baseText('testDefinition.listRuns.status.completed'),
	error: locale.baseText('testDefinition.listRuns.status.error'),
	success: locale.baseText('testDefinition.listRuns.status.success'),
	warning: locale.baseText('testDefinition.listRuns.status.warning'),
	cancelled: locale.baseText('testDefinition.listRuns.status.cancelled'),
};

function getErrorTooltip(column: TestTableColumn<T>, row: T): string | undefined {
	if (hasError(row) && errorTooltipMap[row.errorCode]) {
		const tooltipLinkUrl = getErrorTooltipUrl(column, row);

		if (tooltipLinkUrl) {
			return locale.baseText(errorTooltipMap[row.errorCode], {
				interpolate: {
					url: tooltipLinkUrl,
				},
			});
		} else {
			return locale.baseText(errorTooltipMap[row.errorCode]);
		}
	}

	return undefined;
}

function getErrorTooltipUrl(column: TestTableColumn<T>, row: T): string | undefined {
	if (hasError(row) && column.errorRoute?.(row)) {
		return router.resolve(column.errorRoute(row)!).href;
	}

	return undefined;
}
</script>

<template>
	<N8nTooltip
		placement="right"
		:show-after="300"
		:disabled="getErrorTooltip(column, row) === undefined"
	>
		<template #content>
			<div v-n8n-html="getErrorTooltip(column, row)" />
		</template>
		<N8nBadge :theme="statusThemeMap[row.status]" class="mr-4xs">
			{{ statusLabelMap[row.status] }}
		</N8nBadge>
	</N8nTooltip>
</template>

<style scoped lang="scss"></style>
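The new TableStatusCell above resolves a row's errorCode to an i18n key through errorTooltipMap and, when the column supplies an errorRoute, interpolates the resolved href into the translated string. A minimal standalone sketch of that lookup-and-interpolate pattern, where the baseText stand-in and the sample message are hypothetical and not part of this commit:

// Hypothetical stand-in for locale.baseText: looks up a message and replaces {url}.
const messages: Record<string, string> = {
	'testDefinition.runDetail.error.evaluationFailed':
		'Failed to execute the evaluation workflow. <a href="{url}">View evaluation execution</a>.',
};

function baseText(key: string, interpolate?: Record<string, string>): string {
	let text = messages[key] ?? key;
	for (const [name, value] of Object.entries(interpolate ?? {})) {
		text = text.replace(`{${name}}`, value);
	}
	return text;
}

const errorTooltipMap: Record<string, string> = {
	FAILED_TO_EXECUTE_EVALUATION_WORKFLOW: 'testDefinition.runDetail.error.evaluationFailed',
};

function getErrorTooltip(errorCode: string, url?: string): string | undefined {
	const key = errorTooltipMap[errorCode];
	if (!key) return undefined; // unmapped codes produce no tooltip, so the N8nTooltip stays disabled
	return baseText(key, url ? { url } : undefined);
}

// Example: getErrorTooltip('FAILED_TO_EXECUTE_EVALUATION_WORKFLOW', '/workflow/w1/executions/42')
// returns the message with the link target filled in.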
@@ -1,6 +1,7 @@
<script setup lang="ts" generic="T extends object">
import type { RouteLocationRaw } from 'vue-router';
import TableCell from './TableCell.vue';
import TableStatusCell from './TableStatusCell.vue';
import { ElTable, ElTableColumn } from 'element-plus';
import { ref, watch, nextTick, onMounted, onUnmounted } from 'vue';
import type { TableInstance } from 'element-plus';
@@ -22,6 +23,7 @@ export type TestTableColumn<TRow> = {
	filters?: Array<{ text: string; value: string }>;
	filterMethod?: (value: string, row: TRow) => boolean;
	route?: (row: TRow) => RouteLocationRaw | undefined;
	errorRoute?: (row: TRow) => RouteLocationRaw | undefined;
	sortMethod?: (a: TRow, b: TRow) => number;
	openInNewTab?: boolean;
	formatter?: (row: TRow) => string;
@@ -29,6 +31,8 @@

type TableRow = T & { id: string };

type TableRowWithStatus = TableRow & { status: string };

const MIN_TABLE_HEIGHT = 350;
const MAX_TABLE_HEIGHT = 1400;
const props = withDefaults(
@@ -94,6 +98,10 @@ const computeTableHeight = () => {
	tableHeight.value = `${height - 100}px`;
};

function hasStatus(row: unknown): row is TableRowWithStatus {
	return typeof row === 'object' && row !== null && 'status' in row;
}

onMounted(() => {
	computeTableHeight();
@@ -131,8 +139,14 @@ onUnmounted(() => {
				data-test-id="table-column"
			>
				<template #default="{ row }">
					<TableStatusCell
						v-if="column.prop === 'status' && hasStatus(row)"
						:column="column"
						:row="row"
					/>
					<TableCell
						v-else
						:key="row.id + column.prop"
						:column="column"
						:row="row"
						data-test-id="table-cell"
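TestTableBase now branches per cell: rows that pass the hasStatus guard render TableStatusCell, and everything else falls through to TableCell. The guard is a plain user-defined type predicate; a self-contained sketch of how that narrowing behaves, with illustrative sample rows:

interface WithStatus {
	status: string;
}

// User-defined type guard: the `row is WithStatus` return type tells the
// compiler that a true result narrows `row` in the calling scope.
function hasStatus(row: unknown): row is WithStatus {
	return typeof row === 'object' && row !== null && 'status' in row;
}

const rows: unknown[] = [{ id: '1', status: 'success' }, { id: '2' }];
for (const row of rows) {
	if (hasStatus(row)) {
		console.log(row.status); // narrowed: `row.status` type-checks here
	}
}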
@@ -2875,6 +2875,7 @@
		"testDefinition.listRuns.status.cancelled": "Cancelled",
		"testDefinition.listRuns.status.error": "Error",
		"testDefinition.listRuns.status.success": "Success",
		"testDefinition.listRuns.status.warning": "Warning",
		"testDefinition.listRuns.metricsOverTime": "Metrics over time",
		"testDefinition.listRuns.status": "Status",
		"testDefinition.listRuns.runNumber": "Run #",
@@ -2884,11 +2885,20 @@
		"testDefinition.listRuns.noRuns.description": "Run a test to see the results here",
		"testDefinition.listRuns.deleteRuns": "No runs to delete | Delete {count} run | Delete {count} runs",
		"testDefinition.listRuns.noRuns.button": "Run Test",
		"testDefinition.listRuns.error.noPastExecutions": "No executions added to the specified tag",
		"testDefinition.listRuns.error.evaluationWorkflowNotFound": "Selected evaluation workflow does not exist. <br /><a href=\"{url}\">Fix test configuration</a>.",
		"testDefinition.runDetail.ranAt": "Ran at",
		"testDefinition.runDetail.testCase": "Test case",
		"testDefinition.runDetail.testCase.id": "Test case ID",
		"testDefinition.runDetail.testCase.status": "Test case status",
		"testDefinition.runDetail.totalCases": "Total cases",
		"testDefinition.runDetail.error.mockedNodeMissing": "Output for a mocked node does not exist in the benchmark execution. <br /><a href=\"{url}\">Fix test configuration</a>.",
		"testDefinition.runDetail.error.executionFailed": "Failed to execute workflow with benchmark trigger. <br /><a href=\"{url}\" target=\"_blank\">View execution</a>.",
		"testDefinition.runDetail.error.evaluationFailed": "Failed to execute the evaluation workflow. <br /><a href=\"{url}\" target=\"_blank\">View evaluation execution</a>.",
		"testDefinition.runDetail.error.triggerNoLongerExists": "Trigger in benchmark execution no longer exists in the workflow. <br /><a href=\"{url}\" target=\"_blank\">View benchmark</a>.",
		"testDefinition.runDetail.error.metricsMissing": "Metrics defined in the test were not returned by the evaluation workflow. <br /><a href=\"{url}\">Fix test configuration</a>.",
		"testDefinition.runDetail.error.unknownMetrics": "Evaluation workflow returned metrics that are not defined in the test. <br /><a href=\"{url}\">Fix test configuration</a>.",
		"testDefinition.runDetail.error.invalidMetrics": "Evaluation workflow returned invalid metrics. Only numeric values are expected. <br /><a href=\"{url}\" target=\"_blank\">View evaluation execution</a>.",
		"testDefinition.runTest": "Run Test",
		"testDefinition.cancelTestRun": "Cancel Test Run",
		"testDefinition.notImplemented": "This feature is not implemented yet!",
@@ -28,6 +28,65 @@ const filteredTestCases = computed(() => {
	return testCases.value;
});

const getErrorTooltipLinkRoute = (row: TestCaseExecutionRecord) => {
	if (row.errorCode === 'FAILED_TO_EXECUTE_EVALUATION_WORKFLOW') {
		return {
			name: VIEWS.EXECUTION_PREVIEW,
			params: {
				name: test.value?.evaluationWorkflowId,
				executionId: row.evaluationExecutionId,
			},
		};
	} else if (row.errorCode === 'MOCKED_NODE_DOES_NOT_EXIST') {
		return {
			name: VIEWS.TEST_DEFINITION_EDIT,
			params: {
				testId: testId.value,
			},
		};
	} else if (row.errorCode === 'FAILED_TO_EXECUTE_WORKFLOW') {
		return {
			name: VIEWS.EXECUTION_PREVIEW,
			params: {
				name: test.value?.workflowId,
				executionId: row.executionId,
			},
		};
	} else if (row.errorCode === 'TRIGGER_NO_LONGER_EXISTS') {
		return {
			name: VIEWS.EXECUTION_PREVIEW,
			params: {
				name: test.value?.workflowId,
				executionId: row.pastExecutionId,
			},
		};
	} else if (row.errorCode === 'METRICS_MISSING') {
		return {
			name: VIEWS.TEST_DEFINITION_EDIT,
			params: {
				testId: testId.value,
			},
		};
	} else if (row.errorCode === 'UNKNOWN_METRICS') {
		return {
			name: VIEWS.TEST_DEFINITION_EDIT,
			params: {
				testId: testId.value,
			},
		};
	} else if (row.errorCode === 'INVALID_METRICS') {
		return {
			name: VIEWS.EXECUTION_PREVIEW,
			params: {
				name: test.value?.evaluationWorkflowId,
				executionId: row.evaluationExecutionId,
			},
		};
	}

	return undefined;
};

const columns = computed(
	(): Array<TestTableColumn<TestCaseExecutionRecord>> => [
		{
@@ -60,6 +119,7 @@ const columns = computed(
				{ text: locale.baseText('testDefinition.listRuns.status.success'), value: 'success' },
				{ text: locale.baseText('testDefinition.listRuns.status.error'), value: 'error' },
			],
			errorRoute: getErrorTooltipLinkRoute,
			filterMethod: (value: string, row: TestCaseExecutionRecord) => row.status === value,
		},
		...Object.keys(run.value?.metrics ?? {}).map((metric) => ({
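The getErrorTooltipLinkRoute chain above maps each error code to one of two destinations: an execution preview (pointing at the tested, evaluation, or benchmark execution) or the test editor. The same mapping can also be read as data; a hypothetical condensation of it, where the types and table are illustrative rather than part of the commit:

// Which view each error code links to, and which execution id field applies.
type ErrorLink =
	| { view: 'TEST_DEFINITION_EDIT' }
	| {
			view: 'EXECUTION_PREVIEW';
			executionIdField: 'executionId' | 'evaluationExecutionId' | 'pastExecutionId';
	  };

const errorLinks: Record<string, ErrorLink> = {
	FAILED_TO_EXECUTE_EVALUATION_WORKFLOW: { view: 'EXECUTION_PREVIEW', executionIdField: 'evaluationExecutionId' },
	MOCKED_NODE_DOES_NOT_EXIST: { view: 'TEST_DEFINITION_EDIT' },
	FAILED_TO_EXECUTE_WORKFLOW: { view: 'EXECUTION_PREVIEW', executionIdField: 'executionId' },
	TRIGGER_NO_LONGER_EXISTS: { view: 'EXECUTION_PREVIEW', executionIdField: 'pastExecutionId' },
	METRICS_MISSING: { view: 'TEST_DEFINITION_EDIT' },
	UNKNOWN_METRICS: { view: 'TEST_DEFINITION_EDIT' },
	INVALID_METRICS: { view: 'EXECUTION_PREVIEW', executionIdField: 'evaluationExecutionId' },
};

Either form keeps exactly one route target per code; the table form just makes the code-to-destination mapping visible at a glance.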