fix(core): Node mocking for evaluation executions (no-changelog) (#12541)

Eugene · 2025-01-13 10:45:29 +01:00 · committed by GitHub
parent 865fc21276
commit dcd7feb973
7 changed files with 48 additions and 17 deletions

View file · TestRunnerService unit tests

@@ -19,7 +19,7 @@ import type { WorkflowRepository } from '@/databases/repositories/workflow.repos
 import { LoadNodesAndCredentials } from '@/load-nodes-and-credentials';
 import { NodeTypes } from '@/node-types';
 import type { WorkflowRunner } from '@/workflow-runner';
-import { mockInstance } from '@test/mocking';
+import { mockInstance, mockLogger } from '@test/mocking';
 import { mockNodeTypesData } from '@test-integration/utils/node-types-data';
 import { TestRunnerService } from '../test-runner.service.ee';

@@ -129,6 +129,9 @@ function mockEvaluationExecutionData(metrics: Record<string, GenericValue>) {
   });
 }

+const errorReporter = mock<ErrorReporter>();
+const logger = mockLogger();
+
 describe('TestRunnerService', () => {
   const executionRepository = mock<ExecutionRepository>();
   const workflowRepository = mock<WorkflowRepository>();

@@ -176,6 +179,7 @@ describe('TestRunnerService', () => {
   test('should create an instance of TestRunnerService', async () => {
     const testRunnerService = new TestRunnerService(
+      logger,
       workflowRepository,
       workflowRunner,
       executionRepository,

@@ -183,7 +187,7 @@ describe('TestRunnerService', () => {
       testRunRepository,
       testMetricRepository,
       mockNodeTypes,
-      mock<ErrorReporter>(),
+      errorReporter,
     );

     expect(testRunnerService).toBeInstanceOf(TestRunnerService);

@@ -191,6 +195,7 @@ describe('TestRunnerService', () => {
   test('should create and run test cases from past executions', async () => {
     const testRunnerService = new TestRunnerService(
+      logger,
       workflowRepository,
       workflowRunner,
       executionRepository,

@@ -198,7 +203,7 @@ describe('TestRunnerService', () => {
       testRunRepository,
       testMetricRepository,
       mockNodeTypes,
-      mock<ErrorReporter>(),
+      errorReporter,
     );

     workflowRepository.findById.calledWith('workflow-under-test-id').mockResolvedValueOnce({

@@ -229,6 +234,7 @@ describe('TestRunnerService', () => {
   test('should run both workflow under test and evaluation workflow', async () => {
     const testRunnerService = new TestRunnerService(
+      logger,
       workflowRepository,
       workflowRunner,
       executionRepository,

@@ -236,7 +242,7 @@ describe('TestRunnerService', () => {
       testRunRepository,
       testMetricRepository,
       mockNodeTypes,
-      mock<ErrorReporter>(),
+      errorReporter,
     );

     workflowRepository.findById.calledWith('workflow-under-test-id').mockResolvedValueOnce({

@@ -330,6 +336,7 @@ describe('TestRunnerService', () => {
   test('should properly count passed and failed executions', async () => {
     const testRunnerService = new TestRunnerService(
+      logger,
       workflowRepository,
       workflowRunner,
       executionRepository,

@@ -337,7 +344,7 @@ describe('TestRunnerService', () => {
       testRunRepository,
       testMetricRepository,
       mockNodeTypes,
-      mock<ErrorReporter>(),
+      errorReporter,
     );

     workflowRepository.findById.calledWith('workflow-under-test-id').mockResolvedValueOnce({

@@ -388,6 +395,7 @@ describe('TestRunnerService', () => {
   test('should properly count failed test executions', async () => {
     const testRunnerService = new TestRunnerService(
+      logger,
       workflowRepository,
       workflowRunner,
       executionRepository,

@@ -395,7 +403,7 @@ describe('TestRunnerService', () => {
       testRunRepository,
       testMetricRepository,
       mockNodeTypes,
-      mock<ErrorReporter>(),
+      errorReporter,
     );

     workflowRepository.findById.calledWith('workflow-under-test-id').mockResolvedValueOnce({

@@ -442,6 +450,7 @@ describe('TestRunnerService', () => {
   test('should properly count failed evaluations', async () => {
     const testRunnerService = new TestRunnerService(
+      logger,
       workflowRepository,
       workflowRunner,
       executionRepository,

@@ -449,7 +458,7 @@ describe('TestRunnerService', () => {
       testRunRepository,
       testMetricRepository,
       mockNodeTypes,
-      mock<ErrorReporter>(),
+      errorReporter,
     );

     workflowRepository.findById.calledWith('workflow-under-test-id').mockResolvedValueOnce({

@@ -500,6 +509,7 @@ describe('TestRunnerService', () => {
   test('should specify correct start nodes when running workflow under test', async () => {
     const testRunnerService = new TestRunnerService(
+      logger,
       workflowRepository,
       workflowRunner,
       executionRepository,

@@ -507,7 +517,7 @@ describe('TestRunnerService', () => {
       testRunRepository,
       testMetricRepository,
       mockNodeTypes,
-      mock<ErrorReporter>(),
+      errorReporter,
     );

     workflowRepository.findById.calledWith('workflow-under-test-id').mockResolvedValueOnce({

@@ -574,6 +584,7 @@ describe('TestRunnerService', () => {
   test('should properly choose trigger and start nodes', async () => {
     const testRunnerService = new TestRunnerService(
+      logger,
       workflowRepository,
       workflowRunner,
       executionRepository,

@@ -581,7 +592,7 @@ describe('TestRunnerService', () => {
       testRunRepository,
       testMetricRepository,
       mockNodeTypes,
-      mock<ErrorReporter>(),
+      errorReporter,
     );

     const startNodesData = (testRunnerService as any).getStartNodesData(

@@ -599,6 +610,7 @@ describe('TestRunnerService', () => {
   test('should properly choose trigger and start nodes 2', async () => {
     const testRunnerService = new TestRunnerService(
+      logger,
       workflowRepository,
       workflowRunner,
       executionRepository,

@@ -606,7 +618,7 @@ describe('TestRunnerService', () => {
       testRunRepository,
       testMetricRepository,
       mockNodeTypes,
-      mock<ErrorReporter>(),
+      errorReporter,
     );

     const startNodesData = (testRunnerService as any).getStartNodesData(
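The hunks above also show why the mocks were hoisted: every test constructs its own TestRunnerService, so the logger and error reporter are now created once at module scope and shared, instead of calling mock<ErrorReporter>() inline in each test. A minimal sketch of that pattern, assuming jest-mock-extended; the Logger and ErrorReporter shapes below are simplified stand-ins, and this mockLogger only approximates the helper imported from '@test/mocking':

import { mock } from 'jest-mock-extended';

// Simplified stand-ins for the real n8n-core types.
interface Logger {
  debug(message: string, meta?: Record<string, unknown>): void;
}
interface ErrorReporter {
  error(error: unknown): void;
}

// Approximation of mockLogger() from '@test/mocking': a jest-mock-extended
// proxy whose methods are all no-op jest mocks.
const mockLogger = (): Logger => mock<Logger>();

// Created once and reused by every `new TestRunnerService(...)` call
// in the suite.
const errorReporter = mock<ErrorReporter>();
const logger = mockLogger();

// A shared instance also lets assertions target it directly, e.g.:
// expect(errorReporter.error).not.toHaveBeenCalled();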

View file · TestRunnerService (test-runner.service.ee.ts)

@@ -1,6 +1,6 @@
 import { Service } from '@n8n/di';
 import { parse } from 'flatted';
-import { ErrorReporter } from 'n8n-core';
+import { ErrorReporter, Logger } from 'n8n-core';
 import { NodeConnectionType, Workflow } from 'n8n-workflow';
 import type {
   IDataObject,

@@ -39,6 +39,7 @@ import { createPinData, getPastExecutionTriggerNode } from './utils.ee';
 @Service()
 export class TestRunnerService {
   constructor(
+    private readonly logger: Logger,
     private readonly workflowRepository: WorkflowRepository,
     private readonly workflowRunner: WorkflowRunner,
     private readonly executionRepository: ExecutionRepository,

@@ -115,8 +116,9 @@
       executionMode: 'evaluation',
       runData: {},
       pinData,
-      workflowData: workflow,
+      workflowData: { ...workflow, pinData },
       userId,
+      partialExecutionVersion: '1',
     };

     // Trigger the workflow under test with mocked data
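Two details of this payload hunk are easy to miss: pinData is now embedded into workflowData as well (not only passed at the top level), and partialExecutionVersion: '1' is set on evaluation runs. A sketch of the shape, under illustrative types rather than n8n's real interfaces:

// Simplified, illustrative types; not n8n's actual interfaces.
interface WorkflowEntity {
  id: string;
  nodes: unknown[];
  pinData?: Record<string, unknown[]>;
}

interface EvaluationExecutionPayload {
  executionMode: 'evaluation';
  runData: Record<string, unknown>;
  pinData: Record<string, unknown[]>;
  workflowData: WorkflowEntity;
  userId: string;
  partialExecutionVersion: string;
}

function buildEvaluationPayload(
  workflow: WorkflowEntity,
  pinData: Record<string, unknown[]>,
  userId: string,
): EvaluationExecutionPayload {
  return {
    executionMode: 'evaluation',
    runData: {},
    pinData,
    // Spreading pinData into workflowData matters because some consumers
    // read workflowData.pinData rather than the top-level field; see the
    // executeWebhook and WorkflowRunner hunks below.
    workflowData: { ...workflow, pinData },
    userId,
    partialExecutionVersion: '1',
  };
}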
@@ -203,6 +205,8 @@
   * Creates a new test run for the given test definition.
   */
  async runTest(user: User, test: TestDefinition): Promise<void> {
+   this.logger.debug('Starting new test run', { testId: test.id });
+
    const workflow = await this.workflowRepository.findById(test.workflowId);
    assert(workflow, 'Workflow not found');

@@ -227,6 +231,8 @@
      .andWhere('execution.workflowId = :workflowId', { workflowId: test.workflowId })
      .getMany();

+   this.logger.debug('Found past executions', { count: pastExecutions.length });
+
    // Get the metrics to collect from the evaluation workflow
    const testMetricNames = await this.getTestMetricNames(test.id);

@@ -238,6 +244,8 @@
    const metrics = new EvaluationMetrics(testMetricNames);

    for (const { id: pastExecutionId } of pastExecutions) {
+     this.logger.debug('Running test case', { pastExecutionId });
+
      try {
        // Fetch past execution with data
        const pastExecution = await this.executionRepository.findOne({

@@ -257,6 +265,8 @@
          user.id,
        );

+       this.logger.debug('Test case execution finished', { pastExecutionId });
+
        // In case of a permission check issue, the test case execution will be undefined.
        // Skip them, increment the failed count and continue with the next test case
        if (!testCaseExecution) {

@@ -279,6 +289,8 @@
        );
        assert(evalExecution);

+       this.logger.debug('Evaluation execution finished', { pastExecutionId });
+
        metrics.addResults(this.extractEvaluationResult(evalExecution));

        if (evalExecution.data.resultData.error) {

@@ -297,5 +309,7 @@
    const aggregatedMetrics = metrics.getAggregatedMetrics();

    await this.testRunRepository.markAsCompleted(testRun.id, aggregatedMetrics);
+
+   this.logger.debug('Test run finished', { testId: test.id });
  }
 }
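Read together, the runTest additions trace each stage of a run with structured debug logs keyed by testId or pastExecutionId, which keeps interleaved test-case output filterable. A condensed, illustrative outline of where the log points sit (the surrounding logic is elided):

interface Logger {
  debug(message: string, meta?: Record<string, unknown>): void;
}

// Condensed outline only; error handling and metric collection elided.
async function runTestOutline(
  logger: Logger,
  test: { id: string },
  pastExecutions: Array<{ id: string }>,
): Promise<void> {
  logger.debug('Starting new test run', { testId: test.id });
  logger.debug('Found past executions', { count: pastExecutions.length });

  for (const { id: pastExecutionId } of pastExecutions) {
    logger.debug('Running test case', { pastExecutionId });
    // ...re-run the workflow under test with mocked (pinned) node data...
    logger.debug('Test case execution finished', { pastExecutionId });
    // ...run the evaluation workflow over the result, collect metrics...
    logger.debug('Evaluation execution finished', { pastExecutionId });
  }

  logger.debug('Test run finished', { testId: test.id });
}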

View file · ManualExecutionService

@@ -71,7 +71,11 @@ export class ManualExecutionService {
       },
     };
-    const workflowExecute = new WorkflowExecute(additionalData, 'manual', executionData);
+    const workflowExecute = new WorkflowExecute(
+      additionalData,
+      data.executionMode,
+      executionData,
+    );

     return workflowExecute.processRunExecutionData(workflow);
   } else if (
     data.runData === undefined ||
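This is the core of the mocking fix: the execution mode was hardcoded to 'manual' here, so evaluation executions lost their mode before reaching the pin-data checks below. A small sketch of the bug shape, using a hypothetical Executor in place of n8n's WorkflowExecute:

type WorkflowExecuteMode = 'manual' | 'evaluation' | 'trigger' | 'webhook';

// Hypothetical stand-in for WorkflowExecute; only the mode matters here.
class Executor {
  constructor(
    private readonly additionalData: unknown,
    readonly mode: WorkflowExecuteMode,
    private readonly executionData?: unknown,
  ) {}
}

declare const data: { executionMode: WorkflowExecuteMode };

// Before: every execution through this branch claimed to be 'manual',
// even when the caller requested 'evaluation'.
const before = new Executor({}, 'manual', {});

// After: the caller's mode is threaded through unchanged, so downstream
// pin-data checks can recognize evaluation runs.
const after = new Executor({}, data.executionMode, {});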

View file · executeWebhook

@@ -454,7 +454,7 @@ export async function executeWebhook(
   }
   let pinData: IPinData | undefined;
-  const usePinData = executionMode === 'manual';
+  const usePinData = ['manual', 'evaluation'].includes(executionMode);

   if (usePinData) {
     pinData = workflowData.pinData;
     runExecutionData.resultData.pinData = pinData;
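Here pin data, which evaluation uses to mock node outputs, used to be honored only for manual executions; evaluation runs now qualify as well. The predicate could be captured in a helper like the following sketch (the helper name is mine; the PR inlines the includes() check here and again in WorkflowRunner below):

type WorkflowExecuteMode = 'manual' | 'evaluation' | 'trigger' | 'webhook' | 'retry';

// Hypothetical helper; the PR inlines this check at each call site.
function shouldUsePinData(mode: WorkflowExecuteMode): boolean {
  // manual: the user iterates in the editor and expects pins to apply.
  // evaluation: test runs deliberately substitute pinned (mocked) outputs.
  // Production modes (trigger, webhook, ...) must ignore pins.
  return ['manual', 'evaluation'].includes(mode);
}

// Usage mirroring executeWebhook above:
// const usePinData = shouldUsePinData(executionMode);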

View file · WorkflowRunner

@@ -234,7 +234,7 @@ export class WorkflowRunner {
     }

     let pinData: IPinData | undefined;
-    if (data.executionMode === 'manual') {
+    if (['manual', 'evaluation'].includes(data.executionMode)) {
       pinData = data.pinData ?? data.workflowData.pinData;
     }

View file · useCanvasOperations

@@ -1923,7 +1923,7 @@ export function useCanvasOperations({ router }: { router: ReturnType<typeof useR
     workflowsStore.setWorkflowExecutionData(data);

-    if (data.mode !== 'manual') {
+    if (!['manual', 'evaluation'].includes(data.mode)) {
       workflowsStore.setWorkflowPinData({});
     }
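On the frontend the same predicate appears inverted: loading a production execution onto the canvas wipes pin data so stale pins are not mistaken for real output, while manual and now evaluation executions keep theirs, since their results were produced with those pins. A sketch against a hypothetical minimal store interface:

// Hypothetical minimal slice of the workflows store used by the canvas.
interface WorkflowsStore {
  setWorkflowExecutionData(data: { mode: string }): void;
  setWorkflowPinData(pinData: Record<string, unknown[]>): void;
}

function applyExecutionToCanvas(store: WorkflowsStore, data: { mode: string }): void {
  store.setWorkflowExecutionData(data);

  // Keep pins only for modes whose results were produced with pinned data.
  if (!['manual', 'evaluation'].includes(data.mode)) {
    store.setWorkflowPinData({});
  }
}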

View file · NodeView

@@ -1385,7 +1385,8 @@ async function onPostMessageReceived(messageEvent: MessageEvent) {
   try {
     // If this NodeView is used in preview mode (in iframe) it will not have access to the main app store
     // so everything it needs has to be sent using post messages and passed down to child components
-    isProductionExecutionPreview.value = json.executionMode !== 'manual';
+    isProductionExecutionPreview.value =
+      json.executionMode !== 'manual' && json.executionMode !== 'evaluation';

     await onOpenExecution(json.executionId);
     canOpenNDV.value = json.canOpenNDV ?? true;
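With this commit the "evaluation behaves like manual" predicate is inlined at four call sites across backend and frontend (executeWebhook, WorkflowRunner, useCanvasOperations, NodeView). If the list ever grows, a shared type guard would keep it in one place; a hedged sketch of that possible refactor, not something this PR does (the mode union below is approximate):

// Approximate mode union; the real WorkflowExecuteMode lives in n8n-workflow.
type WorkflowExecuteMode =
  | 'manual'
  | 'evaluation'
  | 'trigger'
  | 'webhook'
  | 'retry'
  | 'error'
  | 'integrated'
  | 'internal'
  | 'cli';

const MANUAL_LIKE_MODES = ['manual', 'evaluation'] as const;
type ManualLikeMode = (typeof MANUAL_LIKE_MODES)[number];

// One predicate instead of four inlined array literals.
function isManualLike(mode: WorkflowExecuteMode): mode is ManualLikeMode {
  return (MANUAL_LIKE_MODES as readonly string[]).includes(mode);
}

// e.g. WorkflowRunner: if (isManualLike(data.executionMode)) { ... }
// e.g. NodeView:       isProductionExecutionPreview.value = !isManualLike(json.executionMode);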