/* eslint-disable @typescript-eslint/no-unsafe-argument */
/* eslint-disable @typescript-eslint/no-unsafe-member-access */
/* eslint-disable @typescript-eslint/no-shadow */
/* eslint-disable @typescript-eslint/no-unsafe-assignment */
import type { IProcessMessage } from 'n8n-core';
import { WorkflowExecute } from 'n8n-core';

import type {
  ExecutionError,
  IDeferredPromise,
  IExecuteResponsePromiseData,
  IRun,
  WorkflowExecuteMode,
  WorkflowHooks,
} from 'n8n-workflow';
import {
  ErrorReporterProxy as ErrorReporter,
  LoggerProxy as Logger,
  Workflow,
  WorkflowOperationError,
} from 'n8n-workflow';

import PCancelable from 'p-cancelable';
import { join as pathJoin } from 'path';
import { fork } from 'child_process';

import { ActiveExecutions } from '@/ActiveExecutions';
import config from '@/config';
import { ExternalHooks } from '@/ExternalHooks';
import type {
  IExecutionResponse,
  IProcessMessageDataHook,
  IWorkflowExecutionDataProcess,
  IWorkflowExecutionDataProcessWithExecution,
} from '@/Interfaces';
import { NodeTypes } from '@/NodeTypes';
import type { Job, JobData, JobQueue, JobResponse } from '@/Queue';
// eslint-disable-next-line import/no-cycle
import { Queue } from '@/Queue';
import * as WebhookHelpers from '@/WebhookHelpers';
// eslint-disable-next-line import/no-cycle
import * as WorkflowHelpers from '@/WorkflowHelpers';
// eslint-disable-next-line import/no-cycle
import * as WorkflowExecuteAdditionalData from '@/WorkflowExecuteAdditionalData';
import { generateFailedExecutionFromError } from '@/WorkflowHelpers';
import { initErrorHandling } from '@/ErrorReporting';
import { PermissionChecker } from '@/UserManagement/PermissionChecker';
import { Push } from '@/push';
import { eventBus } from './eventbus';
import { recoverExecutionDataFromEventLogMessages } from './eventbus/MessageEventBus/recoverEvents';
import { Container } from 'typedi';
import { InternalHooks } from './InternalHooks';
import { ExecutionRepository } from '@db/repositories';

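/**
 * Starts workflow executions and routes them to the appropriate runner:
 * the main process, a forked subprocess, or (in queue mode) the job queue.
 */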
export class WorkflowRunner {
  activeExecutions: ActiveExecutions;

  push: Push;

  jobQueue: JobQueue;

  constructor() {
    this.push = Container.get(Push);
    this.activeExecutions = Container.get(ActiveExecutions);
  }

  /**
   * The child process sent a hook message, so execute the appropriate hook.
   */
  processHookMessage(workflowHooks: WorkflowHooks, hookData: IProcessMessageDataHook) {
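    // Relays a hook invocation received from the execution (child) process to the
    // in-process WorkflowHooks. Fire-and-forget: the async hook functions are not awaited.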
    void workflowHooks.executeHookFunctions(hookData.hook, hookData.parameters);
  }

  /**
   * The process errored, so register the execution as failed.
   */
  async processError(
    error: ExecutionError,
    startedAt: Date,
    executionMode: WorkflowExecuteMode,
    executionId: string,
    hooks?: WorkflowHooks,
  ) {
    ErrorReporter.error(error);

    const fullRunData: IRun = {
      data: {
        resultData: {
          error: {
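            // Error#message and Error#stack are non-enumerable, so the spread alone
            // would drop them; copy them across explicitly.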
            ...error,
            message: error.message,
            stack: error.stack,
          },
          runData: {},
        },
      },
      finished: false,
      mode: executionMode,
      startedAt,
      stoppedAt: new Date(),
      status: 'error',
    };

    // The following will attempt to recover runData from the event logs.
    // Note that this only works as long as the event logs actually contain the events
    // from this workflow execution. Since processError runs almost immediately after the
    // workflow execution has failed, it is likely that the event logs do contain those messages.
    try {
      // Search for messages for this executionId in the event logs
      const eventLogMessages = await eventBus.getEventsByExecutionId(executionId);
      // Attempt to recover better runData from these messages (but don't update the execution db entry yet)
      if (eventLogMessages.length > 0) {
        const eventLogExecutionData = await recoverExecutionDataFromEventLogMessages(
          executionId,
          eventLogMessages,
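          // `false`: recover only; deliberately do not write the recovered data
          // back to the execution's DB entry at this point.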
          false,
        );
        if (eventLogExecutionData) {
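          // Having to reconstruct the run data from the event log implies the execution
          // died unexpectedly, so mark the run as 'crashed' rather than a plain 'error'.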
          fullRunData.data.resultData.runData = eventLogExecutionData.resultData.runData;
          fullRunData.status = 'crashed';
        }
      }

      const executionFlattedData = await Container.get(ExecutionRepository).findSingleExecution(
        executionId,
        {
          includeData: true,
        },
      );

      if (executionFlattedData) {
        void Container.get(InternalHooks).onWorkflowCrashed(
          executionId,
          executionMode,
          executionFlattedData?.workflowData,
          // TODO: get metadata to be sent here
          // executionFlattedData?.metadata,
        );
      }
    } catch {
      // Ignore errors
    }

    // Remove the execution from the active executions with the (empty) run data;
    // this marks the execution as failed.
    this.activeExecutions.remove(executionId, fullRunData);
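
    // Let the lifecycle hooks see the final (failed) run data, e.g. so the
    // execution gets persisted and any listeners are notified.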
    if (hooks) {
      await hooks.executeHookFunctions('workflowExecuteAfter', [fullRunData]);
    }
  }

  /**
   * Run the workflow
   *
   * @param {boolean} [loadStaticData] If set, the static data will be loaded from
   * the workflow and added to the input data
   */
  async run(
    data: IWorkflowExecutionDataProcess,
    loadStaticData?: boolean,
    realtime?: boolean,
    executionId?: string,
    responsePromise?: IDeferredPromise<IExecuteResponsePromiseData>,
  ): Promise<string> {
    const executionsMode = config.getEnv('executions.mode');
    const executionsProcess = config.getEnv('executions.process');

    await initErrorHandling();

    if (executionsMode === 'queue') {
      const queue = Container.get(Queue);
      this.jobQueue = queue.getBullObjectInstance();
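      // In queue mode, executions are handed off to a Bull queue (backed by Redis)
      // and picked up by separate worker processes.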
    }

    if (executionsMode === 'queue' && data.executionMode !== 'manual') {
      // Do not run "manual" executions in bull because sending events to the
      // frontend would not be possible
      executionId = await this.enqueueExecution(
        data,
        loadStaticData,
        realtime,
        executionId,
        responsePromise,
      );
    } else {
      if (executionsProcess === 'main') {
        executionId = await this.runMainProcess(data, loadStaticData, executionId, responsePromise);
      } else {
        executionId = await this.runSubprocess(data, loadStaticData, executionId, responsePromise);
      }
      void Container.get(InternalHooks).onWorkflowBeforeExecute(executionId, data);
    }

    // Only run these here when not in queue mode, or when the execution is manual,
    // since in queue mode these calls are made by the worker directly.
    if (executionsMode !== 'queue' || data.executionMode === 'manual') {
      const postExecutePromise = this.activeExecutions.getPostExecutePromise(executionId);
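      // `postExecutePromise` resolves with the full run data once the execution has finished.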
      const externalHooks = Container.get(ExternalHooks);
      postExecutePromise
        .then(async (executionData) => {
          void Container.get(InternalHooks).onWorkflowPostExecute(
            executionId!,
            data.workflowData,
            executionData,
            data.userId,
          );
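          // Give any registered external hook scripts a chance to react to the finished execution.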
          if (externalHooks.exists('workflow.postExecute')) {
            try {
              await externalHooks.run('workflow.postExecute', [
                executionData,
                data.workflowData,
                executionId,
              ]);
            } catch (error) {
              ErrorReporter.error(error);
              console.error('There was a problem running hook "workflow.postExecute"', error);
            }
          }
        })
        .catch((error) => {
          ErrorReporter.error(error);
          console.error('There was a problem running internal hook "onWorkflowPostExecute"', error);
        });
    }

    return executionId;
  }

  /**
   * Run the workflow in the current process
   *
   * @param {boolean} [loadStaticData] If set, the static data will be loaded from
   * the workflow and added to the input data
   */
  async runMainProcess(
    data: IWorkflowExecutionDataProcess,
    loadStaticData?: boolean,
    restartExecutionId?: string,
    responsePromise?: IDeferredPromise<IExecuteResponsePromiseData>,
  ): Promise<string> {
    const workflowId = data.workflowData.id;
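    // Workflow static data persists between executions (triggers use it to remember
    // state such as the last item seen), so load the latest copy when requested.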
    if (loadStaticData === true && workflowId) {
      data.workflowData.staticData = await WorkflowHelpers.getStaticDataById(workflowId);
    }

    const nodeTypes = Container.get(NodeTypes);

    // Soft timeout to stop the workflow execution after the currently running node.
    // The `workflowTimeout` is added to `additionalData` so that the timeout also
    // applies to executions with nested workflows.
    let executionTimeout: NodeJS.Timeout;

    const workflowSettings = data.workflowData.settings ?? {};
    let workflowTimeout = workflowSettings.executionTimeout ?? config.getEnv('executions.timeout'); // initialize with default
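    // A non-positive value disables the timeout; positive values are capped at the
    // instance-wide `executions.maxTimeout`.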
    if (workflowTimeout > 0) {
      workflowTimeout = Math.min(workflowTimeout, config.getEnv('executions.maxTimeout'));
    }

    const workflow = new Workflow({
      id: workflowId,
      name: data.workflowData.name,
      nodes: data.workflowData.nodes,
      connections: data.workflowData.connections,
      active: data.workflowData.active,
      nodeTypes,
      staticData: data.workflowData.staticData,
      settings: workflowSettings,
    });
    const additionalData = await WorkflowExecuteAdditionalData.getBase(
feat: Add User Management (#2636)
* ✅ adjust tests
* 🛠 refactor user invites to be indempotent (#2791)
* 🔐 Encrypt SMTP pass for user management backend (#2793)
* :package: Add crypto-js to /cli
* :package: Update package-lock.json
* :sparkles: Create type for SMTP config
* :zap: Encrypt SMTP pass
* :zap: Update format for `userManagement.emails.mode`
* :zap: Update format for `binaryDataManager.mode`
* :zap: Update format for `logs.level`
* :fire: Remove logging
* :shirt: Fix lint
* 👰 n8n 2826 um wedding FE<>BE (#2789)
* remove mocks
* update authorization func
* lock down default role
* 🐛 fix requiring authentication for OPTIONS requests
* :bug: fix cors and cookie issues in dev
* update setup route
Co-authored-by: Ben Hesseldieck <b.hesseldieck@gmail.com>
* update telemetry
* 🐛 preload role for users
* :bug: remove auth for password reset routes
* 🐛 fix forgot-password flow
* :zap: allow workflow tag disabling
* update telemetry init
* add reset
* clear error notifications on signin
* remove load settings from node view
* remove user id from user state
* inherit existing user props
* go back in history on button click
* use replace to force redirect
* update stories
* :zap: add env check for tag create
* :test_tube: Add `/users` tests for user management backend (#2790)
* :zap: Refactor users namespace
* :zap: Adjust fillout endpoint
* :zap: Refactor initTestServer arg
* :pencil2: Specify agent type
* :pencil2: Specify role type
* :zap: Tighten `/users/:id` check
* :sparkles: Add initial tests
* :truck: Reposition init server map
* :zap: Set constants in `validatePassword()`
* :zap: Tighten `/users/:id` check
* :zap: Improve checks in `/users/:id`
* :sparkles: Add tests for `/users/:id`
* :package: Update package-lock.json
* :zap: Simplify expectation
* :zap: Reuse util for authless agent
* :truck: Make role names consistent
* :blue_book: Tighten namespaces map type
* :fire: Remove unneeded default arg
* :sparkles: Add tests for `POST /users`
* :blue_book: Create test SMTP account type
* :pencil2: Improve wording
* :art: Formatting
* :fire: Remove temp fix
* :zap: Replace helper with config call
* :zap: Fix failing tests
* :fire: Remove outdated test
* :fire: Remove unused helper
* :zap: Increase readability of domain fetcher
* :zap: Refactor payload validation
* :fire: Remove repetition
* :rewind: Restore logging
* :zap: Initialize logger in tests
* :fire: Remove redundancy from check
* :truck: Move `globalOwnerRole` fetching to global scope
* :fire: Remove unused imports
* :truck: Move random utils to own module
* :truck: Move test types to own module
* :pencil2: Add dividers to utils
* :pencil2: Reorder `initTestServer` param docstring
* :pencil2: Add TODO comment
* :zap: Dry up member creation
* :zap: Tighten search criteria
* :test_tube: Add expectation to `GET /users`
* :zap: Create role fetcher utils
* :zap: Create one more role fetch util
* :fire: Remove unneeded DB query
* :test_tube: Add expectation to `POST /users`
* :test_tube: Add expectation to `DELETE /users/:id`
* :test_tube: Add another expectation to `DELETE /users/:id`
* :test_tube: Add expectations to `DELETE /users/:id`
* :test_tube: Adjust expectations in `POST /users/:id`
* :test_tube: Add expectations to `DELETE /users/:id`
* :shirt: Fix build
* :zap: Update method
* :blue_book: Fix `userToDelete` type
* :zap: Refactor `createAgent()`
* :zap: Make role fetching global
* :zap: Optimize roles fetching
* :zap: Centralize member creation
* :zap: Refactor truncation helper
* :test_tube: Add teardown to `DELETE /users/:id`
* :test_tube: Add DB expectations to users tests
* :fire: Remove pass validation due to hash
* :pencil2: Improve pass validation error message
* :zap: Improve owner pass validation
* :zap: Create logger initialization helper
* :zap: Optimize helpers
* :zap: Restructure `getAllRoles` helper
* :test_tube: Add password reset flow tests for user management backend (#2807)
* :zap: Refactor users namespace
* :zap: Adjust fillout endpoint
* :zap: Refactor initTestServer arg
* :pencil2: Specify agent type
* :pencil2: Specify role type
* :zap: Tighten `/users/:id` check
* :sparkles: Add initial tests
* :truck: Reposition init server map
* :zap: Set constants in `validatePassword()`
* :zap: Tighten `/users/:id` check
* :zap: Improve checks in `/users/:id`
* :sparkles: Add tests for `/users/:id`
* :package: Update package-lock.json
* :zap: Simplify expectation
* :zap: Reuse util for authless agent
* :truck: Make role names consistent
* :blue_book: Tighten namespaces map type
* :fire: Remove unneeded default arg
* :sparkles: Add tests for `POST /users`
* :blue_book: Create test SMTP account type
* :pencil2: Improve wording
* :art: Formatting
* :fire: Remove temp fix
* :zap: Replace helper with config call
* :zap: Fix failing tests
* :fire: Remove outdated test
* :sparkles: Add tests for password reset flow
* :pencil2: Fix test wording
* :zap: Set password reset namespace
* :fire: Remove unused helper
* :zap: Increase readability of domain fetcher
* :zap: Refactor payload validation
* :fire: Remove repetition
* :rewind: Restore logging
* :zap: Initialize logger in tests
* :fire: Remove redundancy from check
* :truck: Move `globalOwnerRole` fetching to global scope
* :fire: Remove unused imports
* :truck: Move random utils to own module
* :truck: Move test types to own module
* :pencil2: Add dividers to utils
* :pencil2: Reorder `initTestServer` param docstring
* :pencil2: Add TODO comment
* :zap: Dry up member creation
* :zap: Tighten search criteria
* :test_tube: Add expectation to `GET /users`
* :zap: Create role fetcher utils
* :zap: Create one more role fetch util
* :fire: Remove unneeded DB query
* :test_tube: Add expectation to `POST /users`
* :test_tube: Add expectation to `DELETE /users/:id`
* :test_tube: Add another expectation to `DELETE /users/:id`
* :test_tube: Add expectations to `DELETE /users/:id`
* :test_tube: Adjust expectations in `POST /users/:id`
* :test_tube: Add expectations to `DELETE /users/:id`
* :blue_book: Add namespace name to type
* :truck: Adjust imports
* :zap: Optimize `globalOwnerRole` fetching
* :test_tube: Add expectations
* :shirt: Fix build
* :shirt: Fix build
* :zap: Update method
* :zap: Update method
* :test_tube: Fix `POST /change-password` test
* :blue_book: Fix `userToDelete` type
* :zap: Refactor `createAgent()`
* :zap: Make role fetching global
* :zap: Optimize roles fetching
* :zap: Centralize member creation
* :zap: Refactor truncation helper
* :test_tube: Add teardown to `DELETE /users/:id`
* :test_tube: Add DB expectations to users tests
* :zap: Refactor as in users namespace
* :test_tube: Add expectation to `POST /change-password`
* :fire: Remove pass validation due to hash
* :pencil2: Improve pass validation error message
* :zap: Improve owner pass validation
* :zap: Create logger initialization helper
* :zap: Optimize helpers
* :zap: Restructure `getAllRoles` helper
* :zap: Update `truncate` calls
* :bug: return 200 for non-existing user
* ✅ fix tests for forgot-password and user creation
* Update packages/editor-ui/src/components/MainSidebar.vue
Co-authored-by: Ahsan Virani <ahsan.virani@gmail.com>
* Update packages/editor-ui/src/components/Telemetry.vue
Co-authored-by: Ahsan Virani <ahsan.virani@gmail.com>
* Update packages/editor-ui/src/plugins/telemetry/index.ts
Co-authored-by: Ahsan Virani <ahsan.virani@gmail.com>
* Update packages/editor-ui/src/plugins/telemetry/index.ts
Co-authored-by: Ahsan Virani <ahsan.virani@gmail.com>
* Update packages/editor-ui/src/plugins/telemetry/index.ts
Co-authored-by: Ahsan Virani <ahsan.virani@gmail.com>
* :truck: Fix imports
* :zap: reset password just if password exists
* Fix validation at `PATCH /workfows/:id` (#2819)
* :bug: Validate entity only if workflow
* :shirt: Fix build
* 🔨 refactor response from user creation
* 🐛 um email invite fix (#2833)
* update users invite
* fix notificaitons stacking on top of each other
* remove unnessary check
* fix type issues
* update structure
* fix types
* 🐘 database migrations UM + password reset expiration (#2710)
* Add table prefix and assign existing workflows and credentials to owner for sqlite
* Added user management migration to MySQL
* Fixed some missing table prefixes and removed unnecessary user id
* Created migration for postgres and applies minor fixes
* Fixed migration for sqlite by removing the unnecessary index and for mysql by removing unnecessary user data
* Added password reset token expiration
* Addressing comments made by Ben
* ⚡️ add missing tablePrefix
* ✅ fix tests + add tests for expiring pw-reset-token
Co-authored-by: Ben Hesseldieck <b.hesseldieck@gmail.com>
* :zap: treat skipped personalizationSurvey as not answered
* :bug: removing active workflows when deleting user, :bug: fix reinvite, :bug: fix resolve-signup-token, 🐘 remove workflowname uniqueness
* ✅ Add DB state check tests (#2841)
* :fire: Remove unneeded import
* :fire: Remove unneeded vars
* :pencil2: Improve naming
* :test_tube: Add expectations to `POST /owner`
* :test_tube: Add expectations to `PATCH /me`
* :test_tube: Add expectation to `PATCH /me/password`
* :pencil2: Clarify when owner is owner shell
* :test_tube: Add more expectations
* :rewind: Restore package-lock to parent branch state
* Add logging to user management endpoints v2 (#2836)
* :zap: Initialize logger in tests
* :zap: Add logs to mailer
* :zap: Add logs to middleware
* :zap: Add logs to me endpoints
* :zap: Add logs to owner endpoints
* :zap: Add logs to pass flow endpoints
* :zap: Add logs to users endpoints
* :blue_book: Improve typings
* :zap: Merge two logs into one
* :zap: Adjust log type
* :zap: Add password reset email log
* :pencil2: Reword log message
* :zap: Adjust log meta object
* :zap: Add total to log
* :pencil2: Add detail to log message
* :pencil2: Reword log message
* :pencil2: Reword log message
* :bug: Make total users to set up accurate
* :pencil2: Reword `Logger.debug()` messages
* :pencil2: Phrasing change for consistency
* :bug: Fix ID overridden in range query
* :hammer: small refactoring
* 🔐 add auth to push-connection
* 🛠 ✅ Create credentials namespace and add tests (#2831)
* :test_tube: Fix failing test
* :blue_book: Improve `createAgent` signature
* :truck: Fix `LoggerProxy` import
* :sparkles: Create credentials endpoints namespace
* :test_tube: Set up initial tests
* :zap: Add validation to model
* :zap: Adjust validation
* :test_tube: Add test
* :truck: Sort creds endpoints
* :pencil2: Plan out pending tests
* :test_tube: Add deletion tests
* :test_tube: Add patch tests
* :test_tube: Add get cred tests
* :truck: Hoist import
* :pencil2: Make test descriptions consistent
* :pencil2: Adjust description
* :test_tube: Add missing test
* :pencil2: Make get descriptions consistent
* :rewind: Undo line break
* :zap: Refactor to simplify `saveCredential`
* :test_tube: Add non-owned tests for owner
* :pencil2: Improve naming
* :pencil2: Add clarifying comments
* :truck: Improve imports
* :zap: Initialize config file
* :fire: Remove unneeded import
* :truck: Rename dir
* :zap: Adjust deletion call
* :zap: Adjust error code
* :pencil2: Touch up comment
* :zap: Optimize fetching with `@RelationId`
* :test_tube: Add expectations
* :zap: Simplify mock calls
* :blue_book: Set deep readonly to object constants
* :fire: Remove unused param and encryption key
* :zap: Add more `@RelationId` calls in models
* :rewind: Restore
* :bug: no auth for .svg
* 🛠 move auth cookie name to constant; 🐛 fix auth for push-connection
* ✅ Add auth middleware tests (#2853)
* :zap: Simplify existing suite
* :test_tube: Validate that auth cookie exists
* :pencil2: Move comment
* :fire: Remove unneeded imports
* :pencil2: Add clarifying comments
* :pencil2: Document auth endpoints
* :test_tube: Add middleware tests
* :pencil2: Fix typos
Co-authored-by: Ben Hesseldieck <1849459+BHesseldieck@users.noreply.github.com>
* 🔥 Remove test description wrappers (#2874)
* :fire: Remove /owner test wrappers
* :fire: Remove auth middleware test wrappers
* :fire: Remove auth endpoints test wrappers
* :fire: Remove overlooked middleware wrappers
* :fire: Remove me namespace test wrappers
Co-authored-by: Ben Hesseldieck <b.hesseldieck@gmail.com>
* ✨ Runtime checks for credentials load and execute workflows (#2697)
* Runtime checks for credentials load and execute workflows
* Fixed from reviewers
* Changed runtime validation for credentials to be on start instead of on demand
* Refactored validations to use user id instead of whole User instance
* Removed user entity from workflow project because it is no longer needed
* General fixes and improvements to runtime checks
* Remove query builder and improve styling
* Fix lint issues
* :zap: remove personalizationAnswers when fetching all users
* ✅ fix failing get all users test
* ✅ check authorization routes also for authentication
* :bug: fix defaults in reset command
* 🛠 refactorings from walkthrough (#2856)
* :zap: Make `getTemplate` async
* :zap: Remove query builder from `getCredentials`
* :zap: Add save manual executions log message
* :rewind: Restore and hide migrations logs
* :zap: Centralize ignore paths check
* :shirt: Fix build
* :truck: Rename `hasOwner` to `isInstanceOwnerSetUp`
* :zap: Add `isSetUp` flag to `User`
* :zap: Add `isSetUp` to FE interface
* :zap: Adjust `isSetUp` checks on FE
* :shirt: Fix build
* :zap: Adjust `isPendingUser()` check
* :truck: Shorten helper name
* :zap: Refactor as `isPending` per feedback
* :pencil2: Update log message
* :zap: Broaden check
* :fire: Remove unneeded relation
* :zap: Refactor query
* :fire: Re-remove logs from migrations
* 🛠 set up credentials router (#2882)
* :zap: Refactor creds endpoints into router
* :test_tube: Refactor creds tests to use router
* :truck: Rename arg for consistency
* :truck: Move `credentials.api.ts` outside /public
* :truck: Rename constant for consistency
* :blue_book: Simplify types
* :fire: Remove unneeded arg
* :truck: Rename router to controller
* :zap: Shorten endpoint
* :zap: Update `initTestServer()` arg
* :zap: Mutate response body in GET /credentials
* 🏎 improve performance of type cast for FE
Co-authored-by: Ben Hesseldieck <b.hesseldieck@gmail.com>
* :bug: remove GET /login from auth
* 🔀 merge master + FE update (#2905)
* :sparkles: Add Templates (#2720)
* Templates Bugs / Fixed Various Bugs / Multiply Api Request, Carousel Gradient, Core Nodes Filters ...
* Updated MainSidebar Paddings
* N8N-Templates Bugfixing - Remove Unnecesairy Icon (Shape), Refatctor infiniteScrollEnabled Prop + updated infiniterScroll functinality
* N8N-2853 Fixed Carousel Arrows Bug after Cleaning the SearchBar
* fix telemetry init
* fix search tracking issues
* N8N-2853 Created FilterTemplateNode Constant Array, Filter PlayButton and WebhookRespond from Nodes, Added Box for showing more nodes inside TemplateList, Updated NewWorkflowButton to primary, Fixed Markdown issue with Code
* N8N-2853 Removed Placeholder if Workflows Or Collections are not found, Updated the Logic
* fix telemetry events
* clean up session id
* update user inserted event
* N8N-2853 Fixed Categories to Moving if the names are long
* Add todos
* Update Routes on loading
* fix spacing
* Update Border Color
* Update Border Readius
* fix filter fn
* fix constant, console error
* N8N-2853 PR Fixes, Refactoring, Removing unnecesairy code ..
* N8N-2853 PR Fixes - Editor-ui Fixes, Refactoring, Removing Dead Code ...
* N8N-2853 Refactor Card to LongCard
* clean up spacing, replace css var
* clean up spacing
* set categories as optional in node
* replace vars
* refactor store
* remove unnesssary import
* fix error
* fix templates view to start
* add to cache
* fix coll view data
* fix categories
* fix category event
* fix collections carousel
* fix initial load and search
* fix infinite load
* fix query param
* fix scrolling issues
* fix scroll to top
* fix search
* fix collections search
* fix navigation bug
* rename view
* update package lock
* rename workflow view
* rename coll view
* update routes
* add wrapper component
* set session id
* fix search tracking
* fix session tracking
* remove deleted mutation
* remove check for unsupported nodes
* refactor filters
* lazy load template
* clean up types
* refactor infinte scroll
* fix end of search
* Fix spacing
* fix coll loading
* fix types
* fix coll view list
* fix navigation
* rename types
* rename state
* fix search responsiveness
* fix coll view spacing
* fix search view spacing
* clean up views
* set background color
* center page not vert
* fix workflow view
* remove import
* fix background color
* fix background
* clean props
* clean up imports
* refactor button
* update background color
* fix spacing issue
* rename event
* update telemetry event
* update endpoints, add loading view, check for endpoint health
* remove conolse log
* N8N-2853 Fixed Menu Items Padding
* replace endpoints
* fix type issues
* fix categories
* N8N-2853 Fixed ParameterInput Placeholder after ElementUI Upgrade
* update createdAt
* :zap: Fix placeholder in creds config modal
* :pencil2: Adjust docstring to `credText` placeholder version
* N8N-2853 Optimized
* N8N-2853 Optimized code
* :zap: Add deployment type to FE settings
* :zap: Add deployment type to interfaces
* N8N-2853 Removed Animated prop from components
* :zap: Add deployment type to store module
* :sparkles: Create hiring banner
* :zap: Display hiring banner
* :rewind: Undo unrelated change
* N8N-2853 Refactor TemplateFilters
* :zap: Fix indentation
* N8N-2853 Reorder items / TemplateList
* :shirt: Fix lint
* N8N-2853 Refactor TemplateFilters Component
* N8N-2853 Reorder TemplateList
* refactor template card
* update timeout
* fix removelistener
* fix spacing
* split enabled from offline
* add spacing to go back
* N8N-2853 Fixed Screens for Tablet & Mobile
* N8N-2853 Update Stores Order
* remove image componet
* remove placeholder changes
* N8N-2853 Fixed Chinnese Placeholders for El Select Component that comes from the Library Upgrade
* N8N-2853 Fixed Vue Agile Console Warnings
* N8N-2853 Update Collection Route
* :pencil2: Update jobs URL
* :truck: Move logging to root component
* :zap: Refactor `deploymentType` to `isInternalUser`
* :zap: Improve syntax
* fix cut bug in readonly view
* N8N-3012 Fixed Details section in templates with lots of description, Fixed Mardown Block with overflox-x
* N8N-3012 Increased Font-size, Spacing and Line-height of the Categories Items
* N8N-3012 Fixed Vue-agile client width error on resize
* only delay redirect for root path
* N8N-3012 Fixed Carousel Arrows that Disappear
* N8N-3012 Make Loading Screen same color as Templates
* N8N-3012 Markdown renders inline block as block code
* add offline warning
* hide log from workflow iframe
* update text
* make search button larger
* N8N-3012 Categories / Tags extended all the way in details section
* load data in cred modals
* remove deleted message
* add external hook
* remove import
* update env variable description
* fix markdown width issue
* disable telemetry for demo, add session id to template pages
* fix telemetery bugs
* N8N-3012 Not found Collections/Wokrkflow
* N8N-3012 Checkboxes change order when categories are changed
* N8N-3012 Refactor SortedCategories inside TemplateFilters component
* fix firefox bug
* add telemetry requirements
* add error check
* N8N-3012 Update GoBackButton to check if Route History is present
* N8N-3012 Fixed WF Nodes Icons
* hide workflow screenshots
* remove unnessary mixins
* rename prop
* fix design a bit
* rename data
* clear workspace on destroy
* fix copy paste bug
* fix disabled state
* N8N-3012 Fixed Saving/Leave without saving Modal
* fix telemetry issue
* fix telemetry issues, error bug
* fix error notification
* disable workflow menu items on templates
* fix i18n elementui issue
* Remove Emit - NodeType from HoverableNodeIcon component
* TechnicalFixes: NavigateTo passed down as function should be helper
* TechnicalFixes: Update NavigateTo function
* TechnicalFixes: Add FilterCoreNodes directly as function
* check for empty connecitions
* fix titles
* respect new lines
* increase categories to be sliced
* rename prop
* onUseWorkflow
* refactor click event
* fix bug, refactor
* fix loading story
* add default
* fix styles at right level of abstraction
* add wrapper with width
* remove loading blocks component
* add story
* rename prop
* fix spacing
* refactor tag, add story
* move margin to container
* fix tag redirect, remove unnessary check
* make version optional
* rename view
* move from workflows to templates store
* remove unnessary change
* remove unnessary css
* rename component
* refactor collection card
* add boolean to prevent shrink
* clean up carousel
* fix redirection bug on save
* remove listeners to fix multiple listeners bug
* remove unnessary types
* clean up boolean set
* fix node select bug
* rename component
* remove unnessary class
* fix redirection bug
* remove unnessary error
* fix typo
* fix blockquotes, pre
* refactor markdown rendering
* remove console log
* escape markdown
* fix safari bug
* load active workflows to fix modal bug
* :arrow_up: Update package-lock.json file
* :zap: Add n8n version as header
Co-authored-by: Mutasem Aldmour <4711238+mutdmour@users.noreply.github.com>
Co-authored-by: Mutasem <mutdmour@gmail.com>
Co-authored-by: Iván Ovejero <ivov.src@gmail.com>
Co-authored-by: Jan Oberhauser <jan.oberhauser@gmail.com>
* :bookmark: Release n8n-workflow@0.88.0
* :arrow_up: Set n8n-workflow@0.88.0 on n8n-core
* :bookmark: Release n8n-core@0.106.0
* :arrow_up: Set n8n-core@0.106.0 and n8n-workflow@0.88.0 on n8n-node-dev
* :bookmark: Release n8n-node-dev@0.45.0
* :arrow_up: Set n8n-core@0.106.0 and n8n-workflow@0.88.0 on n8n-nodes-base
* :bookmark: Release n8n-nodes-base@0.163.0
* :bookmark: Release n8n-design-system@0.12.0
* :arrow_up: Set n8n-design-system@0.12.0 and n8n-workflow@0.88.0 on n8n-editor-ui
* :bookmark: Release n8n-editor-ui@0.132.0
* :arrow_up: Set n8n-core@0.106.0, n8n-editor-ui@0.132.0, n8n-nodes-base@0.163.0 and n8n-workflow@0.88.0 on n8n
* :bookmark: Release n8n@0.165.0
* fix default user bug
* fix bug
* update package lock
* fix duplicate import
* fix settings
* fix templates access
Co-authored-by: Oliver Trajceski <olivertrajceski@yahoo.com>
Co-authored-by: Iván Ovejero <ivov.src@gmail.com>
Co-authored-by: Jan Oberhauser <jan.oberhauser@gmail.com>
* :zap: n8n 2952 personalisation (#2911)
* refactor/update survey
* update customers
* Fix up personalization survey
* fix recommendation logic
* set to false
* hide suggested nodes when empty
* use keys
* add missing logic
* switch types
* Fix logic
* remove unused constants
* add back constant
* refactor filtering inputs
* hide last input on personal
* fix other
* ✨ add current pw check for change password (#2912)
* fix back button
* Add current password input
* add to modal
* update package.json
* delete mock file
* delete mock file
* get settings func
* update router
* update package lock
* update package lock
* Fix invite text
* update error i18n
* open personalization on search if not set
* update error view i18n
* update change password
* update settings sidebar
* remove import
* fix sidebar
* :goal_net: fix error for credential/workflow not found
* update invite modal
* ✨ persist skipping owner setup (#2894)
* 🚧 added skipInstanceOwnerSetup to DB + route to save skipping
* ✨ skipping owner setup persists
* ✅ add tests for authorization and /owner/skip-setup
* 🛠 refactor FE settings getter
* 🛠 move setting setup stop to owner creation
* :bug: fix wrong setting of User.isPending
* :bug: fix isPending
* 🏷 add isPending to PublicUser
* :bug: fix unused import
* update delete modal
* change password modal
* remove _label
* sort keys
* remove key
* update key names
* fix test endpoint
* 🥅 Handle error workflows permissions (#2908)
* Handle error workflows permissions
* Fixed wrong query format
* 🛠 refactor query
Co-authored-by: Ben Hesseldieck <1849459+BHesseldieck@users.noreply.github.com>
* fix ts issue
* fix list after ispending changes
* fix error page bugs
* fix error redirect
* fix notification
* :bug: fix survey import in migration
* fix up spacing
* update keys spacing
* update keys
* add space
* update key
* fix up more spacing
* 🔐 add current password (#2919)
* add curr pass
* update key names
* :bug: stringify tag ids
* 🔐 check current password before update
* add package lock
* fix dep version
* update version
* 🐛 fix access for instance owner to credentials (#2927)
* 🛠 stringify tag id on entity
* 🔐 Update password requirements (#2920)
* :zap: Update password requirements
* :zap: Adjust random helpers
* ✅ fix tests for currentPassword check
* change redirection, add homepage
* fix error view redirection
* updated wording
* fix setup redirection
* update validator
* remove successfully
* update consumers
* update settings redirect
* on signup, redirect to homepage
* update empty state
* add space to emails
* remove brackets
* add opacity
* update spacing
* remove border from last user
* personal details updated
* update redirect on sign up
* prevent text wrap
* fix notification title line height
* remove console log
* 🐘 Support testing with Postgres and MySQL (#2886)
* :card_file_box: Fix Postgres migrations
* :zap: Add DB-specific scripts
* :sparkles: Set up test connections
* :zap: Add Postgres UUID check
* :test_tube: Make test adjustments for Postgres
* :zap: Refactor connection logic
* :sparkles: Set up double init for Postgres
* :pencil2: Add TODOs
* :zap: Refactor DB dropping logic
* :sparkles: Implement global teardown
* :sparkles: Create TypeORM wrappers
* :sparkles: Initial MySQL setup
* :zap: Clean up Postgres connection options
* :zap: Simplify by sharing bootstrap connection name
* :card_file_box: Fix MySQL migrations
* :fire: Remove comments
* :zap: Use ES6 imports
* :fire: Remove outdated comments
* :zap: Centralize bootstrap connection name handles
* :zap: Centralize database types
* :pencil2: Update comment
* :truck: Rename `findRepository`
* :construction: Attempt to truncate MySQL
* :sparkles: Implement creds router
* :bug: Fix duplicated MySQL bootstrap
* :bug: Fix misresolved merge conflict
* :card_file_box: Fix tags migration
* :card_file_box: Fix MySQL UM migration
* :bug: Fix MySQL parallelization issues
* :blue_book: Augment TypeORM to prevent error
* :fire: Remove comments
* :sparkles: Support one sqlite DB per suite run
* :truck: Move `testDb` to own module
* :fire: Deduplicate bootstrap Postgres logic
* :fire: Remove unneeded comment
* :zap: Make logger init calls consistent
* :pencil2: Improve comment
* :pencil2: Add dividers
* :art: Improve formatting
* :fire: Remove duplicate MySQL global setting
* :truck: Move comment
* :zap: Update default test script
* :fire: Remove unneeded helper
* :zap: Unmarshal answers from Postgres
* :bug: Phase out `isTestRun`
* :zap: Refactor `isEmailSetup`
* :fire: Remove unneeded imports
* :zap: Handle bootstrap connection errors
* :fire: Remove unneeded imports
* :fire: Remove outdated comments
* :pencil2: Fix typos
* :truck: Relocate `answersFormatter`
* :rewind: Undo package.json miscommit
* :fire: Remove unneeded import
* :zap: Refactor test DB prefixing
* :zap: Add no-leftover check to MySQL
* :package: Update package.json
* :zap: Autoincrement on simulated MySQL truncation
* :fire: Remove debugging queries
* ✏️ fix email template link expiry
* 🔥 remove unused import
* ✅ fix testing email not sent error
* fix duplicate import
* add package lock
* fix export
* change opacity
* fix text issue
* update action box
* update error title
* update forgot password
* update survey
* update product text
* remove unset fields
* add category to page events
* remove duplicate import
* update key
* update key
* update label type
* 🎨 um/fe review (#2946)
* :whale: Update Node.js versions of Docker images to 16
* :bug: Fix that some keyboard shortcuts did no longer work
* N8N-3057 Fixed Keyboard shortcuts no longer working on / Fixed callDebounced function
* N8N-3057 Update Debounce Function
* N8N-3057 Refactor callDebounce function
* N8N-3057 Update Dobounce Function
* :bug: Fix issue with tooltips getting displayed behind node details view
* fix tooltips z-index
* move all element ui components
* update package lock
* :bug: Fix credentials list load issue (#2931)
* always fetch credentials
* only fetch credentials once
* :zap: Allow to disable hiring banner (#2902)
* :sparkles: Add flag
* :zap: Adjust interfaces
* :zap: Adjust store module
* :zap: Adjust frontend settings
* :zap: Adjust frontend display
* :bug: Fix issue that ctrl + o did behave wrong on workflow templates page (#2934)
* N8N-3094 Workflow Templates cmd-o acts on the Preview/Iframe
* N8N-3094 Workflow Templates cmd-o acts on the Preview/Iframe
* disable shortcuts for preview
Co-authored-by: Mutasem <mutdmour@gmail.com>
* :arrow_up: Update package-lock.json file
* :bug: Fix sorting by field in Baserow Node (#2942)
This fixes a bug which currently leads to the "Sorting" option of the node to be ignored.
* :bug: Fix some i18n line break issues
* :sparkles: Add Odoo Node (#2601)
* added odoo scaffolding
* update getting data from odoo instance
* added scaffolding for main loop and request functions
* added functions for CRUD operations
* improved error handling for odooJSONRPCRequest
* updated odoo node and fixed nodelinter issues
* fixed alphabetical order
* fixed types in odoo node
* fixing linter errors
* fixing linter errors
* fixed data shape returned from main loop
* updated node input types, added fields list to models
* update: when a custom resource is selected, options for the fields list are populated dynamically
* minor fixes
* :hammer: fixed credential test, updating CRUD methods
* :hammer: added additional fields to crm resource
* :hammer: added descriptions, fixed credentials test bug
* :hammer: standardize node and descriptions design
* :hammer: removed comments
* :hammer: added pagination to getAll operation
* :zap: removed leftover function from previous implementation, removed required from optional fields
* :zap: fixed id field, added indication of type and if required to field description, replaced string input in filters to fetched list of fields
* :hammer: fetching list of models from odoo, added selection of fields to be returned to predefined models, fixes according to review
* :zap: Small improvements
* :hammer: extracted address fields into collection, changed fields to include in descriptions, minor tweaks
* :zap: Improvements
* :hammer: working on review
* :hammer: fixed linter errors
* :hammer: review wip
* :hammer: review wip
* :hammer: review wip
* :zap: updated display name for URL in credentials
* :hammer: added checks for valid id to delete and update
* :zap: Minor improvements
Co-authored-by: ricardo <ricardoespinoza105@gmail.com>
Co-authored-by: Jan Oberhauser <jan.oberhauser@gmail.com>
* :bug: Handle Wise SCA requests (#2734)
* :zap: Improve Wise error message after previous change
* fix duplicate import
* add package lock
* fix export
* change opacity
* fix text issue
* update action box
* update error title
* update forgot password
* update survey
* update product text
* remove unset fields
* add category to page events
* remove duplicate import
* update key
* update key
Co-authored-by: Jan Oberhauser <jan.oberhauser@gmail.com>
Co-authored-by: Oliver Trajceski <olivertrajceski@yahoo.com>
Co-authored-by: Iván Ovejero <ivov.src@gmail.com>
Co-authored-by: Tom <19203795+that-one-tom@users.noreply.github.com>
Co-authored-by: Michael Kret <88898367+michael-radency@users.noreply.github.com>
Co-authored-by: ricardo <ricardoespinoza105@gmail.com>
Co-authored-by: pemontto <939704+pemontto@users.noreply.github.com>
* Move owner skip from settings
* 🐛 SMTP fixes (#2937)
* :fire: Remove `UM_` from SMTP env vars
* :fire: Remove SMTP host default value
* :zap: Update sender value
* :zap: Update invite template
* :zap: Update password reset template
* :zap: Update `N8N_EMAIL_MODE` default value
* :fire: Remove `EMAIL` from all SMTP vars
* :sparkles: Implement `verifyConnection()`
* :truck: Reposition comment
* :pencil2: Fix typo
* :pencil2: Minor env var documentation improvements
* :art: Fix spacing
* :art: Fix spacing
* :card_file_box: Remove SMTP settings cache
* :zap: Adjust log message
* :zap: Update error message
* :pencil2: Fix template typo
* :pencil2: Adjust wording
* :zap: Interpolate email into success toast
* :pencil2: Adjust base message in `verifyConnection()`
* :zap: Verify connection on password reset
* :zap: Bring up POST /users SMTP check
* :bug: remove cookie if cookie is not valid
* :zap: verify connection on instantiation
Co-authored-by: Ben Hesseldieck <b.hesseldieck@gmail.com>
* 🔊 create logger helper for migrations (#2944)
* 🔥 remove unused database
* :loud_sound: add migration logging for sqlite
* 🔥 remove unnecessary index creation
* ⚡️ change log level to warn
* 🐛 Fix issue with workflow process to initialize db connection correctly (#2948)
* ✏️ update error messages for webhook run/activation
* 📈 Implement telemetry events (#2868)
* Implement basic telemetry events
* Fixing user id as part of the telemetry data
* Added user id to be part of the tracked data
* :sparkles: Create telemetry mock
* :test_tube: Fix tests with telemetry mock
* :test_tube: Fix missing key in authless endpoint
* :blue_book: Create authless request type
* :fire: Remove log
* :bug: Fix `migration_strategy` assignment
* :blue_book: Remove `instance_id` from `ITelemetryUserDeletionData`
* :zap: Simplify concatenation
* :zap: Simplify `track()` call signature
* Fixed payload of telemetry to always include user_id
* Fixing minor issues
Co-authored-by: Iván Ovejero <ivov.src@gmail.com>
* 🔊 Added logs to credentials, executions and workflows (#2915)
* Added logs to credentials, executions and workflows
* Some updates according to ivov's feedback
* :zap: update log levels
* ✅ fix tests
Co-authored-by: Ben Hesseldieck <b.hesseldieck@gmail.com>
* :bug: fix telemetry error
* fix conflicts with master
* fix duplicate
* add package-lock
* :bug: Um/fixes (#2952)
* add initials to avatar
* redirect to signin if invalid token
* update pluralization
* add auth page category
* data transferred
* touch up setup page
* update button to add cursor
* fix personalization modal not closing
* ✏️ fix environment name
* 🐛 fix disabling UM
* 🐛 fix email setup flag
* 🐛 FE fixes 1 (#2953)
* add initials to avatar
* redirect to signin if invalid token
* update pluralization
* add auth page category
* data transferred
* touch up setup page
* update button to add cursor
* fix personalization modal not closing
* capitalize labels, refactor text
* Fixed the issue with telemetry data missing for personalization survey
* Changed invite email text
* 🐛 Fix quotes issue with postgres migration (#2958)
* Changed text for invite link
* 🐛 fix reset command for mysql
* ✅ fix race condition in test DB creation
* 🔐 block user creation if UM is disabled
* 🥅 improve smtp setup issue error
* :zap: update error message
* refactor route rules
* set package lock
* fix access
* remove capitalize
* update input labels
* refactor heading
* change span to fragment
* add route types
* refactor views
* ✅ fix increase timeout for mysql
* :zap: correct logic of error message
* refactor view names
* :zap: update randomString
* 📈 Added missing event regarding failed emails (#2964)
* replace label with info
* 🛠 refactor JWT-secret creation
* remove duplicate key
* remove unused part
* remove semicolon
* fix up i18n pattern
* update translation keys
* update urls
* support i18n in nds
* fix how external keys are handled
* add source
* 💥 update timestamp of UM migration
* ✏️ small message updates
* fix tracking
* update notification line-height
* fix avatar opacity
* fix up empty state
* shift focus to input
* 🔐 Disable basic auth after owner has been set up (#2973)
* Disable basic auth after owner has been set up
* Remove unnecessary comparison
* rename modal title
* 🐛 use pgcrypto extension for uuid creation (#2977)
* 📧 Added public url variable for emails (#2967)
* Added public url variable for emails
* Fixed base url for reset password - the current implementation overrides possibly existing path
* Change variable name to editorUrl
* Using correct name editorUrl for emails
* Changed variable description
* Improved base url naming and appending path so it remains consistent
* Removed trailing slash from editor base url
* 🌐 fix i18n pattern (#2970)
* fix up i18n pattern
* update translation keys
* update urls
* support i18n in nds
* fix how external keys are handled
* add source
* Um/fixes 1000 (#2980)
* fix select issue
* 😫 hacky solution to circumvent pgcrypto (#2979)
* fix owner bug after transfer. always fetch latest credentials
* add confirmation modal to setup
* Use webhook url as fallback when editor url is not defined
* fix enter bug
* update modal
* update modal
* update modal text, fix bug in settings view
* Updating editor url to not append path
* rename keys
Co-authored-by: Iván Ovejero <ivov.src@gmail.com>
Co-authored-by: Mutasem Aldmour <4711238+mutdmour@users.noreply.github.com>
Co-authored-by: Mutasem <mutdmour@gmail.com>
Co-authored-by: Ahsan Virani <ahsan.virani@gmail.com>
Co-authored-by: Omar Ajoue <krynble@gmail.com>
Co-authored-by: Oliver Trajceski <olivertrajceski@yahoo.com>
Co-authored-by: Jan Oberhauser <jan.oberhauser@gmail.com>
Co-authored-by: Tom <19203795+that-one-tom@users.noreply.github.com>
Co-authored-by: Michael Kret <88898367+michael-radency@users.noreply.github.com>
Co-authored-by: ricardo <ricardoespinoza105@gmail.com>
Co-authored-by: pemontto <939704+pemontto@users.noreply.github.com>
2022-03-14 06:46:32 -07:00
		data.userId,
		undefined,
		workflowTimeout <= 0 ? undefined : Date.now() + workflowTimeout * 1000,
	);
	additionalData.restartExecutionId = restartExecutionId;

	// Register the active execution
	const executionId = await this.activeExecutions.add(data, undefined, restartExecutionId);
	additionalData.executionId = executionId;

	Logger.verbose(
		`Execution for workflow ${data.workflowData.name} was assigned id ${executionId}`,
		{ executionId },
	);
	let workflowExecution: PCancelable<IRun>;

	try {
		additionalData.hooks = WorkflowExecuteAdditionalData.getWorkflowHooksMain(
			data,
			executionId,
			true,
		);
		try {
			await PermissionChecker.check(workflow, data.userId);
		} catch (error) {
			ErrorReporter.error(error);
			// Create a failed execution with the data for the node,
			// save it, and abort the execution
			const failedExecution = generateFailedExecutionFromError(
				data.executionMode,
				error,
				error.node,
			);
			await additionalData.hooks.executeHookFunctions('workflowExecuteAfter', [failedExecution]);
			this.activeExecutions.remove(executionId, failedExecution);
			return executionId;
		}

		additionalData.hooks.hookFunctions.sendResponse = [
			async (response: IExecuteResponsePromiseData): Promise<void> => {
				if (responsePromise) {
					responsePromise.resolve(response);
				}
			},
		];

		additionalData.setExecutionStatus = WorkflowExecuteAdditionalData.setExecutionStatus.bind({
			executionId,
		});

		additionalData.sendMessageToUI = WorkflowExecuteAdditionalData.sendMessageToUI.bind({
			sessionId: data.sessionId,
		});
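
		// Illustrative sketch (not from this file) of how the deferred response promise
		// above is consumed by a caller. `createDeferredPromise` is assumed to be the
		// helper exported by n8n-workflow; `workflowRunner` is a hypothetical instance:
		//
		//   const responsePromise = await createDeferredPromise<IExecuteResponsePromiseData>();
		//   const executionId = await workflowRunner.run(data, true, undefined, undefined, responsePromise);
		//   const earlyResponse = await responsePromise.promise(); // resolved by the sendResponse hook
		//
		// The hook fires as soon as a webhook response is produced, typically well
		// before the execution itself finishes.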

		if (data.executionData !== undefined) {
			Logger.debug(`Execution ID ${executionId} had Execution data. Running with payload.`, {
				executionId,
			});
			const workflowExecute = new WorkflowExecute(
				additionalData,
				data.executionMode,
				data.executionData,
			);
			workflowExecution = workflowExecute.processRunExecutionData(workflow);
		} else if (
			data.runData === undefined ||
			data.startNodes === undefined ||
			data.startNodes.length === 0 ||
			data.destinationNode === undefined
		) {
			Logger.debug(`Execution ID ${executionId} will run executing all nodes.`, { executionId });
			// Execute all nodes

			const startNode = WorkflowHelpers.getExecutionStartNode(data, workflow);

			// Can execute without webhook so go on
			const workflowExecute = new WorkflowExecute(additionalData, data.executionMode);
			workflowExecution = workflowExecute.run(
				workflow,
				startNode,
				data.destinationNode,
				data.pinData,
			);
		} else {
			Logger.debug(`Execution ID ${executionId} is a partial execution.`, { executionId });
			// Execute only the nodes between start and destination nodes
			const workflowExecute = new WorkflowExecute(additionalData, data.executionMode);
			workflowExecution = workflowExecute.runPartialWorkflow(
				workflow,
				data.runData,
				data.startNodes,
				data.destinationNode,
				data.pinData,
			);
		}
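
		// Summary of the branching above — the runner picks one of three strategies:
		//   1. data.executionData set             -> resume saved state via processRunExecutionData()
		//   2. no runData/startNodes/destination  -> full run of all nodes via run()
		//   3. runData + startNodes present       -> partial re-run via runPartialWorkflow()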

		this.activeExecutions.attachWorkflowExecution(executionId, workflowExecution);

		if (workflowTimeout > 0) {
			// Cap the per-workflow timeout at the global maximum, then convert seconds to ms
			const timeout = Math.min(workflowTimeout, config.getEnv('executions.maxTimeout')) * 1000;
			executionTimeout = setTimeout(() => {
				void this.activeExecutions.stopExecution(executionId, 'timeout');
			}, timeout);
		}
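
		// Example: with a workflow timeout of 600s and executions.maxTimeout of 300s,
		// Math.min(600, 300) * 1000 = 300000 ms, so the execution is stopped after
		// 5 minutes regardless of the longer per-workflow setting.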

		workflowExecution
			.then((fullRunData) => {
				clearTimeout(executionTimeout);
				if (workflowExecution.isCanceled) {
					fullRunData.finished = false;
				}
				fullRunData.status = this.activeExecutions.getStatus(executionId);
				this.activeExecutions.remove(executionId, fullRunData);
			})
			.catch(async (error) =>
				this.processError(
					error,
					new Date(),
					data.executionMode,
					executionId,
					additionalData.hooks,
				),
			);
	} catch (error) {
		await this.processError(
			error,
			new Date(),
			data.executionMode,
			executionId,
			additionalData.hooks,
		);

		throw error;
	}

	return executionId;
}
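
// Note: the caller decides which path runs. When n8n is configured for queue mode
// (executions.mode = 'queue', typically set via EXECUTIONS_MODE=queue), executions
// are handed to enqueueExecution() below and processed by a worker via Bull/Redis
// instead of running in the main process as above.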

async enqueueExecution(
	data: IWorkflowExecutionDataProcess,
	loadStaticData?: boolean,
	realtime?: boolean,
	restartExecutionId?: string,
	responsePromise?: IDeferredPromise<IExecuteResponsePromiseData>,
): Promise<string> {
	// TODO: If "loadStaticData" is set to true, the worker has to freshly load the data

	// Register the active execution
	const executionId = await this.activeExecutions.add(data, undefined, restartExecutionId);
	if (responsePromise) {
		this.activeExecutions.attachResponsePromise(executionId, responsePromise);
	}

	const jobData: JobData = {
		executionId,
		loadStaticData: !!loadStaticData,
	};

	let priority = 100;
	if (realtime === true) {
		// Jobs which require a direct response get a higher priority
		priority = 50;
	}
	// TODO: Realtime jobs should probably not be retried, or at least not retried
	// if they are older than x seconds. Check if they get retried by default and how often.
	const jobOptions = {
		priority,
		removeOnComplete: true,
		removeOnFail: true,
	};
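	// In Bull, a lower priority number is served first (1 is the highest priority),
	// so realtime jobs (50) jump ahead of regular ones (100). removeOnComplete and
	// removeOnFail keep finished jobs from accumulating in Redis.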
	let job: Job;
	let hooks: WorkflowHooks;
	try {
		job = await this.jobQueue.add(jobData, jobOptions);

		console.log(`Started with job ID: ${job.id.toString()} (Execution ID: ${executionId})`);

		hooks = WorkflowExecuteAdditionalData.getWorkflowHooksWorkerMain(
			data.executionMode,
			executionId,
			data.workflowData,
			{ retryOf: data.retryOf ? data.retryOf.toString() : undefined },
		);

		// Normally the workflow would also be supplied here, but since it is only used
		// for sending data to the editor UI, it is not needed.
		await hooks.executeHookFunctions('workflowExecuteBefore', []);
	} catch (error) {
		// We use "getWorkflowHooksWorkerExecuter" because "getWorkflowHooksWorkerMain"
		// does not contain the "workflowExecuteAfter" hook, which we require here.
		const hooks = WorkflowExecuteAdditionalData.getWorkflowHooksWorkerExecuter(
			data.executionMode,
			executionId,
			data.workflowData,
			{ retryOf: data.retryOf ? data.retryOf.toString() : undefined },
		);
		await this.processError(error, new Date(), data.executionMode, executionId, hooks);
		throw error;
	}

	const workflowExecution: PCancelable<IRun> = new PCancelable(
		async (resolve, reject, onCancel) => {
			onCancel.shouldReject = false;
			onCancel(async () => {
				const queue = Container.get(Queue);
				await queue.stopJob(job);

				// We use "getWorkflowHooksWorkerExecuter" because "getWorkflowHooksWorkerMain"
				// does not contain the "workflowExecuteAfter" hook, which we require here.
				const hooksWorker = WorkflowExecuteAdditionalData.getWorkflowHooksWorkerExecuter(
					data.executionMode,
					executionId,
					data.workflowData,
					{ retryOf: data.retryOf ? data.retryOf.toString() : undefined },
				);

				const error = new WorkflowOperationError('Workflow-Execution has been canceled!');
				await this.processError(error, new Date(), data.executionMode, executionId, hooksWorker);

				reject(error);
			});
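
			// Note on cancellation: callers trigger the handler above via
			// `workflowExecution.cancel()`. Because `onCancel.shouldReject` is false,
			// p-cancelable does not auto-reject with a CancelError; instead the handler
			// stops the Bull job and rejects explicitly with the WorkflowOperationError.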

			const jobData: Promise<JobResponse> = job.finished();

			const queueRecoveryInterval = config.getEnv('queue.bull.queueRecoveryInterval');

		const racingPromises: Array<Promise<JobResponse>> = [jobData];

		let clearWatchdogInterval: (() => void) | undefined;
		if (queueRecoveryInterval > 0) {
			/**
			 * Long explanation about what this solves:
			 * This only happens in a very specific scenario: Redis crashes and
			 * recovers shortly after, but during that window some execution(s)
			 * finished. The end result is that the main process would wait
			 * indefinitely and never get a response. This adds active polling of
			 * the queue, which allows us to identify that the execution finished
			 * and to get its information from the database.
			 */
			let watchDogInterval: NodeJS.Timeout | undefined;

			const watchDog: Promise<JobResponse> = new Promise((res) => {
				watchDogInterval = setInterval(async () => {
					const currentJob = await this.jobQueue.getJob(job.id);
					// A `null` job means it is finished (it is no longer in the queue)
					if (currentJob === null) {
						// Mimic the worker's success message
						res({ success: true });
					}
				}, queueRecoveryInterval * 1000);
			});

			racingPromises.push(watchDog);

			clearWatchdogInterval = () => {
				if (watchDogInterval) {
					clearInterval(watchDogInterval);
					watchDogInterval = undefined;
				}
			};
		}
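
		// The watchdog and the real job result now race each other; whichever settles first
		// wins below. A minimal sketch of the same recovery pattern, with illustrative names
		// and cleanup elided:
		//
		//   const finished = job.finished();
		//   const poll = new Promise<JobResponse>((res) => {
		//     setInterval(async () => {
		//       if ((await queue.getJob(job.id)) === null) res({ success: true });
		//     }, intervalMs);
		//   });
		//   const result = await Promise.race([finished, poll]);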

		let racingPromisesResult: JobResponse = {
			success: false,
		};
		try {
			racingPromisesResult = await Promise.race(racingPromises);
			if (clearWatchdogInterval !== undefined) {
				clearWatchdogInterval();
			}
		} catch (error) {
			ErrorReporter.error(error);
			// We use "getWorkflowHooksWorkerExecuter" because "getWorkflowHooksWorkerMain" does not
			// contain the "workflowExecuteAfter" hook, which we require here.
			const hooks = WorkflowExecuteAdditionalData.getWorkflowHooksWorkerExecuter(
				data.executionMode,
				executionId,
				data.workflowData,
				{ retryOf: data.retryOf ? data.retryOf.toString() : undefined },
			);
			Logger.error(`Problem with execution ${executionId}: ${error.message}. Aborting.`);
			if (clearWatchdogInterval !== undefined) {
				clearWatchdogInterval();
			}
			await this.processError(error, new Date(), data.executionMode, executionId, hooks);

			reject(error);
		}
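
		// The watchdog only ever resolves with `{ success: true }`, mimicking the worker's
		// success message. A workflow-level failure is reported via `racingPromisesResult.error`
		// (see `workflowDidSucceed` below), while an infrastructure failure rejects
		// `job.finished()` and lands in the catch block above.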

		// Optimization: only pull and unflatten execution data from the DB when it is needed.
		const executionHasPostExecutionPromises =
			this.activeExecutions.getPostExecutePromiseCount(executionId) > 0;

		if (executionHasPostExecutionPromises) {
			Logger.debug(
				`Reading execution data for execution ${executionId} from db for PostExecutionPromise.`,
			);
		} else {
			Logger.debug(
				`Skipping execution data for execution ${executionId} since there is no PostExecutionPromise.`,
			);
		}
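
		// Only executions with a waiting postExecutePromise (e.g. a listening webhook
		// connection that a Respond to Webhook node must answer) need the full execution
		// data here. Telemetry and event-log calls happen on the worker, so in the common
		// case nothing on the main instance is listening and the data can stay in the DB.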

		const fullExecutionData = await Container.get(ExecutionRepository).findSingleExecution(
			executionId,
			{
				includeData: executionHasPostExecutionPromises,
				unflattenData: executionHasPostExecutionPromises,
			},
		);
		if (!fullExecutionData) {
			return reject(new Error(`Could not find execution with id "${executionId}"`));
		}

		const runData: IRun = {
			data: {},
			finished: fullExecutionData.finished,
			mode: fullExecutionData.mode,
			startedAt: fullExecutionData.startedAt,
			stoppedAt: fullExecutionData.stoppedAt,
		} as IRun;

		if (executionHasPostExecutionPromises) {
			runData.data = (fullExecutionData as IExecutionResponse).data;
		}

		// NOTE: because of the optimization of not loading the execution data from the DB
		// when no post-execution promises are present, the execution data in runData.data
		// may not be available here. Any function receiving this runData has to check
		// whether runData.data is defined from this point on.
		this.activeExecutions.remove(executionId, runData);

		// Normally the static data would also be supplied here, but since it is only used
		// for sending data to the editor-UI, it is not needed.
		await hooks.executeHookFunctions('workflowExecuteAfter', [runData]);
		try {
			// Check if this execution data has to be removed from the database,
			// based on the workflow settings.
			const workflowSettings = data.workflowData.settings ?? {};
			const saveDataErrorExecution =
				workflowSettings.saveDataErrorExecution ?? config.getEnv('executions.saveDataOnError');
			const saveDataSuccessExecution =
				workflowSettings.saveDataSuccessExecution ??
				config.getEnv('executions.saveDataOnSuccess');

			const workflowDidSucceed = !racingPromisesResult.error;
			if (
				(workflowDidSucceed && saveDataSuccessExecution === 'none') ||
				(!workflowDidSucceed && saveDataErrorExecution === 'none')
			) {
				await Container.get(ExecutionRepository).softDelete(executionId);
			}
			// eslint-disable-next-line id-denylist
		} catch (err) {
			// We don't want errors here to crash n8n. Just log and proceed.
			console.log('Error removing saved execution from database. More details: ', err);
		}
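
		// `softDelete` flags the execution as deleted rather than removing the row outright;
		// presumably a separate pruning step hard-deletes it later (an assumption based on
		// the repository method name, not verified here).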

		resolve(runData);
	},
);

workflowExecution.catch(() => {
	// We "reject" this promise if the execution fails,
	// but the error has already been handled by processError,
	// so here we are just preventing unhandled-rejection crashes.
});

this.activeExecutions.attachWorkflowExecution(executionId, workflowExecution);
return executionId;
}
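
// Cancellation flows back into the PCancelable above: `attachWorkflowExecution` stores it
// on ActiveExecutions, and a later `stopExecution(executionId)` call presumably cancels it,
// which fires the `onCancel` handler that stops the Bull job (an assumption based on the
// `stopExecution` usage further down in this file).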

/**
 * Run the workflow in a subprocess.
 *
 * @param {boolean} [loadStaticData] If set, the static data will be loaded from
 * the workflow and added to the input data
 */
async runSubprocess(
	data: IWorkflowExecutionDataProcess,
	loadStaticData?: boolean,
	restartExecutionId?: string,
	responsePromise?: IDeferredPromise<IExecuteResponsePromiseData>,
): Promise<string> {
	const workflowId = data.workflowData.id;
	let startedAt = new Date();
	const subprocess = fork(pathJoin(__dirname, 'WorkflowRunnerProcess.js'));

	if (loadStaticData === true && workflowId) {
		data.workflowData.staticData = await WorkflowHelpers.getStaticDataById(workflowId);
	}

	data.restartExecutionId = restartExecutionId;

	// Register the active execution
	const executionId = await this.activeExecutions.add(data, subprocess, restartExecutionId);

	(data as unknown as IWorkflowExecutionDataProcessWithExecution).executionId = executionId;

	const workflowHooks = WorkflowExecuteAdditionalData.getWorkflowHooksMain(data, executionId);

	try {
		// Send the subprocess all the data it needs to run the workflow
		subprocess.send({ type: 'startWorkflow', data } as IProcessMessage);
	} catch (error) {
		await this.processError(error, new Date(), data.executionMode, executionId, workflowHooks);
		return executionId;
	}
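
	// The subprocess answers over the same IPC channel with typed messages; the handler
	// below dispatches on `message.type`: 'start', 'end', 'sendResponse', 'sendMessageToUI',
	// 'processError', 'processHook', 'timeout', 'startExecution' and 'finishExecution'.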

	// Start timeout for the execution
	let executionTimeout: NodeJS.Timeout;

	const workflowSettings = data.workflowData.settings ?? {};
	let workflowTimeout = workflowSettings.executionTimeout ?? config.getEnv('executions.timeout'); // initialize with default

	const processTimeoutFunction = (timeout: number) => {
		void this.activeExecutions.stopExecution(executionId, 'timeout');
		// Give the execution 20% of the timeout (at least 5 seconds) to stop gracefully
		// before the subprocess gets killed
		executionTimeout = setTimeout(() => subprocess.kill(), Math.max(timeout * 0.2, 5000));
	};

	if (workflowTimeout > 0) {
		workflowTimeout = Math.min(workflowTimeout, config.getEnv('executions.maxTimeout')) * 1000; // convert seconds to milliseconds
		// Start the timeout already now, but give the process at least 5 seconds to start.
		// Without this, a very short timeout could make the workflow execution time out
		// before it even got started, since the process start time can be quite long.
		executionTimeout = setTimeout(
			processTimeoutFunction,
			Math.max(5000, workflowTimeout),
			workflowTimeout,
		);
	}
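
	// Node's `setTimeout(fn, delay, ...args)` form is used here: `workflowTimeout` is both
	// the delay (at least 5s) and the argument that `processTimeoutFunction` receives as
	// `timeout`, which it uses to size the grace period before killing the subprocess.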

	// Keep a list of child executions spawned by the subprocess.
	// If any are still outstanding after the child process exits,
	// we remove them.
	const childExecutionIds: string[] = [];

	// Listen to data from the subprocess
	subprocess.on('message', async (message: IProcessMessage) => {
		Logger.debug(
			`Received child process message of type ${message.type} for execution ID ${executionId}.`,
			{ executionId },
		);
		if (message.type === 'start') {
			// Now that the execution has actually started, set the timeout again so that it does not time out too early
			startedAt = new Date();
			if (workflowTimeout > 0) {
				clearTimeout(executionTimeout);
				executionTimeout = setTimeout(processTimeoutFunction, workflowTimeout, workflowTimeout);
			}
		} else if (message.type === 'end') {
			clearTimeout(executionTimeout);
			this.activeExecutions.remove(executionId, message.data.runData);
		} else if (message.type === 'sendResponse') {
			if (responsePromise) {
				responsePromise.resolve(WebhookHelpers.decodeWebhookResponse(message.data.response));
			}
		} else if (message.type === 'sendMessageToUI') {
			// eslint-disable-next-line @typescript-eslint/no-unsafe-call
			WorkflowExecuteAdditionalData.sendMessageToUI.bind({ sessionId: data.sessionId })(
				message.data.source,
				message.data.message,
			);
		} else if (message.type === 'processError') {
			clearTimeout(executionTimeout);
			const executionError = message.data.executionError as ExecutionError;
			await this.processError(
				executionError,
				startedAt,
				data.executionMode,
				executionId,
				workflowHooks,
			);
		} else if (message.type === 'processHook') {
			this.processHookMessage(workflowHooks, message.data as IProcessMessageDataHook);
		} else if (message.type === 'timeout') {
			// Execution timed out and its process has been terminated
			const timeoutError = new WorkflowOperationError('Workflow execution timed out!');

			// No need to add hooks here, as the subprocess takes care of calling them
			await this.processError(timeoutError, startedAt, data.executionMode, executionId);
		} else if (message.type === 'startExecution') {
			const executionId = await this.activeExecutions.add(message.data.runData);
			childExecutionIds.push(executionId);
			subprocess.send({ type: 'executionId', data: { executionId } } as IProcessMessage);
		} else if (message.type === 'finishExecution') {
			const executionIdIndex = childExecutionIds.indexOf(message.data.executionId);
			if (executionIdIndex !== -1) {
				childExecutionIds.splice(executionIdIndex, 1);
			}

			if (message.data.result === undefined) {
				const noDataError = new WorkflowOperationError('Workflow finished with no result data');
				const subWorkflowHooks = WorkflowExecuteAdditionalData.getWorkflowHooksMain(
					data,
					message.data.executionId,
				);
				await this.processError(
					noDataError,
					startedAt,
					data.executionMode,
					message.data?.executionId,
					subWorkflowHooks,
				);
			} else {
				this.activeExecutions.remove(message.data.executionId, message.data.result);
			}
		}
	});
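
	// A minimal sketch of the message shape assumed above (illustrative only; the actual
	// IProcessMessage type comes from n8n-core):
	//
	//   interface IProcessMessage {
	//     type: string;
	//     data?: any;
	//   }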

	// Also get informed when the process exits, especially when it crashed or timed out
	subprocess.on('exit', async (code, signal) => {
		if (signal === 'SIGTERM') {
			Logger.debug(`Subprocess for execution ID ${executionId} timed out.`, { executionId });
			// Execution timed out and its process has been terminated
			const timeoutError = new WorkflowOperationError('Workflow execution timed out!');

			await this.processError(
				timeoutError,
				startedAt,
				data.executionMode,
				executionId,
				workflowHooks,
			);
		} else if (code !== 0) {
			Logger.debug(
				`Subprocess for execution ID ${executionId} finished with error code ${code}.`,
				{ executionId },
			);
			// The process exited with an error code, so something went wrong
			const executionError = new WorkflowOperationError(
				'Workflow execution process crashed for an unknown reason!',
			);

			await this.processError(
				executionError,
				startedAt,
				data.executionMode,
				executionId,
				workflowHooks,
			);
		}

		for (const executionId of childExecutionIds) {
			// When the child process exits and we still have pending child executions,
			// we mark them as finished. They will display as unknown to the user,
			// instead of staying "executing" forever when they actually are not anymore.
			this.activeExecutions.remove(executionId);
		}

		clearTimeout(executionTimeout);
	});

	return executionId;
}
}