/* eslint-disable @typescript-eslint/no-unsafe-argument */
/* eslint-disable @typescript-eslint/prefer-optional-chain */
/* eslint-disable @typescript-eslint/no-shadow */
/* eslint-disable @typescript-eslint/no-unsafe-assignment */
/* eslint-disable id-denylist */
/* eslint-disable prefer-spread */
/* eslint-disable @typescript-eslint/prefer-nullish-coalescing */
/* eslint-disable @typescript-eslint/no-unsafe-member-access */
/* eslint-disable @typescript-eslint/restrict-template-expressions */
import type express from 'express';
import { Container } from 'typedi';
import get from 'lodash/get';
import stream from 'stream';
import { promisify } from 'util';
import formidable from 'formidable';

import { BinaryDataService, NodeExecuteFunctions } from 'n8n-core';

import type {
	IBinaryData,
	IBinaryKeyData,
	IDataObject,
	IDeferredPromise,
	IExecuteData,
	IExecuteResponsePromiseData,
	IHttpRequestMethods,
	IN8nHttpFullResponse,
	INode,
	IRunExecutionData,
	IWebhookData,
	IWebhookResponseData,
	IWorkflowDataProxyAdditionalKeys,
	IWorkflowExecuteAdditionalData,
	Workflow,
	WorkflowExecuteMode,
} from 'n8n-workflow';
import {
	BINARY_ENCODING,
	createDeferredPromise,
	ErrorReporterProxy as ErrorReporter,
	LoggerProxy as Logger,
	NodeHelpers,
} from 'n8n-workflow';

import type {
	IExecutionDb,
	IResponseCallbackData,
	IWebhookManager,
	IWorkflowDb,
	IWorkflowExecutionDataProcess,
	WebhookCORSRequest,
	WebhookRequest,
} from '@/Interfaces';
import * as GenericHelpers from '@/GenericHelpers';
import * as ResponseHelper from '@/ResponseHelper';
import * as WorkflowHelpers from '@/WorkflowHelpers';
import { WorkflowRunner } from '@/WorkflowRunner';
import * as WorkflowExecuteAdditionalData from '@/WorkflowExecuteAdditionalData';
import { ActiveExecutions } from '@/ActiveExecutions';
import type { User } from '@db/entities/User';
import type { WorkflowEntity } from '@db/entities/WorkflowEntity';
import { EventsService } from '@/services/events.service';
import { OwnershipService } from './services/ownership.service';
import { parseBody } from './middlewares';
import { WorkflowsService } from './workflows/workflows.services';

const pipeline = promisify(stream.pipeline);

export const WEBHOOK_METHODS: IHttpRequestMethods[] = [
	'DELETE',
	'GET',
	'HEAD',
	'PATCH',
	'POST',
	'PUT',
];

export const webhookRequestHandler =
	(webhookManager: IWebhookManager) =>
	async (req: WebhookRequest | WebhookCORSRequest, res: express.Response) => {
		const { path } = req.params;
		const method = req.method;

		if (method !== 'OPTIONS' && !WEBHOOK_METHODS.includes(method)) {
			return ResponseHelper.sendErrorResponse(
				res,
				new Error(`The method ${method} is not supported.`),
			);
		}

		// Set up CORS headers only if the incoming request has an `origin` header
		if ('origin' in req.headers) {
			if (webhookManager.getWebhookMethods) {
				try {
					const allowedMethods = await webhookManager.getWebhookMethods(path);
					res.header('Access-Control-Allow-Methods', ['OPTIONS', ...allowedMethods].join(', '));
				} catch (error) {
					return ResponseHelper.sendErrorResponse(res, error as Error);
				}
			}
			res.header('Access-Control-Allow-Origin', req.headers.origin);
		}

		if (method === 'OPTIONS') {
			return ResponseHelper.sendSuccessResponse(res, {}, true, 204);
		}

		let response;
		try {
			response = await webhookManager.executeWebhook(req, res);
		} catch (error) {
			return ResponseHelper.sendErrorResponse(res, error as Error);
		}

		// Don't respond if a response was already sent
		if (response.noWebhookResponse !== true) {
			ResponseHelper.sendSuccessResponse(
				res,
				response.data,
				true,
				response.responseCode,
				response.headers,
			);
		}
	};
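
/*
 * Illustrative wiring only (not part of this module): `webhookRequestHandler` is a factory
 * that turns an `IWebhookManager` into an Express request handler. A minimal sketch, assuming
 * a hypothetical Express `app` and a `webhookManager` instance implementing `IWebhookManager`:
 *
 *   app.all('/webhook/:path', webhookRequestHandler(webhookManager));
 *
 * The returned handler rejects unsupported HTTP methods, answers CORS preflight requests,
 * and otherwise delegates to `webhookManager.executeWebhook(req, res)`.
 */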

/**
 * Returns all the webhooks which should be created for the given workflow
 */
export function getWorkflowWebhooks(
	workflow: Workflow,
	additionalData: IWorkflowExecuteAdditionalData,
	destinationNode?: string,
	ignoreRestartWebhooks = false,
): IWebhookData[] {
	// Check all nodes in the workflow to see whether they have webhooks

	const returnData: IWebhookData[] = [];

	let parentNodes: string[] | undefined;
	if (destinationNode !== undefined) {
		parentNodes = workflow.getParentNodes(destinationNode);
		// Also add the destination node in case it itself is a webhook node
		parentNodes.push(destinationNode);
	}

	for (const node of Object.values(workflow.nodes)) {
		if (parentNodes !== undefined && !parentNodes.includes(node.name)) {
			// If parentNodes are given, check only those nodes for webhooks and skip all others
			continue;
		}
		returnData.push.apply(
			returnData,
			NodeHelpers.getNodeWebhooks(workflow, node, additionalData, ignoreRestartWebhooks),
		);
	}

	return returnData;
}
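
/*
 * Illustrative usage (hypothetical `workflow` and `additionalData` values): to find every
 * webhook a workflow would register on activation, call
 *
 *   const webhooks = getWorkflowWebhooks(workflow, additionalData);
 *
 * Passing a `destinationNode` limits the result to that node and its parent nodes, which is
 * what a partial execution that only runs up to a given node needs.
 */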

export function decodeWebhookResponse(
	response: IExecuteResponsePromiseData,
): IExecuteResponsePromiseData {
	if (
		typeof response === 'object' &&
		typeof response.body === 'object' &&
		(response.body as IDataObject)['__@N8nEncodedBuffer@__']
	) {
		response.body = Buffer.from(
			(response.body as IDataObject)['__@N8nEncodedBuffer@__'] as string,
			BINARY_ENCODING,
		);
	}

	return response;
}

export function encodeWebhookResponse(
	response: IExecuteResponsePromiseData,
): IExecuteResponsePromiseData {
	if (typeof response === 'object' && Buffer.isBuffer(response.body)) {
		response.body = {
			// eslint-disable-next-line @typescript-eslint/naming-convention
			'__@N8nEncodedBuffer@__': response.body.toString(BINARY_ENCODING),
		};
	}

	return response;
}
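
/*
 * A minimal round-trip sketch (illustrative values only): `encodeWebhookResponse` replaces a
 * Buffer body with a string stored under the `__@N8nEncodedBuffer@__` key so the response can
 * be serialized (e.g. when it is handed over between processes), and `decodeWebhookResponse`
 * restores the Buffer on the receiving side:
 *
 *   const original = { body: Buffer.from('hello'), headers: {}, statusCode: 200 };
 *   const encoded = encodeWebhookResponse(original); // body is now { '__@N8nEncodedBuffer@__': '...' }
 *   const decoded = decodeWebhookResponse(encoded);  // body is a Buffer again
 */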

/**
 * Executes a webhook
 */
export async function executeWebhook(
	workflow: Workflow,
	webhookData: IWebhookData,
	workflowData: IWorkflowDb,
	workflowStartNode: INode,
	executionMode: WorkflowExecuteMode,
	sessionId: string | undefined,
	runExecutionData: IRunExecutionData | undefined,
	executionId: string | undefined,
	req: WebhookRequest,
	res: express.Response,
	responseCallback: (error: Error | null, data: IResponseCallbackData) => void,
	destinationNode?: string,
): Promise<string | undefined> {
	// Get the nodeType to know which responseMode is set
	const nodeType = workflow.nodeTypes.getByNameAndVersion(
		workflowStartNode.type,
		workflowStartNode.typeVersion,
	);
	if (nodeType === undefined) {
		const errorMessage = `The type of the webhook node "${workflowStartNode.name}" is not known`;
		responseCallback(new Error(errorMessage), {});
		throw new ResponseHelper.InternalServerError(errorMessage);
	}

	const additionalKeys: IWorkflowDataProxyAdditionalKeys = {
		$executionId: executionId,
	};

	let user: User;
	if (
		(workflowData as WorkflowEntity).shared?.length &&
		(workflowData as WorkflowEntity).shared[0].user
	) {
		user = (workflowData as WorkflowEntity).shared[0].user;
	} else {
		try {
			user = await Container.get(OwnershipService).getWorkflowOwnerCached(workflowData.id);
		} catch (error) {
			throw new ResponseHelper.NotFoundError('Cannot find workflow');
		}
	}

	// Prepare everything that is needed to run the workflow
	const additionalData = await WorkflowExecuteAdditionalData.getBase(user.id);

	// Get the responseMode
	const responseMode = workflow.expression.getSimpleParameterValue(
		workflowStartNode,
		webhookData.webhookDescription.responseMode,
		executionMode,
		additionalData.timezone,
		additionalKeys,
		undefined,
		'onReceived',
	);
	const responseCode = workflow.expression.getSimpleParameterValue(
		workflowStartNode,
		webhookData.webhookDescription.responseCode,
		executionMode,
		additionalData.timezone,
		additionalKeys,
		undefined,
		200,
	) as number;
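
	// Get the responseData option of the webhook node (defaults to 'firstEntryJson')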
	const responseData = workflow.expression.getSimpleParameterValue(
		workflowStartNode,
		webhookData.webhookDescription.responseData,
		executionMode,
		additionalData.timezone,
		additionalKeys,
		undefined,
		'firstEntryJson',
	);

	if (!['onReceived', 'lastNode', 'responseNode'].includes(responseMode as string)) {
		// If the response mode is not known, error out. It is probably better to fail here than
		// to silently fall back to the default, so that users find out as early as possible
		// (most likely while still testing) that something does not resolve properly.
		const errorMessage = `The response mode '${responseMode}' is not valid!`;
		responseCallback(new Error(errorMessage), {});
		throw new ResponseHelper.InternalServerError(errorMessage);
	}

	// Add the Response and Request so that this data can be accessed in the node
	additionalData.httpRequest = req;
	additionalData.httpResponse = res;
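
	// Whether the webhook node has its `options.binaryData` flag enabled, i.e. whether the raw
	// request body should be left unparsed so the node can handle it as binary data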
	const binaryData = workflow.expression.getSimpleParameterValue(
		workflowStartNode,
		'={{$parameter["options"]["binaryData"]}}',
		executionMode,
		additionalData.timezone,
		additionalKeys,
		undefined,
		false,
	);

	let didSendResponse = false;
	let runExecutionDataMerge = {};
	try {
		// Run the webhook function to see what should be returned and if
		// the workflow should be executed or not
		let webhookResultData: IWebhookResponseData;

		// If this is a `Webhook` or `Wait` node and binaryData is enabled, skip pre-parsing the request body
		if (!binaryData) {
			const { contentType, encoding } = req;
			if (contentType === 'multipart/form-data') {
				const form = formidable({
					multiples: true,
					encoding: encoding as formidable.BufferEncoding,
					// TODO: pass a custom `fileWriteStreamHandler` to create binary data files directly
				});
				req.body = await new Promise((resolve) => {
					form.parse(req, async (err, data, files) => {
						for (const key in data) {
							if (Array.isArray(data[key]) && data[key].length === 1) {
								data[key] = data[key][0];
							}
						}
						resolve({ data, files });
					});
				});
			} else {
				await parseBody(req);
			}
		}

		try {
			webhookResultData = await workflow.runWebhook(
				webhookData,
				workflowStartNode,
				additionalData,
				NodeExecuteFunctions,
				executionMode,
			);
			Container.get(EventsService).emit('nodeFetchedData', workflow.id, workflowStartNode);
		} catch (err) {
			// Send error response to webhook caller
			const errorMessage = 'Workflow Webhook Error: Workflow could not be started!';
			responseCallback(new Error(errorMessage), {});
			didSendResponse = true;

			// Add the error to the execution data so that it can be logged and sent to the Editor-UI
			runExecutionDataMerge = {
				resultData: {
					runData: {},
					lastNodeExecuted: workflowStartNode.name,
					error: {
						...err,
						message: err.message,
						stack: err.stack,
					},
				},
			};

			webhookResultData = {
				noWebhookResponse: true,
				// Add empty data so that it at least tries to "execute" the webhook,
				// which then gets the chance to throw the error.
				workflowData: [[{ json: {} }]],
			};
		}

		// Save static data if it changed
		await WorkflowsService.saveStaticData(workflow);

		const additionalKeys: IWorkflowDataProxyAdditionalKeys = {
			$executionId: executionId,
		};

		if (webhookData.webhookDescription.responseHeaders !== undefined) {
			const responseHeaders = workflow.expression.getComplexParameterValue(
				workflowStartNode,
				webhookData.webhookDescription.responseHeaders,
				executionMode,
				additionalData.timezone,
				additionalKeys,
				undefined,
				undefined,
			) as {
				entries?:
					| Array<{
							name: string;
							value: string;
					  }>
					| undefined;
			};

			if (responseHeaders !== undefined && responseHeaders.entries !== undefined) {
				for (const item of responseHeaders.entries) {
					res.setHeader(item.name, item.value);
				}
			}
		}

		if (webhookResultData.noWebhookResponse === true && !didSendResponse) {
			// The response was already sent
			responseCallback(null, {
				noWebhookResponse: true,
			});
			didSendResponse = true;
		}

		if (webhookResultData.workflowData === undefined) {
			// Workflow should not run
			if (webhookResultData.webhookResponse !== undefined) {
				// Data to respond with is given
				if (!didSendResponse) {
					responseCallback(null, {
						data: webhookResultData.webhookResponse,
						responseCode,
					});
					didSendResponse = true;
				}
			} else {
				// Send default response
				if (!didSendResponse) {
					responseCallback(null, {
						data: {
							message: 'Webhook call received',
						},
						responseCode,
					});
					didSendResponse = true;
				}
			}
			return;
		}

		// Now that we know that the workflow should run, we can return the default response
		// directly if responseMode is set to "onReceived" and a response should be sent
		if (responseMode === 'onReceived' && !didSendResponse) {
			// Return response directly and do not wait for the workflow to finish
			if (responseData === 'noData') {
				// Return without data
				responseCallback(null, {
					responseCode,
				});
			} else if (webhookResultData.webhookResponse !== undefined) {
				// Data to respond with is given
				responseCallback(null, {
					data: webhookResultData.webhookResponse,
					responseCode,
				});
			} else {
				responseCallback(null, {
					data: {
						message: 'Workflow was started',
					},
					responseCode,
				});
			}

			didSendResponse = true;
		}

		// Initialize the data of the webhook node
		const nodeExecutionStack: IExecuteData[] = [];
		nodeExecutionStack.push({
			node: workflowStartNode,
			data: {
				main: webhookResultData.workflowData,
			},
			source: null,
		});

		runExecutionData =
			runExecutionData ||
			({
				startData: {},
				resultData: {
					runData: {},
				},
				executionData: {
					contextData: {},
					nodeExecutionStack,
					waitingExecution: {},
				},
			} as IRunExecutionData);

		if (destinationNode && runExecutionData.startData) {
			runExecutionData.startData.destinationNode = destinationNode;
		}

		if (executionId !== undefined) {
			// If an executionId already exists, we are restarting an existing execution, so set
			// the data the webhook node returned on the waiting node.
			runExecutionData.executionData!.nodeExecutionStack[0].data.main =
				webhookResultData.workflowData;
		}

		if (Object.keys(runExecutionDataMerge).length !== 0) {
			// If data to merge was defined, add it to the execution data
			Object.assign(runExecutionData, runExecutionDataMerge);
		}
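
		// Assemble the data needed to start the workflow execution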
		const runData: IWorkflowExecutionDataProcess = {
			executionMode,
			executionData: runExecutionData,
			sessionId,
			workflowData,
			userId: user.id,
		};

		let responsePromise: IDeferredPromise<IN8nHttpFullResponse> | undefined;
		if (responseMode === 'responseNode') {
			responsePromise = await createDeferredPromise<IN8nHttpFullResponse>();
			responsePromise
				.promise()
				.then((response: IN8nHttpFullResponse) => {
					if (didSendResponse) {
						return;
					}

					const binaryData = (response.body as IDataObject)?.binaryData as IBinaryData;
					if (binaryData?.id) {
						res.header(response.headers);
						const stream = Container.get(BinaryDataService).getAsStream(binaryData.id);
						void pipeline(stream, res).then(() =>
							responseCallback(null, { noWebhookResponse: true }),
						);
					} else if (Buffer.isBuffer(response.body)) {
						res.header(response.headers);
						res.end(response.body);
						responseCallback(null, { noWebhookResponse: true });
					} else {
						// TODO: This probably needs some more changes depending on the options on the
						// Webhook Response node
						responseCallback(null, {
							data: response.body as IDataObject,
							headers: response.headers,
							responseCode: response.statusCode,
						});
					}

					didSendResponse = true;
				})
				.catch(async (error) => {
					ErrorReporter.error(error);
					Logger.error(
						`Error with Webhook-Response for execution "${executionId}": "${error.message}"`,
						{ executionId, workflowId: workflow.id },
					);
				});
		}
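		// Illustrative sketch (not part of this file): the deferred-promise pattern used above.
		// `createDeferredPromise` from n8n-workflow is assumed to behave roughly like this minimal
		// helper; the webhook handler keeps the resolve side, and the "Respond to Webhook" path
		// resolves it later with the full HTTP response that gets streamed or written out above.
		function createSimpleDeferred<T>() {
			let resolveFn!: (value: T) => void;
			let rejectFn!: (reason?: unknown) => void;
			const promise = new Promise<T>((resolve, reject) => {
				resolveFn = resolve;
				rejectFn = reject;
			});
			return { promise: () => promise, resolve: resolveFn, reject: rejectFn };
		}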
		// Start now to run the workflow
		const workflowRunner = new WorkflowRunner();
		executionId = await workflowRunner.run(
			runData,
			true,
			!didSendResponse,
			executionId,
			responsePromise,
		);

		Logger.verbose(
			`Started execution of workflow "${workflow.name}" from webhook with execution ID ${executionId}`,
			{ executionId },
		);
		/*
		 * refactor(core): Move event and telemetry handling into workers in queue mode (#7138)
		 *
		 * # Motivation
		 * In queue mode, a finished execution used to make the main instance pull the full
		 * execution data from the database, unflatten it, and use it to emit event-log and
		 * telemetry events as well as to serve any responses that still had to be returned
		 * (e.g. to Respond to Webhook nodes). For large executions this could cause
		 * out-of-memory errors, because all of the data had to be unpacked and transformed on
		 * the main instance, costing a lot of memory and time. That behaviour is now limited to
		 * the cases that actually need it, i.e. when the data has to be forwarded to a waiting
		 * webhook.
		 *
		 * # Changes
		 * Execution data is only required when the active execution has a `postExecutePromise`
		 * attached to it; such promises usually forward the data to another endpoint (for
		 * example a listening webhook connection). With the new helper
		 * `getPostExecutePromiseCount()`, the main instance can skip pulling the data entirely
		 * when nothing is listening. Previously there was always a post-execute promise because
		 * the telemetry events were triggered from the main instance; those calls have been
		 * moved into the workers, whose hook-function arrays now contain the various
		 * InternalHooks calls, so the workers issue the telemetry and event calls themselves.
		 * As a result, all event-log messages are logged on the worker's event log and the
		 * worker's event bus sends the events to their destinations; the main event log does
		 * pretty much nothing. Executions are no longer logged on the main event log, because
		 * that would require replicating every event 1:1 from the workers to the main
		 * instance(s) (this is possible and implemented, see the worker's
		 * `replicateToRedisEventLogFunction`, but it is disabled to reduce traffic over Redis).
		 * Partial events in the main log could confuse the recovery process and, ironically,
		 * lead it to corrupt the execution data by treating those executions as crashed.
		 *
		 * # Refactor
		 * The opportunity was also used to reduce duplicate code and to move some of the hook
		 * functionality into
		 * `packages/cli/src/executionLifecycleHooks/shared/sharedHookFunctions.ts`, in
		 * preparation for a future full refactor of the hooks.
		 */
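		// Illustrative sketch (not part of this file): the decision described in the note above.
		// The interfaces and method names below are assumptions for illustration only; apart from
		// `getPostExecutePromiseCount()`, which the refactor names, none of them are claimed to
		// match the actual n8n APIs.
		interface ActiveExecutionsLike {
			// How many callers are still waiting for this execution's data (e.g. a webhook response).
			getPostExecutePromiseCount(executionId: string): number;
			resolvePostExecutePromise(executionId: string, data: unknown): void;
		}
		interface ExecutionLoaderLike {
			// Loads and unflattens the full execution data from the database (the expensive step).
			loadFullExecution(executionId: string): Promise<unknown>;
		}
		async function forwardExecutionDataIfNeeded(
			executionId: string,
			activeExecutions: ActiveExecutionsLike,
			loader: ExecutionLoaderLike,
		): Promise<void> {
			// Telemetry and event-log calls now happen on the worker, so the main instance only
			// touches the execution data when something is actually waiting for it.
			if (activeExecutions.getPostExecutePromiseCount(executionId) === 0) {
				return; // nothing is listening: skip the expensive load entirely
			}
			const fullData = await loader.loadFullExecution(executionId);
			activeExecutions.resolvePostExecutePromise(executionId, fullData);
		}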
		if (!didSendResponse) {
			// Get a promise which resolves once the workflow has finished executing, then send the response
			const executePromise = Container.get(ActiveExecutions).getPostExecutePromise(
				executionId,
			) as Promise<IExecutionDb | undefined>;
			executePromise
				.then(async (data) => {
					if (data === undefined) {
						if (!didSendResponse) {
							responseCallback(null, {
								data: {
									message: 'Workflow executed successfully but no data was returned',
								},
								responseCode,
							});
							didSendResponse = true;
						}
						return undefined;
					}
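					// From here on the full execution data is available; the code below shapes it into
					// the HTTP response according to the webhook's responseMode / responseData options.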
					if (workflowData.pinData) {
						data.data.resultData.pinData = workflowData.pinData;
					}
					const returnData = WorkflowHelpers.getDataLastExecutedNodeData(data);
					if (data.data.resultData.error || returnData?.error !== undefined) {
						if (!didSendResponse) {
							responseCallback(null, {
								data: {
									message: 'Error in workflow',
								},
								responseCode: 500,
							});
						}
						didSendResponse = true;
						return data;
					}
					if (responseMode === 'responseNode') {
						if (!didSendResponse) {
							// No Webhook Response node sent any data, so fall back to a generic success message
							responseCallback(null, {
								data: {
									message: 'Workflow executed successfully',
								},
								responseCode,
							});
							didSendResponse = true;
						}
						return undefined;
					}
					if (returnData === undefined) {
						if (!didSendResponse) {
							responseCallback(null, {
								data: {
									message:
										'Workflow executed successfully but the last node did not return any data',
								},
								responseCode,
							});
						}
						didSendResponse = true;
						return data;
					}
					const additionalKeys: IWorkflowDataProxyAdditionalKeys = {
						$executionId: executionId,
					};
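					// The `$executionId` key is made available to the expressions resolved below
					// (responsePropertyName, responseContentType, responseBinaryPropertyName), so those
					// webhook options can reference the current execution.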
					if (!didSendResponse) {
						let data: IDataObject | IDataObject[] | undefined;
						if (responseData === 'firstEntryJson') {
							// Return the JSON data of the first entry
							if (returnData.data!.main[0]![0] === undefined) {
								responseCallback(new Error('No item to return was found'), {});
								didSendResponse = true;
								return undefined;
							}
							data = returnData.data!.main[0]![0].json;
							const responsePropertyName = workflow.expression.getSimpleParameterValue(
								workflowStartNode,
								webhookData.webhookDescription.responsePropertyName,
								executionMode,
								additionalData.timezone,
								additionalKeys,
								undefined,
								undefined,
							);
							if (responsePropertyName !== undefined) {
								data = get(data, responsePropertyName as string) as IDataObject;
							}
							const responseContentType = workflow.expression.getSimpleParameterValue(
								workflowStartNode,
								webhookData.webhookDescription.responseContentType,
								executionMode,
								additionalData.timezone,
								additionalKeys,
								undefined,
								undefined,
							);
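							// Both of the options above may be expressions, so they are resolved through the
							// workflow's expression engine before being applied to the response.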
							if (responseContentType !== undefined) {
								// Send the webhook response manually to be able to set the content-type
								res.setHeader('Content-Type', responseContentType as string);

								// Returning an object, boolean, number, ... causes problems so make sure to stringify if needed
								if (
									data !== null &&
									data !== undefined &&
									['Buffer', 'String'].includes(data.constructor.name)
								) {
									res.end(data);
								} else {
									res.end(JSON.stringify(data));
								}

								responseCallback(null, {
									noWebhookResponse: true,
								});
								didSendResponse = true;
							}
						} else if (responseData === 'firstEntryBinary') {
							// Return the binary data of the first entry
							data = returnData.data!.main[0]![0];

							if (data === undefined) {
								responseCallback(new Error('No item was found to return'), {});
								didSendResponse = true;
								return undefined;
							}
							if (data.binary === undefined) {
								responseCallback(new Error('No binary data was found to return'), {});
								didSendResponse = true;
								return undefined;
							}
							const responseBinaryPropertyName = workflow.expression.getSimpleParameterValue(
								workflowStartNode,
								webhookData.webhookDescription.responseBinaryPropertyName,
								executionMode,
								additionalData.timezone,
								additionalKeys,
								undefined,
								'data',
							);
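							// Note: 'data' is passed as the fallback value in the call above, i.e. when the node
							// does not set responseBinaryPropertyName the binary property named "data" is used
							// (an interpretation of this call, not stated elsewhere in this file).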
							if (responseBinaryPropertyName === undefined && !didSendResponse) {
								responseCallback(new Error("No 'responseBinaryPropertyName' is set"), {});
								didSendResponse = true;
							}
							const binaryData = (data.binary as IBinaryKeyData)[
								responseBinaryPropertyName as string
							];
							if (binaryData === undefined && !didSendResponse) {
								responseCallback(
									new Error(
										`The binary property '${responseBinaryPropertyName}' which should be returned does not exist`,
									),
									{},
								);
								didSendResponse = true;
							}
refactor(core): Move event and telemetry handling into workers in queue mode (#7138)
# Motivation
In Queue mode, finished executions would cause the main instance to
always pull all execution data from the database, unflatten it and then
use it to send out event log events and telemetry events, as well as
required returns to Respond to Webhook nodes etc.
This could cause OOM errors when the data was large, since it had to be
fully unpacked and transformed on the main instance’s side, using up a
lot of memory (and time).
This PR attempts to limit this behaviour to only happen in those
required cases where the data has to be forwarded to some waiting
webhook, for example.
# Changes
Execution data is only required in cases, where the active execution has
a `postExecutePromise` attached to it. These usually forward the data to
some other endpoint (e.g. a listening webhook connection).
By adding a helper `getPostExecutePromiseCount()`, we can decide that in
cases where there is nothing listening at all, there is no reason to
pull the data on the main instance.
Previously, there would always be postExecutePromises because the
telemetry events were called. Now, these have been moved into the
workers, which have been given the various InternalHooks calls to their
hook function arrays, so they themselves issue these telemetry and event
calls.
This results in all event log messages to now be logged on the worker’s
event log, as well as the worker’s eventbus being the one to send out
the events to destinations. The main event log does…pretty much nothing.
We are not logging executions on the main event log any more, because
this would require all events to be replicated 1:1 from the workers to
the main instance(s) (this IS possible and implemented, see the worker’s
`replicateToRedisEventLogFunction` - but it is not enabled to reduce the
amount of traffic over redis).
Partial events in the main log could confuse the recovery process and
would result in, ironically, the recovery corrupting the execution data
by considering them crashed.
# Refactor
I have also used the opportunity to reduce duplicate code and move some
of the hook functionality into
`packages/cli/src/executionLifecycleHooks/shared/sharedHookFunctions.ts`
in preparation for a future full refactor of the hooks
2023-09-13 22:58:15 -07:00
|
|
|
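							// If the binary property exists, the HTTP response is written here directly:
							// binary data persisted via the BinaryDataService is streamed to the client,
							// while inline (base64-encoded) payloads are sent as a single buffer.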
							if (!didSendResponse) {
								// Send the webhook response manually
								res.setHeader('Content-Type', binaryData.mimeType);
								if (binaryData.id) {
									const stream = Container.get(BinaryDataService).getAsStream(binaryData.id);
									await pipeline(stream, res);
								} else {
									res.end(Buffer.from(binaryData.data, BINARY_ENCODING));
								}

								responseCallback(null, {
									noWebhookResponse: true,
								});
							}
						} else if (responseData === 'noData') {
							// Return without data
							data = undefined;
						} else {
							// Return the JSON data of all the entries
							data = [];
							for (const entry of returnData.data!.main[0]!) {
								data.push(entry.json);
							}
						}

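						// If no branch above has already answered, fall back to returning the
						// collected JSON data together with the configured response code.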
						if (!didSendResponse) {
							responseCallback(null, {
								data,
								responseCode,
							});
						}
					}
					didSendResponse = true;

					return data;
				})
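				// Execution failures are surfaced to the webhook caller as a generic message
				// (if no response has been sent yet) and are rethrown as an internal server error.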
				.catch((e) => {
					if (!didSendResponse) {
						responseCallback(new Error('There was a problem executing the workflow'), {});
					}

					throw new ResponseHelper.InternalServerError(e.message);
				});
		}

		return executionId;
	} catch (e) {
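		// Pass known request errors (UnprocessableRequestError) through unchanged; wrap
		// anything else in a generic error, preserving the original as `cause`, so internal
		// details are not leaked to the webhook caller.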
		const error =
			e instanceof ResponseHelper.UnprocessableRequestError
				? e
				: new Error('There was a problem executing the workflow', { cause: e });
		if (didSendResponse) throw error;
		responseCallback(error, {});
		return;
	}
}

/**
 * Returns the base URL of the webhooks
 */
export function getWebhookBaseUrl() {
	let urlBaseWebhook = process.env.WEBHOOK_URL ?? GenericHelpers.getBaseUrl();
	if (!urlBaseWebhook.endsWith('/')) {
		urlBaseWebhook += '/';
	}
	return urlBaseWebhook;
}
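For reference, a small usage sketch of `getWebhookBaseUrl()`. The fallback returned by `GenericHelpers.getBaseUrl()` is assumed here to be derived from the instance's protocol/host/port configuration; the concrete value differs per setup.

// Hypothetical usage, not part of this module.
process.env.WEBHOOK_URL = 'https://n8n.example.com'; // configured without a trailing slash
console.log(getWebhookBaseUrl()); // -> 'https://n8n.example.com/'

delete process.env.WEBHOOK_URL;
// Without WEBHOOK_URL, the value comes from GenericHelpers.getBaseUrl(),
// e.g. 'http://localhost:5678/' on a default local setup (assumption).
console.log(getWebhookBaseUrl());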