feat: Benchmark env with run scripts (no-changelog) (#10477)

Tomi Turtiainen authored 2024-08-23 14:43:26 +03:00, committed by GitHub
parent 8403f4aa11
commit a1a1b0a7b4
22 changed files with 955 additions and 15 deletions


@@ -11,19 +11,19 @@ A PR title consists of these elements:
│ │ Capitalized
│ │ No period at the end.
│ │
│ └─⫸ Scope: API|core|editor|* Node
│ └─⫸ Scope: API|core|editor|* Node|benchmark
└─⫸ Type: build|ci|docs|feat|fix|perf|refactor|test
```
- PR title
- type
- scope (*optional*)
- summary
- type
- scope (_optional_)
- summary
- PR description
- body (optional)
- blank line
- footer (optional)
- body (optional)
- blank line
- footer (optional)
The structure looks like this:
@@ -46,13 +46,14 @@ If the prefix is `feat`, `fix` or `perf`, it will appear in the changelog. H
The scope should specify the place of the commit change as long as the commit clearly addresses one of the following supported scopes. (Otherwise, omit the scope!)
- `API` - changes to the *public* API
- `API` - changes to the _public_ API
- `core` - changes to the core / private API / backend of n8n
- `editor` - changes to the Editor UI
- `* Node` - changes to a specific node or trigger node ("`*`" to be replaced with the node name, not its display name), e.g.
- mattermost → Mattermost Node
- microsoftToDo → Microsoft To Do Node
- n8n → n8n Node
- mattermost → Mattermost Node
- microsoftToDo → Microsoft To Do Node
- n8n → n8n Node
- `benchmark` - changes to the benchmark CLI
### **Summary**
@@ -60,8 +61,8 @@ The summary contains a succinct description of the change:
- use the imperative, present tense: "change" not "changed" nor "changes"
- capitalize the first letter
- *no* dot (.) at the end
- do *not* include Linear ticket IDs etc. (e.g. N8N-1234)
- _no_ dot (.) at the end
- do _not_ include Linear ticket IDs etc. (e.g. N8N-1234)
- suffix with “(no-changelog)” for commits / PRs that should not get mentioned in the changelog.
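For example, a hypothetical title that follows all of the above rules:

```
feat(benchmark): Add cloud benchmark run scripts (no-changelog)
```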
### **Body (optional)**
@@ -95,7 +96,7 @@ Closes #<pr number>
A Breaking Change section should start with the phrase "`BREAKING CHANGE:` " followed by a summary of the breaking change, a blank line, and a detailed description of the breaking change that also includes migration instructions.
> 💡 A breaking change can additionally also be marked by adding a “`!`” to the header, right before the “`:`”, e.g. `feat(editor)!: Remove support for dark mode`
>
>
> This makes locating breaking changes easier when just skimming through commit messages.
> 💡 The breaking changes must also be added to the [packages/cli/BREAKING-CHANGES.md](https://github.com/n8n-io/n8n/blob/master/packages/cli/BREAKING-CHANGES.md) file located in the n8n repository.
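Putting these rules together, a hypothetical breaking-change commit message could look like this:

```
feat(core)!: Require Node.js 18 or newer

BREAKING CHANGE: n8n no longer starts on Node.js 16.

Upgrade your runtime to Node.js 18 or later before updating n8n.
```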
@@ -109,4 +110,4 @@ If the commit reverts a previous commit, it should begin with `revert:`, foll
The content of the commit message body should contain:
- information about the SHA of the commit being reverted in the following format: `This reverts commit <SHA>`,
- a clear description of the reason for reverting the commit message.
- a clear description of the reason for reverting the commit message.


@@ -0,0 +1,40 @@
name: Destroy Benchmark Env

on:
  schedule:
    - cron: '30 4 * * *'
  workflow_dispatch:

permissions:
  id-token: write
  contents: read

jobs:
  build:
    runs-on: ubuntu-latest
    environment: benchmark
    steps:
      - name: Checkout
        uses: actions/checkout@v4.1.1

      - name: Azure login
        uses: azure/login@v2.1.1
        with:
          client-id: ${{ secrets.BENCHMARK_ARM_CLIENT_ID }}
          tenant-id: ${{ secrets.BENCHMARK_ARM_TENANT_ID }}
          subscription-id: ${{ secrets.BENCHMARK_ARM_SUBSCRIPTION_ID }}

      - name: Setup node
        run: corepack enable
      - uses: actions/setup-node@v4.0.2
        with:
          node-version: 20.x
          cache: pnpm

      - name: Install dependencies
        run: pnpm install --frozen-lockfile

      - name: Destroy cloud env
        run: pnpm destroy-cloud-env
        working-directory: packages/@n8n/benchmark

.github/workflows/benchmark-nightly.yml (new file, 68 lines)

@@ -0,0 +1,68 @@
name: Run Nightly Benchmark
run-name: Benchmark ${{ inputs.n8n_tag }}

on:
  schedule:
    - cron: '0 2 * * *'
  workflow_dispatch:
    inputs:
      debug:
        description: 'Use debug logging'
        required: true
        default: 'false'
      n8n_tag:
        description: 'Name of the n8n docker tag to run the benchmark against.'
        required: true
        default: 'nightly'
      benchmark_tag:
        description: 'Name of the benchmark cli docker tag to run the benchmark with.'
        required: true
        default: 'latest'

env:
  ARM_CLIENT_ID: ${{ secrets.BENCHMARK_ARM_CLIENT_ID }}
  ARM_SUBSCRIPTION_ID: ${{ secrets.BENCHMARK_ARM_SUBSCRIPTION_ID }}
  ARM_TENANT_ID: ${{ secrets.BENCHMARK_ARM_TENANT_ID }}

permissions:
  id-token: write
  contents: read

jobs:
  build:
    runs-on: ubuntu-latest
    environment: benchmark
    steps:
      - name: Checkout
        uses: actions/checkout@v4.1.1

      - uses: hashicorp/setup-terraform@v3
        with:
          terraform_version: '1.8.5'

      - run: corepack enable
      - uses: actions/setup-node@v4.0.2
        with:
          node-version: 20.x
          cache: pnpm

      - name: Install dependencies
        run: pnpm install --frozen-lockfile

      - name: Azure login
        uses: azure/login@v2.1.1
        with:
          client-id: ${{ env.ARM_CLIENT_ID }}
          tenant-id: ${{ env.ARM_TENANT_ID }}
          subscription-id: ${{ env.ARM_SUBSCRIPTION_ID }}

      - name: Run the benchmark with debug logging
        if: github.event.inputs.debug == 'true'
        run: pnpm run-in-cloud sqlite --debug
        working-directory: packages/@n8n/benchmark

      - name: Run the benchmark
        if: github.event.inputs.debug != 'true'
        run: pnpm run-in-cloud sqlite
        working-directory: packages/@n8n/benchmark

packages/@n8n/benchmark/.gitignore (new file, 4 lines)

@@ -0,0 +1,4 @@
**/.terraform/*
**/*.tfstate*
**/*.tfvars
privatekey.pem


@@ -40,6 +40,8 @@ N8N_USER_EMAIL=user@n8n.io N8N_USER_PASSWORD=password ./bin/n8n-benchmark run
K6_PATH=/opt/homebrew/bin/k6 N8N_USER_EMAIL=user@n8n.io N8N_USER_PASSWORD=password ./bin/n8n-benchmark run
```
## Running in the cloud
There's a script for running the performance tests in a cloud environment. The script provisions the environment, sets up n8n in it, runs the tests, and destroys the environment.
```sh
pnpm run-in-cloud
```
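The n8n setup to benchmark is passed as the last argument, and docker tags can be overridden with flags (see the usage text in `scripts/runInCloud.mjs`). For example, to benchmark the nightly n8n image with verbose logging:

```sh
pnpm run-in-cloud sqlite --n8nTag=nightly --debug
```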
## Configuration
The configuration options the CLI accepts are defined in [config.ts](./src/config/config.ts)


@@ -0,0 +1,60 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hashicorp/azurerm" {
version = "3.115.0"
constraints = "~> 3.115.0"
hashes = [
"h1:O7C3Xb+MSOc9C/eAJ5C/CiJ4vuvUsYxxIzr9ZurmHNI=",
"zh:0ea93abd53cb872691bad6d5625bda88b5d9619ea813c208b36e0ee236308589",
"zh:26703cb9c2c38bc43e97bc83af03559d065750856ea85834b71fbcb2ef9d935c",
"zh:316255a3391c49fe9bd7c5b6aa53b56dd490e1083d19b722e7b8f956a2dfe004",
"zh:431637ae90c592126fb1ec813fee6390604275438a0d5e15904c65b0a6a0f826",
"zh:4cee0fa2e84f89853723c0bc72b7debf8ea2ffffc7ae34ff28d8a69269d3a879",
"zh:64a3a3c78ea877515365ed336bd0f3abbe71db7c99b3d2837915fbca168d429c",
"zh:7380d7b503b5a87fd71a31360c3eeab504f78e4f314824e3ceda724d9dc74cf0",
"zh:974213e05708037a6d2d8c58cc84981819138f44fe40e344034eb80e16ca6012",
"zh:9a91614de0476074e9c62bbf08d3bb9c64adbd1d3a4a2b5a3e8e41d9d6d5672f",
"zh:a438471c85b8788ab21bdef4cd5ca391a46cbae33bd0262668a80f5e6c4610e1",
"zh:bf823f2c941b336a1208f015466212b1a8fdf6da28abacf59bea708377709d9e",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}
provider "registry.terraform.io/hashicorp/random" {
version = "3.6.2"
hashes = [
"h1:VavG5unYCa3SYISMKF9pzc3718M0bhPlcbUZZGl7wuo=",
"zh:0ef01a4f81147b32c1bea3429974d4d104bbc4be2ba3cfa667031a8183ef88ec",
"zh:1bcd2d8161e89e39886119965ef0f37fcce2da9c1aca34263dd3002ba05fcb53",
"zh:37c75d15e9514556a5f4ed02e1548aaa95c0ecd6ff9af1119ac905144c70c114",
"zh:4210550a767226976bc7e57d988b9ce48f4411fa8a60cd74a6b246baf7589dad",
"zh:562007382520cd4baa7320f35e1370ffe84e46ed4e2071fdc7e4b1a9b1f8ae9b",
"zh:5efb9da90f665e43f22c2e13e0ce48e86cae2d960aaf1abf721b497f32025916",
"zh:6f71257a6b1218d02a573fc9bff0657410404fb2ef23bc66ae8cd968f98d5ff6",
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
"zh:9647e18f221380a85f2f0ab387c68fdafd58af6193a932417299cdcae4710150",
"zh:bb6297ce412c3c2fa9fec726114e5e0508dd2638cad6a0cb433194930c97a544",
"zh:f83e925ed73ff8a5ef6e3608ad9225baa5376446349572c2449c0c0b3cf184b7",
"zh:fbef0781cb64de76b1df1ca11078aecba7800d82fd4a956302734999cfd9a4af",
]
}
provider "registry.terraform.io/hashicorp/tls" {
version = "4.0.5"
hashes = [
"h1:zeG5RmggBZW/8JWIVrdaeSJa0OG62uFX5HY1eE8SjzY=",
"zh:01cfb11cb74654c003f6d4e32bbef8f5969ee2856394a96d127da4949c65153e",
"zh:0472ea1574026aa1e8ca82bb6df2c40cd0478e9336b7a8a64e652119a2fa4f32",
"zh:1a8ddba2b1550c5d02003ea5d6cdda2eef6870ece86c5619f33edd699c9dc14b",
"zh:1e3bb505c000adb12cdf60af5b08f0ed68bc3955b0d4d4a126db5ca4d429eb4a",
"zh:6636401b2463c25e03e68a6b786acf91a311c78444b1dc4f97c539f9f78de22a",
"zh:76858f9d8b460e7b2a338c477671d07286b0d287fd2d2e3214030ae8f61dd56e",
"zh:a13b69fb43cb8746793b3069c4d897bb18f454290b496f19d03c3387d1c9a2dc",
"zh:a90ca81bb9bb509063b736842250ecff0f886a91baae8de65c8430168001dad9",
"zh:c4de401395936e41234f1956ebadbd2ed9f414e6908f27d578614aaa529870d4",
"zh:c657e121af8fde19964482997f0de2d5173217274f6997e16389e7707ed8ece8",
"zh:d68b07a67fbd604c38ec9733069fbf23441436fecf554de6c75c032f82e1ef19",
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
]
}


@@ -0,0 +1,54 @@
data "azurerm_resource_group" "main" {
  name = var.resource_group_name
}

# Random prefix for the resources
resource "random_string" "prefix" {
  length  = 8
  special = false
}

# SSH key pair
resource "tls_private_key" "ssh_key" {
  algorithm = "RSA"
  rsa_bits  = 4096
}

# Dedicated Host Group & Hosts
resource "azurerm_dedicated_host_group" "main" {
  name                        = "${random_string.prefix.result}-hostgroup"
  location                    = var.location
  resource_group_name         = data.azurerm_resource_group.main.name
  platform_fault_domain_count = 1
  automatic_placement_enabled = false
  zone                        = 1

  tags = local.common_tags
}

resource "azurerm_dedicated_host" "hosts" {
  name                    = "${random_string.prefix.result}-host"
  location                = var.location
  dedicated_host_group_id = azurerm_dedicated_host_group.main.id
  sku_name                = var.host_size_family
  platform_fault_domain   = 0

  tags = local.common_tags
}

# VM
module "test_vm" {
  source = "./modules/benchmark-vm"

  location            = var.location
  resource_group_name = data.azurerm_resource_group.main.name
  prefix              = random_string.prefix.result
  dedicated_host_id   = azurerm_dedicated_host.hosts.id
  ssh_public_key      = tls_private_key.ssh_key.public_key_openssh
  vm_size             = var.vm_size

  tags = local.common_tags
}


@@ -0,0 +1,7 @@
output "vm_name" {
  value = azurerm_linux_virtual_machine.main.name
}

output "ip" {
  value = azurerm_public_ip.main.ip_address
}


@@ -0,0 +1,31 @@
variable "location" {
  description = "Region to deploy resources"
  default     = "East US"
}

variable "resource_group_name" {
  description = "Name of the resource group"
}

variable "prefix" {
  description = "Prefix to append to resources"
}

variable "dedicated_host_id" {
  description = "Dedicated Host ID"
}

variable "ssh_public_key" {
  description = "SSH Public Key"
}

variable "vm_size" {
  description = "VM Size"
  # 4 vCPUs, 16 GiB memory
  default     = "Standard_DC4s_v2"
}

variable "tags" {
  description = "Tags to apply to all resources created by this module"
  type        = map(string)
}


@@ -0,0 +1,136 @@
# Network
resource "azurerm_virtual_network" "main" {
  name                = "${var.prefix}-vnet"
  location            = var.location
  resource_group_name = var.resource_group_name
  address_space       = ["10.0.0.0/16"]

  tags = var.tags
}

resource "azurerm_subnet" "main" {
  name                 = "${var.prefix}-subnet"
  resource_group_name  = var.resource_group_name
  virtual_network_name = azurerm_virtual_network.main.name
  address_prefixes     = ["10.0.0.0/24"]
}

resource "azurerm_network_security_group" "ssh" {
  name                = "${var.prefix}-nsg"
  location            = var.location
  resource_group_name = var.resource_group_name

  security_rule {
    name                       = "AllowSSH"
    priority                   = 1001
    direction                  = "Inbound"
    access                     = "Allow"
    protocol                   = "Tcp"
    source_port_range          = "*"
    destination_port_range     = "22"
    source_address_prefix      = "*"
    destination_address_prefix = "*"
  }

  tags = var.tags
}

resource "azurerm_public_ip" "main" {
  name                = "${var.prefix}-pip"
  location            = var.location
  resource_group_name = var.resource_group_name
  allocation_method   = "Static"
  sku                 = "Standard"

  tags = var.tags
}

resource "azurerm_network_interface" "main" {
  name                = "${var.prefix}-nic"
  location            = var.location
  resource_group_name = var.resource_group_name

  ip_configuration {
    name                          = "${var.prefix}-ipconfig"
    subnet_id                     = azurerm_subnet.main.id
    private_ip_address_allocation = "Dynamic"
    public_ip_address_id          = azurerm_public_ip.main.id
  }

  tags = var.tags
}

resource "azurerm_network_interface_security_group_association" "ssh" {
  network_interface_id      = azurerm_network_interface.main.id
  network_security_group_id = azurerm_network_security_group.ssh.id
}

# Disk
resource "azurerm_managed_disk" "data" {
  name                 = "${var.prefix}-disk"
  location             = var.location
  resource_group_name  = var.resource_group_name
  storage_account_type = "PremiumV2_LRS"
  create_option        = "Empty"
  disk_size_gb         = "16"
  zone                 = 1

  tags = var.tags
}

resource "azurerm_virtual_machine_data_disk_attachment" "data" {
  managed_disk_id    = azurerm_managed_disk.data.id
  virtual_machine_id = azurerm_linux_virtual_machine.main.id
  lun                = "1"
  caching            = "None"
}

# VM
resource "azurerm_linux_virtual_machine" "main" {
  name                  = "${var.prefix}-vm"
  location              = var.location
  resource_group_name   = var.resource_group_name
  network_interface_ids = [azurerm_network_interface.main.id]
  dedicated_host_id     = var.dedicated_host_id
  zone                  = 1

  size           = var.vm_size
  admin_username = "benchmark"

  admin_ssh_key {
    username   = "benchmark"
    public_key = var.ssh_public_key
  }

  os_disk {
    caching              = "ReadWrite"
    storage_account_type = "Premium_LRS"
  }

  source_image_reference {
    publisher = "Canonical"
    offer     = "0001-com-ubuntu-server-jammy"
    sku       = "22_04-lts-gen2"
    version   = "latest"
  }

  identity {
    type = "SystemAssigned"
  }

  tags = var.tags
}

resource "azurerm_virtual_machine_extension" "entra_login" {
  name                 = "AADSSHLoginForLinux"
  virtual_machine_id   = azurerm_linux_virtual_machine.main.id
  publisher            = "Microsoft.Azure.ActiveDirectory"
  type                 = "AADSSHLoginForLinux"
  type_handler_version = "1.0"

  tags = var.tags
}


@@ -0,0 +1,3 @@
output "vm_name" {
  value = module.test_vm.vm_name
}


@@ -0,0 +1,23 @@
terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "~> 3.115.0"
    }
    random = {
      source = "hashicorp/random"
    }
  }

  required_version = "~> 1.8.5"
}

provider "azurerm" {
  features {}
  skip_provider_registration = true
}

provider "random" {}


@@ -0,0 +1,34 @@
variable "location" {
  description = "Region to deploy resources"
  default     = "East US"
}

variable "resource_group_name" {
  description = "Name of the resource group"
  default     = "n8n-benchmarking"
}

variable "host_size_family" {
  description = "Size Family for the Host Group"
  default     = "DCSv2-Type1"
}

variable "vm_size" {
  description = "VM Size"
  # 2 vCPUs, 8 GiB memory
  default     = "Standard_DC2s_v2"
}

variable "number_of_vms" {
  description = "Number of VMs to create"
  default     = 1
}

locals {
  common_tags = {
    Id        = "N8nBenchmark"
    Terraform = "true"
    Owner     = "Catalysts"
    CreatedAt = timestamp()
  }
}


@@ -10,6 +10,8 @@
		"start": "./bin/n8n-benchmark",
		"test": "echo \"Error: no test specified\" && exit 1",
		"typecheck": "tsc --noEmit",
		"run-in-cloud": "zx scripts/runInCloud.mjs",
		"destroy-cloud-env": "zx scripts/destroyCloudEnv.mjs",
		"watch": "concurrently \"tsc -w -p tsconfig.build.json\" \"tsc-alias -w -p tsconfig.build.json\""
	},
	"engines": {


@@ -0,0 +1,90 @@
#!/usr/bin/env zx
/**
 * Script that deletes all benchmark environment resources
 * that are older than 2 hours.
 *
 * Even though the environment is provisioned using terraform, the terraform
 * state is not persisted. Hence we can't use terraform to delete the resources.
 * We could store the state in a storage account, but then we wouldn't be able
 * to spin up new envs on-demand. Hence this design.
 *
 * Usage:
 *   zx scripts/destroyCloudEnv.mjs
 */
// @ts-check
import { $ } from 'zx';
const EXPIRE_TIME_IN_H = 2;
const EXPIRE_TIME_IN_MS = EXPIRE_TIME_IN_H * 60 * 60 * 1000;
const RESOURCE_GROUP_NAME = 'n8n-benchmarking';
async function main() {
	const resourcesResult =
		await $`az resource list --resource-group ${RESOURCE_GROUP_NAME} --query "[?tags.Id == 'N8nBenchmark'].{id:id, createdAt:tags.CreatedAt}" -o json`;

	const resources = JSON.parse(resourcesResult.stdout);
	const now = Date.now();

	const resourcesToDelete = resources
		.filter((resource) => {
			if (resource.createdAt === undefined) {
				return true;
			}

			const createdAt = new Date(resource.createdAt);
			const resourceExpiredAt = createdAt.getTime() + EXPIRE_TIME_IN_MS;

			return now > resourceExpiredAt;
		})
		.map((resource) => resource.id);

	if (resourcesToDelete.length === 0) {
		if (resources.length === 0) {
			console.log('No resources found in the resource group.');
		} else {
			console.log(
				`Found ${resources.length} resources in the resource group, but none are older than ${EXPIRE_TIME_IN_H} hours.`,
			);
		}

		return;
	}

	await deleteResources(resourcesToDelete);
}
async function deleteResources(resourceIds) {
	// We don't know the order in which the resources should be deleted.
	// Here's a poor person's approach: keep retrying deletion until everything is gone.
	const MAX_ITERATIONS = 100;
	let i = 0;
	const toDelete = [...resourceIds];

	console.log(`Deleting ${resourceIds.length} resources...`);
	while (toDelete.length > 0) {
		const resourceId = toDelete.shift();
		const deleted = await deleteById(resourceId);
		if (!deleted) {
			toDelete.push(resourceId);
		}

		if (i++ > MAX_ITERATIONS) {
			console.log(
				`Max iterations reached. Exiting. Could not delete ${toDelete.length} resources.`,
			);
			process.exit(1);
		}
	}
}
async function deleteById(id) {
	try {
		await $`az resource delete --ids ${id}`;
		return true;
	} catch (error) {
		return false;
	}
}

main();
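The script is exposed as the `destroy-cloud-env` package script (see `package.json` above), so the scheduled workflow can invoke it as:

```sh
pnpm destroy-cloud-env
```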


@@ -0,0 +1,185 @@
#!/usr/bin/env zx
/**
 * Script to run benchmarks in the cloud benchmark environment.
 * This script will:
 *   1. Provision a benchmark environment using Terraform.
 *   2. Run the benchmarks on the VM.
 *   3. Destroy the cloud environment.
 *
 * NOTE: Must be run in the root of the package.
 *
 * Usage:
 *   zx scripts/runInCloud.mjs [--debug] <n8n setup to use>
 */
// @ts-check
import fs from 'fs';
import minimist from 'minimist';
import { $, sleep, tmpdir, which } from 'zx';
import path from 'path';
import { SshClient } from './sshClient.mjs';
import { TerraformClient } from './terraformClient.mjs';
/**
* @typedef {Object} BenchmarkEnv
* @property {string} vmName
*/
const RESOURCE_GROUP_NAME = 'n8n-benchmarking';
const paths = {
	n8nSetupsDir: path.join(path.resolve('scripts'), 'runOnVm', 'n8nSetups'),
};
async function main() {
	const config = await parseAndValidateConfig();
	await ensureDependencies();

	console.log('Using n8n tag', config.n8nTag);
	console.log('Using benchmark cli tag', config.benchmarkTag);

	const terraformClient = new TerraformClient({
		privateKeyPath: paths.privateKeyPath,
		isVerbose: config.isVerbose,
	});

	try {
		const benchmarkEnv = await terraformClient.provisionEnvironment();
		await runBenchmarksOnVm(config, benchmarkEnv);
	} catch (error) {
		console.error('An error occurred while running the benchmarks:');
		console.error(error);
	} finally {
		await terraformClient.destroyEnvironment();
	}
}

async function ensureDependencies() {
	await which('terraform');
	await which('az');
}
/**
 *
 * @param {Config} config
 * @param {BenchmarkEnv} benchmarkEnv
 */
async function runBenchmarksOnVm(config, benchmarkEnv) {
	console.log(`Setting up the environment for ${config.n8nSetupToUse}...`);

	const sshClient = new SshClient({
		vmName: benchmarkEnv.vmName,
		resourceGroupName: RESOURCE_GROUP_NAME,
		verbose: config.isVerbose,
	});

	await ensureVmIsReachable(sshClient);

	const scriptsDir = await transferScriptsToVm(sshClient);

	// Bootstrap the environment with dependencies
	console.log('Running bootstrap script...');
	const bootstrapScriptPath = path.join(scriptsDir, 'bootstrap.sh');
	await sshClient.ssh(`chmod a+x ${bootstrapScriptPath} && ${bootstrapScriptPath}`);

	// Give some time for the VM to be ready
	await sleep(1000);

	console.log('Running benchmarks...');
	const runScriptPath = path.join(scriptsDir, 'runOnVm.mjs');
	await sshClient.ssh(
		`npx zx ${runScriptPath} --n8nDockerTag=${config.n8nTag} --benchmarkDockerTag=${config.benchmarkTag} ${config.n8nSetupToUse}`,
		{
			// Test run should always log its output
			verbose: true,
		},
	);
}

async function ensureVmIsReachable(sshClient) {
	await sshClient.ssh('echo "VM is reachable"');
}
/**
 * @returns Path where the scripts are located on the VM
 */
async function transferScriptsToVm(sshClient) {
	await sshClient.ssh('rm -rf ~/n8n');
	await sshClient.ssh('git clone --depth=1 https://github.com/n8n-io/n8n.git');

	return '~/n8n/packages/@n8n/benchmark/scripts/runOnVm';
}

function readAvailableN8nSetups() {
	const setups = fs.readdirSync(paths.n8nSetupsDir);

	return setups;
}
/**
 * @typedef {Object} Config
 * @property {boolean} isVerbose
 * @property {string} n8nSetupToUse
 * @property {string} n8nTag
 * @property {string} benchmarkTag
 *
 * @returns {Promise<Config>}
 */
async function parseAndValidateConfig() {
	const args = minimist(process.argv.slice(2), {
		boolean: ['debug'],
	});

	const n8nSetupToUse = await getAndValidateN8nSetup(args);
	const isVerbose = args.debug || false;
	const n8nTag = args.n8nTag || process.env.N8N_DOCKER_TAG || 'latest';
	const benchmarkTag = args.benchmarkTag || process.env.BENCHMARK_DOCKER_TAG || 'latest';

	return {
		isVerbose,
		n8nSetupToUse,
		n8nTag,
		benchmarkTag,
	};
}

/**
 * @param {minimist.ParsedArgs} args
 */
async function getAndValidateN8nSetup(args) {
	// Last parameter is the n8n setup to use
	const n8nSetupToUse = args._[args._.length - 1];
	if (!n8nSetupToUse) {
		printUsage();
		process.exit(1);
	}

	const availableSetups = readAvailableN8nSetups();

	if (!availableSetups.includes(n8nSetupToUse)) {
		printUsage();
		process.exit(1);
	}

	return n8nSetupToUse;
}

function printUsage() {
	const availableSetups = readAvailableN8nSetups();

	console.log('Usage: zx scripts/runInCloud.mjs <n8n setup name>');
	console.log('   eg: zx scripts/runInCloud.mjs sqlite');
	console.log('');
	console.log('Options:');
	console.log('  --debug          Enable verbose output');
	console.log('  --n8nTag         Docker tag for n8n image. Default is latest');
	console.log('  --benchmarkTag   Docker tag for benchmark cli image. Default is latest');
	console.log('');
	console.log('Available setups:');
	console.log(`  ${availableSetups.join(', ')}`);
}
main().catch(console.error);


@@ -0,0 +1,38 @@
#!/bin/bash
#
# Script to initialize the benchmark environment on a VM
#
set -euo pipefail;

CURRENT_USER=$(whoami)

# Mount the data disk
if [ -d "/n8n" ]; then
	echo "Data disk already mounted. Clearing it..."
	rm -rf /n8n/*
	rm -rf /n8n/.[!.]*
else
	sudo mkdir -p /n8n
	sudo parted /dev/sdc --script mklabel gpt mkpart xfspart xfs 0% 100%
	sudo mkfs.xfs /dev/sdc1
	sudo partprobe /dev/sdc1
	sudo mount /dev/sdc1 /n8n
fi

# Allow the current user to write to the data disk
sudo chmod a+rw /n8n

# Include nodejs v20 repository
curl -fsSL https://deb.nodesource.com/setup_20.x -o nodesource_setup.sh
sudo -E bash nodesource_setup.sh

# Install docker, docker compose and nodejs
sudo DEBIAN_FRONTEND=noninteractive apt-get update
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y docker.io docker-compose nodejs

# Add the current user to the docker group
sudo usermod -aG docker "$CURRENT_USER"

# Install zx
npm install zx


@@ -0,0 +1,16 @@
services:
  n8n:
    image: ghcr.io/n8n-io/n8n:${N8N_VERSION:-latest}
    environment:
      - N8N_DIAGNOSTICS_ENABLED=false
      - N8N_USER_FOLDER=/n8n
    ports:
      - 5678:5678
    volumes:
      - /n8n:/n8n
  benchmark:
    image: ghcr.io/n8n-io/n8n-benchmark:${N8N_BENCHMARK_VERSION:-latest}
    depends_on:
      - n8n
    environment:
      - N8N_BASE_URL=http://n8n:5678


@@ -0,0 +1,53 @@
#!/usr/bin/env zx
/**
 * This script runs the benchmarks using a given docker compose setup
 */
import { $ } from 'zx';

const [n8nSetupToUse] = argv._;

if (!n8nSetupToUse) {
	printUsage();
	process.exit(1);
}

function printUsage() {
	console.log('Usage: zx runOnVm.mjs <envName>');
	console.log('   eg: zx runOnVm.mjs sqlite');
}

async function main() {
	const composeFilePath = path.join(__dirname, 'n8nSetups', n8nSetupToUse);
	const n8nTag = argv.n8nDockerTag || process.env.N8N_DOCKER_TAG || 'latest';
	const benchmarkTag = argv.benchmarkDockerTag || process.env.BENCHMARK_DOCKER_TAG || 'latest';

	const $$ = $({
		cwd: composeFilePath,
		verbose: true,
		env: {
			N8N_VERSION: n8nTag,
			// The compose file reads N8N_BENCHMARK_VERSION, so export the tag under that name
			N8N_BENCHMARK_VERSION: benchmarkTag,
		},
	});

	try {
		await $$`docker-compose up -d n8n`;
		await $$`docker-compose run benchmark run`;
	} catch (error) {
		console.error('An error occurred while running the benchmarks:');
		console.error(error);
		console.error('');

		await dumpN8nInstanceLogs($$);
	} finally {
		await $$`docker-compose down`;
	}
}

async function dumpN8nInstanceLogs($$) {
	console.error('n8n instance logs:');
	await $$`docker-compose logs n8n`;
}

main();


@@ -0,0 +1,28 @@
// @ts-check
import { $ } from 'zx';

export class SshClient {
	/**
	 *
	 * @param {{ vmName: string; resourceGroupName: string; verbose?: boolean }} param0
	 */
	constructor({ vmName, resourceGroupName, verbose = false }) {
		this.vmName = vmName;
		this.resourceGroupName = resourceGroupName;
		this.verbose = verbose;
		this.$$ = $({
			verbose,
		});
	}

	/**
	 * @param {string} command
	 * @param {{ verbose?: boolean }} [options]
	 */
	async ssh(command, options = {}) {
		const $$ = options?.verbose ? $({ verbose: true }) : this.$$;

		await $$`az ssh vm -n ${this.vmName} -g ${this.resourceGroupName} --yes -- -o StrictHostKeyChecking=accept-new ${command}`;
	}
}
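A minimal usage sketch of this client (the VM name below is illustrative, and `az login` is assumed to have happened already):

```js
import { SshClient } from './sshClient.mjs';

// Hypothetical values for illustration
const sshClient = new SshClient({
	vmName: 'abc123-vm',
	resourceGroupName: 'n8n-benchmarking',
	verbose: true,
});

// Runs the command on the VM over az ssh
await sshClient.ssh('echo "VM is reachable"');
```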


@@ -0,0 +1,53 @@
// @ts-check
import path from 'path';
import { $, fs } from 'zx';

const paths = {
	infraCodeDir: path.resolve('infra'),
	terraformStateFile: path.join(path.resolve('infra'), 'terraform.tfstate'),
};

export class TerraformClient {
	constructor({ privateKeyPath, isVerbose = false }) {
		this.privateKeyPath = privateKeyPath;
		this.isVerbose = isVerbose;
		this.$$ = $({
			cwd: paths.infraCodeDir,
			verbose: isVerbose,
		});
	}

	/**
	 * @typedef {Object} BenchmarkEnv
	 * @property {string} vmName
	 *
	 * @returns {Promise<BenchmarkEnv>}
	 */
	async provisionEnvironment() {
		console.log('Provisioning cloud environment...');

		await this.$$`terraform init`;
		await this.$$`terraform apply -input=false -auto-approve`;

		return {
			vmName: await this.getTerraformOutput('vm_name'),
		};
	}

	async destroyEnvironment() {
		if (!fs.existsSync(paths.terraformStateFile)) {
			console.log('No cloud environment to destroy. Skipping...');
			return;
		}

		console.log('Destroying cloud environment...');

		await this.$$`terraform destroy -input=false -auto-approve`;
	}

	async getTerraformOutput(key) {
		const output = await this.$$`terraform output -raw ${key}`;
		return output.stdout.trim();
	}
}
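As `runInCloud.mjs` does, this client is best used with a try/finally so the environment is destroyed even when provisioning or the benchmark run fails. A minimal sketch, assuming it runs from the package root so `infra/` resolves:

```js
import { TerraformClient } from './terraformClient.mjs';

const terraformClient = new TerraformClient({ isVerbose: false });

try {
	// terraform init + apply; returns the vm_name output
	const { vmName } = await terraformClient.provisionEnvironment();
	console.log('Provisioned VM:', vmName);
} finally {
	// terraform destroy, skipped if no state file exists
	await terraformClient.destroyEnvironment();
}
```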


@@ -49,6 +49,12 @@ export class N8nApiClient {
			} else if (response.status === 400) {
				if (responsePayload.message === 'Instance owner already setup')
					console.log('Owner already set up');
			} else if (response.status === 404) {
				// The n8n instance's owner setup endpoint might not be available yet even though
				// the health endpoint returns ok. In this case we simply retry.
				console.log('Owner setup endpoint not available yet, retrying in 1s...');
				await this.delay(1000);
				await this.setupOwnerIfNeeded(loginDetails);
			} else {
				throw new Error(
					`Owner setup failed with status ${response.status}: ${responsePayload.message}`,
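Note that the 404 branch above retries recursively with no upper bound, so a persistently missing endpoint would recurse forever. A bounded variant could look like the sketch below; `retryWithDelay` is a hypothetical helper, not part of this diff:

```js
// Hypothetical bounded-retry helper (illustrative only, not part of this commit)
async function retryWithDelay(fn, { attempts = 10, delayMs = 1000 } = {}) {
	for (let attempt = 1; attempt <= attempts; attempt++) {
		try {
			return await fn();
		} catch (error) {
			if (attempt === attempts) throw error;
			// Wait before the next attempt
			await new Promise((resolve) => setTimeout(resolve, delayMs));
		}
	}
}
```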