Merge branch 'n8n-io:master' into MongoDB_vector_store

This commit is contained in:
Pash10g 2025-02-19 17:56:00 +02:00 committed by GitHub
commit 5cfd0bee11
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
1731 changed files with 118083 additions and 24658 deletions

View file

@ -1,6 +1,6 @@
services:
mariadb:
image: mariadb:10.9
image: mariadb:10.5
environment:
- MARIADB_DATABASE=n8n
- MARIADB_ROOT_PASSWORD=password
@ -10,6 +10,26 @@ services:
tmpfs:
- /var/lib/mysql
mysql-8.0.13:
image: mysql:8.0.13
environment:
- MYSQL_DATABASE=n8n
- MYSQL_ROOT_PASSWORD=password
ports:
- 3306:3306
tmpfs:
- /var/lib/mysql
mysql-8.4:
image: mysql:8.4
environment:
- MYSQL_DATABASE=n8n
- MYSQL_ROOT_PASSWORD=password
ports:
- 3306:3306
tmpfs:
- /var/lib/mysql
postgres:
image: postgres:16
restart: always

View file

@ -29,11 +29,14 @@ jobs:
tenant-id: ${{ secrets.BENCHMARK_ARM_TENANT_ID }}
subscription-id: ${{ secrets.BENCHMARK_ARM_SUBSCRIPTION_ID }}
- run: corepack enable
- uses: actions/setup-node@v4.0.2
- uses: actions/setup-node@v4.2.0
with:
node-version: 20.x
cache: 'pnpm'
- name: Setup corepack and pnpm
run: |
npm i -g corepack@0.31
corepack enable
- name: Install dependencies
run: pnpm install --frozen-lockfile

View file

@ -48,11 +48,14 @@ jobs:
with:
terraform_version: '1.8.5'
- run: corepack enable
- uses: actions/setup-node@v4.0.2
- uses: actions/setup-node@v4.2.0
with:
node-version: 20.x
cache: pnpm
- name: Setup corepack and pnpm
run: |
npm i -g corepack@0.31
corepack enable
- name: Install dependencies
run: pnpm install --frozen-lockfile

View file

@ -16,11 +16,14 @@ jobs:
steps:
- uses: actions/checkout@v4.1.1
- run: corepack enable
- uses: actions/setup-node@v4.0.2
- uses: actions/setup-node@v4.2.0
with:
node-version: 20.x
cache: 'pnpm'
- name: Setup corepack and pnpm
run: |
npm i -g corepack@0.31
corepack enable
- name: Install dependencies
run: pnpm install --frozen-lockfile

View file

@ -17,11 +17,14 @@ jobs:
- name: Check out branch
uses: actions/checkout@v4.1.1
- run: corepack enable
- uses: actions/setup-node@v4.0.2
- uses: actions/setup-node@v4.2.0
with:
node-version: 20.x
cache: 'pnpm'
- name: Setup corepack and pnpm
run: |
npm i -g corepack@0.31
corepack enable
- name: Install dependencies
run: pnpm install --frozen-lockfile

View file

@ -55,11 +55,16 @@ jobs:
- uses: actions/checkout@v4.1.1
with:
fetch-depth: 0
- run: corepack enable
- uses: actions/setup-node@v4.0.2
- uses: actions/setup-node@v4.2.0
with:
node-version: 20.x
cache: 'pnpm'
- name: Setup corepack and pnpm
run: |
npm i -g corepack@0.31
corepack enable
- run: pnpm install --frozen-lockfile
- name: Publish to Chromatic

View file

@ -16,11 +16,14 @@ jobs:
steps:
- uses: actions/checkout@v4.1.1
- run: corepack enable
- uses: useblacksmith/setup-node@v5
with:
node-version: 20.x
cache: pnpm
- name: Setup corepack and pnpm
run: |
npm i -g corepack@0.31
corepack enable
- name: Install dependencies
run: pnpm install --frozen-lockfile

View file

@ -23,11 +23,16 @@ jobs:
if: github.event_name != 'pull_request_review' || startsWith(github.event.pull_request.base.ref, 'release/')
steps:
- uses: actions/checkout@v4.1.1
- run: corepack enable
- uses: actions/setup-node@v4.0.2
- uses: actions/setup-node@v4.2.0
with:
node-version: 20.x
cache: 'pnpm'
- name: Setup corepack and pnpm
run: |
npm i -g corepack@0.31
corepack enable
- run: pnpm install --frozen-lockfile
- name: Setup build cache
@ -37,7 +42,7 @@ jobs:
run: pnpm build:backend
- name: Cache build artifacts
uses: actions/cache/save@v4.0.0
uses: actions/cache/save@v4.2.0
with:
path: ./packages/**/dist
key: ${{ github.sha }}:db-tests
@ -52,18 +57,23 @@ jobs:
DB_SQLITE_POOL_SIZE: 4
steps:
- uses: actions/checkout@v4.1.1
- run: corepack enable
- uses: actions/setup-node@v4.0.2
- uses: actions/setup-node@v4.2.0
with:
node-version: 20.x
cache: 'pnpm'
- name: Setup corepack and pnpm
run: |
npm i -g corepack@0.31
corepack enable
- run: pnpm install --frozen-lockfile
- name: Setup build cache
uses: rharkor/caching-for-turbo@v1.5
- name: Restore cached build artifacts
uses: actions/cache/restore@v4.0.0
uses: actions/cache/restore@v4.2.0
with:
path: ./packages/**/dist
key: ${{ github.sha }}:db-tests
@ -81,18 +91,23 @@ jobs:
DB_MYSQLDB_PASSWORD: password
steps:
- uses: actions/checkout@v4.1.1
- run: corepack enable
- uses: actions/setup-node@v4.0.2
- uses: actions/setup-node@v4.2.0
with:
node-version: 20.x
cache: 'pnpm'
- name: Setup corepack and pnpm
run: |
npm i -g corepack@0.31
corepack enable
- run: pnpm install --frozen-lockfile
- name: Setup build cache
uses: rharkor/caching-for-turbo@v1.5
- name: Restore cached build artifacts
uses: actions/cache/restore@v4.0.0
uses: actions/cache/restore@v4.2.0
with:
path: ./packages/**/dist
key: ${{ github.sha }}:db-tests
@ -108,6 +123,50 @@ jobs:
working-directory: packages/cli
run: pnpm test:mariadb --testTimeout 30000
mysql:
name: MySQL (${{ matrix.service-name }})
runs-on: ubuntu-latest
needs: build
timeout-minutes: 20
strategy:
matrix:
service-name: [ 'mysql-8.0.13', 'mysql-8.4' ]
env:
DB_MYSQLDB_PASSWORD: password
steps:
- uses: actions/checkout@v4.1.1
- uses: actions/setup-node@v4.2.0
with:
node-version: 20.x
- name: Setup corepack and pnpm
run: |
npm i -g corepack@0.31
corepack enable
- run: pnpm install --frozen-lockfile
- name: Setup build cache
uses: rharkor/caching-for-turbo@v1.5
- name: Restore cached build artifacts
uses: actions/cache/restore@v4.2.0
with:
path: ./packages/**/dist
key: ${{ github.sha }}:db-tests
- name: Start MySQL
uses: isbang/compose-action@v2.0.0
with:
compose-file: ./.github/docker-compose.yml
services: |
${{ matrix.service-name }}
- name: Test MySQL
working-directory: packages/cli
run: pnpm test:mysql --testTimeout 30000
postgres:
name: Postgres
runs-on: ubuntu-latest
@ -118,18 +177,23 @@ jobs:
DB_POSTGRESDB_POOL_SIZE: 1 # Detect connection pooling deadlocks
steps:
- uses: actions/checkout@v4.1.1
- run: corepack enable
- uses: actions/setup-node@v4.0.2
- uses: actions/setup-node@v4.2.0
with:
node-version: 20.x
cache: 'pnpm'
- name: Setup corepack and pnpm
run: |
npm i -g corepack@0.31
corepack enable
- run: pnpm install --frozen-lockfile
- name: Setup build cache
uses: rharkor/caching-for-turbo@v1.5
- name: Restore cached build artifacts
uses: actions/cache/restore@v4.0.0
uses: actions/cache/restore@v4.2.0
with:
path: ./packages/**/dist
key: ${{ github.sha }}:db-tests
@ -148,7 +212,7 @@ jobs:
notify-on-failure:
name: Notify Slack on failure
runs-on: ubuntu-latest
needs: [mariadb, postgres]
needs: [mariadb, postgres, mysql]
steps:
- name: Notify Slack on failure
uses: act10ns/slack@v2.0.0
@ -157,4 +221,4 @@ jobs:
status: ${{ job.status }}
channel: '#alerts-build'
webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }}
message: Postgres or MariaDB tests failed (${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})
message: Postgres, MariaDB or MySQL tests failed (${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})

View file

@ -17,11 +17,14 @@ jobs:
with:
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- run: corepack enable
- uses: useblacksmith/setup-node@v5
with:
node-version: 20.x
cache: pnpm
- name: Setup corepack and pnpm
run: |
npm i -g corepack@0.31
corepack enable
- name: Install dependencies
run: pnpm install --frozen-lockfile

View file

@ -11,8 +11,6 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4.1.1
with:
ref: master
- name: Set up QEMU
uses: docker/setup-qemu-action@v3.3.0

View file

@ -99,7 +99,7 @@ jobs:
run: pnpm cypress:install
- name: Cache build artifacts
uses: actions/cache/save@v4.0.0
uses: actions/cache/save@v4.2.0
with:
path: |
/github/home/.cache
@ -133,7 +133,7 @@ jobs:
- uses: pnpm/action-setup@v4.0.0
- name: Restore cached pnpm modules
uses: actions/cache/restore@v4.0.0
uses: actions/cache/restore@v4.2.0
with:
path: |
/github/home/.cache

View file

@ -25,11 +25,14 @@ jobs:
with:
ref: ${{ inputs.ref }}
- run: corepack enable
- uses: useblacksmith/setup-node@v5
with:
node-version: 20.x
cache: pnpm
- name: Setup corepack and pnpm
run: |
npm i -g corepack@0.31
corepack enable
- name: Install dependencies
run: pnpm install --frozen-lockfile

View file

@ -35,13 +35,17 @@ jobs:
fetch-depth: 0
ref: ${{ github.event.inputs.base-branch }}
- run: corepack enable
- uses: actions/setup-node@v4.0.2
- uses: actions/setup-node@v4.2.0
with:
node-version: 20.x
- run: npm install --prefix=.github/scripts --no-package-lock
- name: Setup corepack and pnpm
run: |
npm i -g corepack@0.31
corepack enable
- name: Bump package versions
run: |
echo "NEXT_RELEASE=$(node .github/scripts/bump-versions.mjs)" >> $GITHUB_ENV

View file

@ -25,11 +25,15 @@ jobs:
with:
fetch-depth: 0
- run: corepack enable
- uses: actions/setup-node@v4.0.2
- uses: actions/setup-node@v4.2.0
with:
node-version: 20.x
cache: 'pnpm'
- name: Setup corepack and pnpm
run: |
npm i -g corepack@0.31
corepack enable
- run: pnpm install --frozen-lockfile
- name: Set release version in env
@ -39,7 +43,7 @@ jobs:
run: pnpm build
- name: Cache build artifacts
uses: actions/cache/save@v4.0.0
uses: actions/cache/save@v4.2.0
with:
path: ./packages/**/dist
key: ${{ github.sha }}-release:build
@ -140,7 +144,7 @@ jobs:
steps:
- uses: actions/checkout@v4.1.1
- name: Restore cached build artifacts
uses: actions/cache/restore@v4.0.0
uses: actions/cache/restore@v4.2.0
with:
path: ./packages/**/dist
key: ${{ github.sha }}-release:build
@ -161,6 +165,14 @@ jobs:
version: ${{ needs.publish-to-npm.outputs.release }}
sourcemaps: packages/cli/dist packages/core/dist packages/nodes-base/dist packages/@n8n/n8n-nodes-langchain/dist
- name: Create a task runner release
uses: getsentry/action-release@v1.7.0
continue-on-error: true
with:
projects: ${{ secrets.SENTRY_TASK_RUNNER_PROJECT }}
version: ${{ needs.publish-to-npm.outputs.release }}
sourcemaps: packages/core/dist packages/workflow/dist packages/@n8n/task-runner/dist
trigger-release-note:
name: Trigger a release note
needs: [publish-to-npm, create-github-release]

View file

@ -22,7 +22,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- uses: actions/setup-node@v4.0.2
- uses: actions/setup-node@v4.2.0
with:
node-version: 20.x
- run: |

View file

@ -22,11 +22,16 @@ jobs:
!contains(github.event.pull_request.labels.*.name, 'community')
steps:
- uses: actions/checkout@v4.1.1
- run: corepack enable
- uses: actions/setup-node@v4.0.2
- uses: actions/setup-node@v4.2.0
with:
node-version: 20.x
cache: 'pnpm'
- name: Setup corepack and pnpm
run: |
npm i -g corepack@0.31
corepack enable
- run: pnpm install --frozen-lockfile
- name: Setup build cache
@ -36,7 +41,7 @@ jobs:
run: pnpm build:backend
- name: Cache build artifacts
uses: actions/cache/save@v4.0.0
uses: actions/cache/save@v4.2.0
with:
path: ./packages/**/dist
key: ${{ github.sha }}:workflow-tests
@ -48,18 +53,23 @@ jobs:
timeout-minutes: 10
steps:
- uses: actions/checkout@v4.1.1
- run: corepack enable
- uses: actions/setup-node@v4.0.2
- uses: actions/setup-node@v4.2.0
with:
node-version: 20.x
cache: 'pnpm'
- name: Setup corepack and pnpm
run: |
npm i -g corepack@0.31
corepack enable
- run: pnpm install --frozen-lockfile
- name: Setup build cache
uses: rharkor/caching-for-turbo@v1.5
- name: Restore cached build artifacts
uses: actions/cache/restore@v4.0.0
uses: actions/cache/restore@v4.2.0
with:
path: ./packages/**/dist
key: ${{ github.sha }}:workflow-tests

View file

@ -47,12 +47,15 @@ jobs:
with:
ref: ${{ inputs.ref }}
- run: corepack enable
- name: Use Node.js ${{ inputs.nodeVersion }}
uses: useblacksmith/setup-node@v5
with:
node-version: ${{ inputs.nodeVersion }}
cache: pnpm
- name: Setup corepack and pnpm
run: |
npm i -g corepack@0.31
corepack enable
- name: Install dependencies
run: pnpm install --frozen-lockfile

View file

@ -5,6 +5,7 @@ pnpm-lock.yaml
packages/editor-ui/index.html
packages/nodes-base/nodes/**/test
packages/cli/templates/form-trigger.handlebars
packages/cli/templates/form-trigger-completion.handlebars
cypress/fixtures
CHANGELOG.md
.github/pull_request_template.md

View file

@ -1,3 +1,113 @@
# [1.79.0](https://github.com/n8n-io/n8n/compare/n8n@1.78.0...n8n@1.79.0) (2025-02-13)
### Bug Fixes
* **Airtable Node:** Use item at correct index in base/getSchema ([#13174](https://github.com/n8n-io/n8n/issues/13174)) ([f2e3586](https://github.com/n8n-io/n8n/commit/f2e35869c143d15ea79017ec103370c4aa92a92f))
* **Basic LLM Chain Node:** Use correct mimetype for binary data ([#12978](https://github.com/n8n-io/n8n/issues/12978)) ([2b1eb04](https://github.com/n8n-io/n8n/commit/2b1eb049f2c639f054e7c5f671483e29fd600567))
* **Call n8n Sub-Workflow Tool Node:** Fix json type when using $fromAI ([#13102](https://github.com/n8n-io/n8n/issues/13102)) ([9e4e1ca](https://github.com/n8n-io/n8n/commit/9e4e1ca1f48b08143883668be037026075eddb25))
* **core:** Add an option to enable dual-stack lookup to support IPv6 for redis ([#13118](https://github.com/n8n-io/n8n/issues/13118)) ([be39d0a](https://github.com/n8n-io/n8n/commit/be39d0a0f11c0274d2be1d7e1579264a95b18f7b))
* **core:** Allow secrets manager secrets to be used in credentials ([#13110](https://github.com/n8n-io/n8n/issues/13110)) ([cae98e7](https://github.com/n8n-io/n8n/commit/cae98e733d4cac7b5082bae345be504d00876c4b))
* **core:** Do not save credential overwrites data into the database ([#13170](https://github.com/n8n-io/n8n/issues/13170)) ([298a7b0](https://github.com/n8n-io/n8n/commit/298a7b00386dcfb670c27e6e1cc374f73d00a7a5))
* **core:** Fix high volume of node operation errors in Sentry ([#13053](https://github.com/n8n-io/n8n/issues/13053)) ([e59d983](https://github.com/n8n-io/n8n/commit/e59d9830bfda51bcf43246e32bf88bd045f8b8ad))
* **core:** Fix resuming executions on test webhooks from Wait nodes ([#13168](https://github.com/n8n-io/n8n/issues/13168)) ([5dddf77](https://github.com/n8n-io/n8n/commit/5dddf772cf1704f65ed86cea1f4640e3b274b14e))
* **core:** Handle cancellation of waiting executions correctly ([#13051](https://github.com/n8n-io/n8n/issues/13051)) ([fc440eb](https://github.com/n8n-io/n8n/commit/fc440eb68bb6750dc096cdaeb50fed7cf0718b07))
* **core:** Handle credential decryption failures gracefully on the API ([#13166](https://github.com/n8n-io/n8n/issues/13166)) ([a4c5334](https://github.com/n8n-io/n8n/commit/a4c5334853cbc71eddbb035b86d3dda68c3ef81e))
* **core:** Handle missing `json` property on `nodeSuccessData` ([#13219](https://github.com/n8n-io/n8n/issues/13219)) ([aedea7a](https://github.com/n8n-io/n8n/commit/aedea7a76c1752410b8734d87a74bd870cd54e2d))
* **core:** Same version of merge node behaves differently after n8n update ([#13106](https://github.com/n8n-io/n8n/issues/13106)) ([df8f059](https://github.com/n8n-io/n8n/commit/df8f059504266667ffd30b5d706bad9dd7e09ab3))
* **editor:** Adjust project plus button color in dark mode ([#13175](https://github.com/n8n-io/n8n/issues/13175)) ([4c19bae](https://github.com/n8n-io/n8n/commit/4c19baea3dd232df67353e8637470c205c37bd92))
* **editor:** Correctly compare old parameter value for nested parameters ([#13179](https://github.com/n8n-io/n8n/issues/13179)) ([b6c0d96](https://github.com/n8n-io/n8n/commit/b6c0d96e4376e1b5fbc80227633eae57b1eff4c0))
* **editor:** Disable fromAI button for vector stores ([#13125](https://github.com/n8n-io/n8n/issues/13125)) ([bde8420](https://github.com/n8n-io/n8n/commit/bde84205f924d79c3eb2bd45d2e3745b53992a0d))
* **editor:** Don't show 'Test workflow' button if the canvas is read-only ([#13199](https://github.com/n8n-io/n8n/issues/13199)) ([56426e9](https://github.com/n8n-io/n8n/commit/56426e989ff4ca01efad0b6d525e12e68d25e87f))
* **editor:** Fix '=' handling in expressions ([#13129](https://github.com/n8n-io/n8n/issues/13129)) ([8f25a06](https://github.com/n8n-io/n8n/commit/8f25a06e6ca7d4d0a78b5aa379a8c124f55a0325))
* **editor:** Fix alignment in RMC component ([#13167](https://github.com/n8n-io/n8n/issues/13167)) ([78644b0](https://github.com/n8n-io/n8n/commit/78644b0ec7007cabd1a908cfd81ec1b3f06db63d))
* **editor:** Fix issues with push connect reconnection ([#13085](https://github.com/n8n-io/n8n/issues/13085)) ([fff98b1](https://github.com/n8n-io/n8n/commit/fff98b16bb7c86e08ec2b1a475eeb7b93bedf2de))
* **editor:** Fix prompt in easy ai template ([#13091](https://github.com/n8n-io/n8n/issues/13091)) ([2eabca5](https://github.com/n8n-io/n8n/commit/2eabca5613dece94231395a89e84c5e4433bb730))
* **editor:** Load only personal credentials when setting up a template ([#12826](https://github.com/n8n-io/n8n/issues/12826)) ([814e2a8](https://github.com/n8n-io/n8n/commit/814e2a89241bdc6a26defb6bfd3d87abdc477ae0))
* **editor:** Make connector buttons background opaque when dark mode is enabled system-wide ([#13180](https://github.com/n8n-io/n8n/issues/13180)) ([77be25d](https://github.com/n8n-io/n8n/commit/77be25d337e32f2bb32e191ee579f30d0442b537))
* **Gmail Trigger Node:** Prevent error for empty emails, improve type safety ([#13171](https://github.com/n8n-io/n8n/issues/13171)) ([115a367](https://github.com/n8n-io/n8n/commit/115a367caeb9cbec8597d328cd969fff5ab6d941))
* **Google Sheets Node:** Do not delete row_number key from input item ([#13158](https://github.com/n8n-io/n8n/issues/13158)) ([da5e4be](https://github.com/n8n-io/n8n/commit/da5e4be0fd645df24892f81c553bc1808e3fec93))
* **Google Sheets Node:** RMC should correctly map columns if data location set in options ([#13116](https://github.com/n8n-io/n8n/issues/13116)) ([5d05f7f](https://github.com/n8n-io/n8n/commit/5d05f7f436a32b98d35a7b87968990e845ec56bb))
* **Google Sheets Trigger Node:** Do not return header row in rowAdded mode ([#13119](https://github.com/n8n-io/n8n/issues/13119)) ([cd8b300](https://github.com/n8n-io/n8n/commit/cd8b300d5c9c2a2911b50c737c88ba799a2f0799))
* **Microsoft OneDrive Node:** Try to download file using downloadUrl ([#13200](https://github.com/n8n-io/n8n/issues/13200)) ([67cd05c](https://github.com/n8n-io/n8n/commit/67cd05c6dc9407b09ed89a9dbb041f2f6d0ac500))
* **OpenAI Node:** Limit chat history to context window when using memory ([#13137](https://github.com/n8n-io/n8n/issues/13137)) ([f057cfb](https://github.com/n8n-io/n8n/commit/f057cfb46af198566935d811ba294e596c9ab5ec))
* **Summarize Node:** Not dealing with null values properly ([#13044](https://github.com/n8n-io/n8n/issues/13044)) ([9324e4f](https://github.com/n8n-io/n8n/commit/9324e4ffe66a8268e3d7d0eb2c127e35caee7a4c))
### Features
* Add ConvertAPI cred only node ([#12663](https://github.com/n8n-io/n8n/issues/12663)) ([731a9a7](https://github.com/n8n-io/n8n/commit/731a9a79456066d72f5d7ba6c343eca420407979))
* **core:** Add an option to allow community nodes as tools ([#13075](https://github.com/n8n-io/n8n/issues/13075)) ([2b133aa](https://github.com/n8n-io/n8n/commit/2b133aa201325e27baddaa2bfd5995dca2093728))
* **core:** Add migration to create model for folders feature ([#13060](https://github.com/n8n-io/n8n/issues/13060)) ([03f4ed8](https://github.com/n8n-io/n8n/commit/03f4ed8445bb1e1f0ef5c82b0164e50db3e2c45c))
* **editor:** Add execute workflow buttons below triggers on the canvas ([#12769](https://github.com/n8n-io/n8n/issues/12769)) ([b17cbec](https://github.com/n8n-io/n8n/commit/b17cbec3af446e67db57a0927011d51a9317dff2))
* **editor:** Add docs link in $fromAI hover info tooltip ([#13097](https://github.com/n8n-io/n8n/issues/13097)) ([ff8b1c1](https://github.com/n8n-io/n8n/commit/ff8b1c11082f4e8caf2a474e57be3b3b328a1be3))
* **editor:** Expose `View Execution` links for erroneous sub-executions ([#13185](https://github.com/n8n-io/n8n/issues/13185)) ([11cf1cd](https://github.com/n8n-io/n8n/commit/11cf1cd23a181714e445ef58e97fdd7dca870dd7))
* **Microsoft Teams Node:** New operation sendAndWait ([#12964](https://github.com/n8n-io/n8n/issues/12964)) ([e925562](https://github.com/n8n-io/n8n/commit/e92556260f2b95022a852825f8475be369f0440a))
* **OpenAI Chat Model Node:** Add reasoning effort option to control the amount of reasoning tokens to use ([#13103](https://github.com/n8n-io/n8n/issues/13103)) ([76e0c99](https://github.com/n8n-io/n8n/commit/76e0c9961344d4baac60a50f1ec1e849e40586e6))
### Performance Improvements
* **core:** Batch workflow activation to speed up startup ([#13191](https://github.com/n8n-io/n8n/issues/13191)) ([17acf70](https://github.com/n8n-io/n8n/commit/17acf70591422bfea84b13f24c35d628bff4d35e))
# [1.78.0](https://github.com/n8n-io/n8n/compare/n8n@1.77.0...n8n@1.78.0) (2025-02-06)
### Bug Fixes
* **AI Agent Node:** Ignore SSL errors option for SQLAgent ([#13052](https://github.com/n8n-io/n8n/issues/13052)) ([a90529f](https://github.com/n8n-io/n8n/commit/a90529fd51ca88bc9640d24490dbeb2023c98e30))
* **Code Node:** Do not validate code within comments ([#12938](https://github.com/n8n-io/n8n/issues/12938)) ([cdfa225](https://github.com/n8n-io/n8n/commit/cdfa22593b69cf647c2a798d6571a9bbbd11c1b2))
* **core:** "Respond to Webhook" should work with workflows with waiting nodes ([#12806](https://github.com/n8n-io/n8n/issues/12806)) ([e8635f2](https://github.com/n8n-io/n8n/commit/e8635f257433748f4d7d2c4b0ae794de6bff5b28))
* **core:** Do not emit `workflow-post-execute` event for waiting executions ([#13065](https://github.com/n8n-io/n8n/issues/13065)) ([1593b6c](https://github.com/n8n-io/n8n/commit/1593b6cb4112ab2a85ca93c4eaec7d5f088895b1))
* **core:** Do not enable strict type validation by default for resource mapper ([#13037](https://github.com/n8n-io/n8n/issues/13037)) ([fdcff90](https://github.com/n8n-io/n8n/commit/fdcff9082b97314f8b04579ab6fa81c724916320))
* **core:** Fix empty node execution stack ([#12945](https://github.com/n8n-io/n8n/issues/12945)) ([7031569](https://github.com/n8n-io/n8n/commit/7031569a028bcc85558fcb614f8143d68a7f81f0))
* **core:** Only use new resource mapper type validation when it is enabled ([#13099](https://github.com/n8n-io/n8n/issues/13099)) ([a37c8e8](https://github.com/n8n-io/n8n/commit/a37c8e8fb86aaa3244ac13500ffa0e7c0d809a6f))
* **editor:** Actually enforce the version and don't break for old values in local storage ([#13025](https://github.com/n8n-io/n8n/issues/13025)) ([884a7e2](https://github.com/n8n-io/n8n/commit/884a7e23f84258756d8dcdd2dfe933bdedf61adc))
* **editor:** Add telemetry to source control feature ([#13016](https://github.com/n8n-io/n8n/issues/13016)) ([18eaa54](https://github.com/n8n-io/n8n/commit/18eaa5423dfc9348374c2cff4ae0e6f152268fbb))
* **editor:** Allow switch to `Fixed` for boolean and number parameters with invalid expressions ([#12948](https://github.com/n8n-io/n8n/issues/12948)) ([118be24](https://github.com/n8n-io/n8n/commit/118be24d25f001525ced03d9426a6129fa5a2053))
* **editor:** Allow to re-open sub-connection node creator if already active ([#13041](https://github.com/n8n-io/n8n/issues/13041)) ([16d59e9](https://github.com/n8n-io/n8n/commit/16d59e98edc427bf68edbce4cd2174a44d6dcfb1))
* **editor:** Code node overwrites code when switching nodes after edits ([#13078](https://github.com/n8n-io/n8n/issues/13078)) ([00e3ebc](https://github.com/n8n-io/n8n/commit/00e3ebc9e2e0b8cc2d88b678c3a2a21602dac010))
* **editor:** Fix execution running status listener for chat messages ([#12951](https://github.com/n8n-io/n8n/issues/12951)) ([4d55a29](https://github.com/n8n-io/n8n/commit/4d55a294600dc2c86f6f7019da923b66a4b9de7e))
* **editor:** Fix position of connector buttons when the line is straight ([#13034](https://github.com/n8n-io/n8n/issues/13034)) ([3a908ac](https://github.com/n8n-io/n8n/commit/3a908aca17f0bc1cf5fb5eb8813cc94f27f0bcdf))
* **editor:** Fix showing and hiding canvas edge toolbar when hovering ([#13009](https://github.com/n8n-io/n8n/issues/13009)) ([ac7bc4f](https://github.com/n8n-io/n8n/commit/ac7bc4f1911f913233eeeae5d229432fdff332c4))
* **editor:** Make AI transform node read only in executions view ([#12970](https://github.com/n8n-io/n8n/issues/12970)) ([ce1deb8](https://github.com/n8n-io/n8n/commit/ce1deb8aea528eef996fc774d0fff1dc61df5843))
* **editor:** Prevent infinite loop in expressions crashing the browser ([#12732](https://github.com/n8n-io/n8n/issues/12732)) ([8c2dbcf](https://github.com/n8n-io/n8n/commit/8c2dbcfeced70a0a84137773269cc6db2928d174))
* **editor:** Refine push modal layout ([#12886](https://github.com/n8n-io/n8n/issues/12886)) ([212a5bf](https://github.com/n8n-io/n8n/commit/212a5bf23eb11cc3296e7a8d002a4b7727d5193c))
* **editor:** SchemaView renders duplicate structures properly ([#12943](https://github.com/n8n-io/n8n/issues/12943)) ([0d8a544](https://github.com/n8n-io/n8n/commit/0d8a544975f72724db931778d7e3ace8a12b6cfc))
* **editor:** Update node issues when opening execution ([#12972](https://github.com/n8n-io/n8n/issues/12972)) ([1a91523](https://github.com/n8n-io/n8n/commit/1a915239c6571d7744023c6df6242dabe97c912e))
* **editor:** Use correct connection index when connecting adjacent nodes after deleting a node ([#12973](https://github.com/n8n-io/n8n/issues/12973)) ([c7a15d5](https://github.com/n8n-io/n8n/commit/c7a15d5980d181a865f8e2ec6a5f70d0681dcf56))
* **GitHub Node:** Don't truncate filenames retrieved from GitHub ([#12923](https://github.com/n8n-io/n8n/issues/12923)) ([7e18447](https://github.com/n8n-io/n8n/commit/7e1844757fe0d544e8881d229d16af95ed53fb21))
* **Google Cloud Firestore Node:** Fix potential prototype pollution vulnerability ([#13035](https://github.com/n8n-io/n8n/issues/13035)) ([f150f79](https://github.com/n8n-io/n8n/commit/f150f79ad6c7d43e036688b1de8d6c2c8140aca9))
* Increment runIndex in WorkflowToolV2 tool executions to avoid reusing out of date inputs ([#13008](https://github.com/n8n-io/n8n/issues/13008)) ([cc907fb](https://github.com/n8n-io/n8n/commit/cc907fbca9aa00fe07dd54a2fcac8983f2321ad1))
* Sync partial execution version of FE and BE, also allow enforcing a specific version ([#12840](https://github.com/n8n-io/n8n/issues/12840)) ([a155043](https://github.com/n8n-io/n8n/commit/a15504329bac582225185705566297d9cc27bf73))
* **Wise Node:** Use ISO formatting for timestamps ([#10288](https://github.com/n8n-io/n8n/issues/10288)) ([1a2d39a](https://github.com/n8n-io/n8n/commit/1a2d39a158c9a61bdaf11124b09ae70de65ebbf1))
### Features
* Add reusable frontend `composables` package ([#13077](https://github.com/n8n-io/n8n/issues/13077)) ([ef87da4](https://github.com/n8n-io/n8n/commit/ef87da4c193a08e089e48044906a4f5ce9959a22))
* Add support for client credentials with Azure Log monitor ([#13038](https://github.com/n8n-io/n8n/issues/13038)) ([2c2d631](https://github.com/n8n-io/n8n/commit/2c2d63157b7866f1a68cc45c5823e29570ccff77))
* Allow multi API creation via the UI ([#12845](https://github.com/n8n-io/n8n/issues/12845)) ([ad3250c](https://github.com/n8n-io/n8n/commit/ad3250ceb0df84379917e684d54d4100e3bf44f5))
* Allow setting API keys expiration ([#12954](https://github.com/n8n-io/n8n/issues/12954)) ([9bcbc2c](https://github.com/n8n-io/n8n/commit/9bcbc2c2ccbb88537e9b7554c92b631118d870f1))
* **core:** Add sorting to GET `/workflows` endpoint ([#13029](https://github.com/n8n-io/n8n/issues/13029)) ([b60011a](https://github.com/n8n-io/n8n/commit/b60011a1808d47f32ab84e685dba0e915e82df8f))
* **core:** Enable usage as a tool for more nodes ([#12930](https://github.com/n8n-io/n8n/issues/12930)) ([9deb759](https://github.com/n8n-io/n8n/commit/9deb75916e4eb63b899ba79b40cbd24b69a752db))
* **core:** Handle Declarative nodes more like regular nodes ([#13007](https://github.com/n8n-io/n8n/issues/13007)) ([a65a9e6](https://github.com/n8n-io/n8n/commit/a65a9e631b13bbe70ad64727fb1109ae7cd014eb))
* **Discord Node:** New sendAndWait operation ([#12894](https://github.com/n8n-io/n8n/issues/12894)) ([d47bfdd](https://github.com/n8n-io/n8n/commit/d47bfddd656367454b51da39cf87dbfb2bd59eb2))
* **editor:** Display schema preview for unexecuted nodes ([#12901](https://github.com/n8n-io/n8n/issues/12901)) ([0063bbb](https://github.com/n8n-io/n8n/commit/0063bbb30b45b3af92aff4c0f76b905d50a71a2d))
* **editor:** Easy $fromAI Button for AI Tools ([#12587](https://github.com/n8n-io/n8n/issues/12587)) ([2177376](https://github.com/n8n-io/n8n/commit/21773764d37c37a6464a3885d3fa548a5feb4fd8))
* **editor:** Show fixed collection parameter issues in UI ([#12899](https://github.com/n8n-io/n8n/issues/12899)) ([12d686c](https://github.com/n8n-io/n8n/commit/12d686ce52694f4c0b88f92a744451c1b0c66dec))
* **Facebook Graph API Node:** Update node to support API v22.0 ([#13024](https://github.com/n8n-io/n8n/issues/13024)) ([0bc0fc6](https://github.com/n8n-io/n8n/commit/0bc0fc6c1226688c29bf5f8f0ba7e8f244e16fbc))
* **HTTP Request Tool Node:** Relax binary data detection ([#13048](https://github.com/n8n-io/n8n/issues/13048)) ([b67a003](https://github.com/n8n-io/n8n/commit/b67a003e0b154d4e8c04392bec1c7b28171b5908))
* Human in the loop section ([#12883](https://github.com/n8n-io/n8n/issues/12883)) ([9590e5d](https://github.com/n8n-io/n8n/commit/9590e5d58b8964de9ce901bf07b537926d18b6b7))
* **n8n Form Node:** Add Hidden Fields ([#12803](https://github.com/n8n-io/n8n/issues/12803)) ([0da1114](https://github.com/n8n-io/n8n/commit/0da1114981978e371b216bdabc0c3bbdceeefa09))
* **n8n Form Node:** Respond with Text ([#12979](https://github.com/n8n-io/n8n/issues/12979)) ([182fc15](https://github.com/n8n-io/n8n/commit/182fc150bec62e9a5e2801d6c403e4a6bd35f728))
* **OpenAI Chat Model Node, OpenAI Node:** Include o3 models in model selection ([#13005](https://github.com/n8n-io/n8n/issues/13005)) ([37d152c](https://github.com/n8n-io/n8n/commit/37d152c148cafbe493c22e07f5d55ff24fcb0ca4))
* **Summarize Node:** Preserves original field data type ([#13069](https://github.com/n8n-io/n8n/issues/13069)) ([be5e49d](https://github.com/n8n-io/n8n/commit/be5e49d56c09d65c9768e948471626cfd3606c0c))
# [1.77.0](https://github.com/n8n-io/n8n/compare/n8n@1.76.0...n8n@1.77.0) (2025-01-29)

View file

@ -85,7 +85,7 @@ This automatically sets up file-links between modules which depend on each other
We recommend enabling [Node.js corepack](https://nodejs.org/docs/latest-v16.x/api/corepack.html) with `corepack enable`.
With Node.js v16.17 or newer, you can install the latest version of pnpm: `corepack prepare pnpm@latest --activate`. If you use an older version install at least version 7.18 of pnpm via: `corepack prepare pnpm@7.18.0 --activate`.
With Node.js v16.17 or newer, you can install the latest version of pnpm: `corepack prepare pnpm@latest --activate`. If you use an older version install at least version 9.15 of pnpm via: `corepack prepare pnpm@9.15.5 --activate`.
**IMPORTANT**: If you have installed Node.js via homebrew, you'll need to run `brew install corepack`, since homebrew explicitly removes `npm` and `corepack` from [the `node` formula](https://github.com/Homebrew/homebrew-core/blob/master/Formula/node.rb#L66).
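Putting the commands above together, a minimal setup sequence might look like this (a sketch only, assuming Node.js v16.17 or newer as noted above):
```bash
# Enable corepack (bundled with Node.js >= 16.17) and activate a pinned pnpm
corepack enable
corepack prepare pnpm@9.15.5 --activate

# Verify the toolchain before installing workspace dependencies
pnpm --version
```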

View file

@ -46,6 +46,7 @@ component_management:
- packages/@n8n/codemirror-lang/**
- packages/design-system/**
- packages/editor-ui/**
- packages/frontend/**
- component_id: nodes_packages
name: Nodes
paths:

View file

@ -4,6 +4,10 @@
import { getVisiblePopper, getVisibleSelect } from '../utils/popper';
export function getNdvContainer() {
return cy.getByTestId('ndv');
}
export function getCredentialSelect(eq = 0) {
return cy.getByTestId('node-credentials-select').eq(eq);
}
@ -225,7 +229,7 @@ export function populateMapperFields(fields: ReadonlyArray<[string, string]>) {
getParameterInputByName(name).type(value);
// Click on a parent to dismiss the pop up which hides the field below.
getParameterInputByName(name).parent().parent().parent().click('topLeft');
getParameterInputByName(name).parent().parent().parent().parent().click('topLeft');
}
}

View file

@ -101,8 +101,8 @@ export function getNodeCreatorItems() {
return cy.getByTestId('item-iterator-item');
}
export function getExecuteWorkflowButton() {
return cy.getByTestId('execute-workflow-button');
export function getExecuteWorkflowButton(triggerNodeName?: string) {
return cy.getByTestId(`execute-workflow-button${triggerNodeName ? `-${triggerNodeName}` : ''}`);
}
export function getManualChatButton() {
@ -294,8 +294,8 @@ export function addRetrieverNodeToParent(nodeName: string, parentNodeName: strin
addSupplementalNodeToParent(nodeName, 'ai_retriever', parentNodeName);
}
export function clickExecuteWorkflowButton() {
getExecuteWorkflowButton().click();
export function clickExecuteWorkflowButton(triggerNodeName?: string) {
getExecuteWorkflowButton(triggerNodeName).click();
}
export function clickManualChatButton() {

View file

@ -97,4 +97,45 @@ describe('Workflows', () => {
WorkflowsPage.getters.workflowCards().should('have.length', 1);
});
it('should preserve filters and pagination in URL', () => {
// Add a search query
WorkflowsPage.getters.searchBar().type('My');
// Add a tag filter
WorkflowsPage.getters.workflowFilterButton().click();
WorkflowsPage.getters.workflowTagsDropdown().click();
WorkflowsPage.getters.workflowTagItem('other-tag-1').click();
WorkflowsPage.getters.workflowsListContainer().click();
// Update sort order
WorkflowsPage.getters.workflowSortDropdown().click();
WorkflowsPage.getters.workflowSortItem('Sort by last created').click({ force: true });
// Update page size
WorkflowsPage.getters.workflowListPageSizeDropdown().click();
WorkflowsPage.getters.workflowListPageSizeItem('25').click();
// URL should contain all applied filters and pagination
cy.url().should('include', 'search=My');
// Cannot really know tag id, so just check if it contains 'tags='
cy.url().should('include', 'tags=');
cy.url().should('include', 'sort=lastCreated');
cy.url().should('include', 'pageSize=25');
// Reload the page
cy.reload();
// Check if filters and pagination are preserved
WorkflowsPage.getters.searchBar().should('have.value', 'My');
WorkflowsPage.getters.workflowFilterButton().click();
WorkflowsPage.getters.workflowTagsDropdown().should('contain.text', 'other-tag-1');
WorkflowsPage.getters
.workflowSortItem('Sort by last created')
.should('have.attr', 'aria-selected', 'true');
WorkflowsPage.getters
.workflowListPageSizeItem('25', false)
.should('have.attr', 'aria-selected', 'true');
// Also, check if the URL is preserved
cy.url().should('include', 'search=My');
cy.url().should('include', 'tags=');
cy.url().should('include', 'sort=lastCreated');
cy.url().should('include', 'pageSize=25');
});
});

View file

@ -1,3 +1,4 @@
import { EDIT_FIELDS_SET_NODE_NAME } from '../constants';
import { NDV } from '../pages/ndv';
import { WorkflowPage as WorkflowPageClass } from '../pages/workflow';
@ -24,6 +25,25 @@ describe('Inline expression editor', () => {
ndv.getters.outputPanel().click();
WorkflowPage.getters.inlineExpressionEditorOutput().should('not.exist');
});
it('should switch between expression and fixed using keyboard', () => {
WorkflowPage.actions.addNodeToCanvas(EDIT_FIELDS_SET_NODE_NAME);
WorkflowPage.actions.openNode(EDIT_FIELDS_SET_NODE_NAME);
// Should switch to expression with =
ndv.getters.assignmentCollectionAdd('assignments').click();
ndv.actions.typeIntoParameterInput('value', '=');
// Should complete {{ --> {{ | }}
WorkflowPage.getters.inlineExpressionEditorInput().click().type('{{');
WorkflowPage.getters.inlineExpressionEditorInput().should('have.text', '{{ }}');
// Should switch back to fixed with backspace on empty expression
ndv.actions.typeIntoParameterInput('value', '{selectall}{backspace}');
ndv.getters.parameterInput('value').click();
ndv.actions.typeIntoParameterInput('value', '{backspace}');
ndv.getters.inlineExpressionEditorInput().should('not.exist');
});
});
describe('Static data', () => {

View file

@ -62,12 +62,14 @@ describe('n8n Form Trigger', () => {
getVisibleSelect().contains('Dropdown').click();
cy.contains('button', 'Add Field Option').click();
cy.contains('label', 'Field Options')
.parent()
.parent()
.nextAll()
.find('[data-test-id="parameter-input-field"]')
.eq(0)
.type('Option 1');
cy.contains('label', 'Field Options')
.parent()
.parent()
.nextAll()
.find('[data-test-id="parameter-input-field"]')

View file

@ -1,3 +1,11 @@
import { clickGetBackToCanvas, getNdvContainer, getOutputTableRow } from '../composables/ndv';
import {
clickExecuteWorkflowButton,
getExecuteWorkflowButton,
getNodeByName,
getZoomToFitButton,
openNode,
} from '../composables/workflow';
import { SCHEDULE_TRIGGER_NODE_NAME, EDIT_FIELDS_SET_NODE_NAME } from '../constants';
import { NDV, WorkflowExecutionsTab, WorkflowPage as WorkflowPageClass } from '../pages';
import { clearNotifications, errorToast, successToast } from '../pages/notifications';
@ -214,6 +222,39 @@ describe('Execution', () => {
workflowPage.getters.clearExecutionDataButton().should('not.exist');
});
it('should test workflow with specific trigger node', () => {
cy.createFixtureWorkflow('Two_schedule_triggers.json');
getZoomToFitButton().click();
getExecuteWorkflowButton('Trigger A').should('not.be.visible');
getExecuteWorkflowButton('Trigger B').should('not.be.visible');
// Execute the workflow from trigger A
getNodeByName('Trigger A').realHover();
getExecuteWorkflowButton('Trigger A').should('be.visible');
getExecuteWorkflowButton('Trigger B').should('not.be.visible');
clickExecuteWorkflowButton('Trigger A');
// Check the output
successToast().contains('Workflow executed successfully');
openNode('Edit Fields');
getOutputTableRow(1).should('include.text', 'Trigger A');
clickGetBackToCanvas();
getNdvContainer().should('not.be.visible');
// Execute the workflow from trigger B
getNodeByName('Trigger B').realHover();
getExecuteWorkflowButton('Trigger A').should('not.be.visible');
getExecuteWorkflowButton('Trigger B').should('be.visible');
clickExecuteWorkflowButton('Trigger B');
// Check the output
successToast().contains('Workflow executed successfully');
openNode('Edit Fields');
getOutputTableRow(1).should('include.text', 'Trigger B');
});
describe('execution preview', () => {
it('when deleting the last execution, it should show empty state', () => {
workflowPage.actions.addInitialNodeToCanvas('Manual Trigger');

View file

@ -101,18 +101,22 @@ describe('Variables', () => {
variablesPage.getters.searchBar().type('NEW');
variablesPage.getters.variablesRows().should('have.length', 1);
variablesPage.getters.variableRow('NEW').should('contain.text', 'ENV_VAR_NEW');
cy.url().should('include', 'search=NEW');
// Multiple Results
variablesPage.getters.searchBar().clear().type('ENV_VAR');
variablesPage.getters.variablesRows().should('have.length', 2);
cy.url().should('include', 'search=ENV_VAR');
// All Results
variablesPage.getters.searchBar().clear().type('ENV');
variablesPage.getters.variablesRows().should('have.length', 3);
cy.url().should('include', 'search=ENV');
// No Results
variablesPage.getters.searchBar().clear().type('Some non-existent variable');
variablesPage.getters.variablesRows().should('not.exist');
cy.url().should('include', 'search=Some+non-existent+variable');
cy.contains('No variables found').should('be.visible');
});

View file

@ -1,4 +1,4 @@
import type { ExecutionError } from 'n8n-workflow/src';
import type { ExecutionError } from 'n8n-workflow';
import {
closeManualChatModal,

View file

@ -571,4 +571,13 @@ describe('Node Creator', () => {
addVectorStoreToolToParent('In-Memory Vector Store', AGENT_NODE_NAME);
});
it('should insert node to canvas with sendAndWait operation selected', () => {
nodeCreatorFeature.getters.canvasAddButton().click();
WorkflowPage.actions.addNodeToCanvas('Manual', false);
nodeCreatorFeature.actions.openNodeCreator();
cy.contains('Human in the loop').click();
nodeCreatorFeature.getters.getCreatorItem('Slack').click();
cy.contains('Send and Wait for Response').should('exist');
});
});

View file

@ -49,7 +49,8 @@ describe('Editors', () => {
ndv.getters
.sqlEditorContainer()
.find('.cm-content')
.type('SELECT * FROM {{ $json.table }}', { parseSpecialCharSequences: false });
// }} is inserted automatically by bracket matching
.type('SELECT * FROM {{ $json.table', { parseSpecialCharSequences: false });
workflowPage.getters
.inlineExpressionEditorOutput()
.should('have.text', 'SELECT * FROM test_table');

View file

@ -90,8 +90,8 @@ describe('Sub-workflow creation and typed usage', () => {
clickExecuteNode();
const expected = [
['-1', 'A String', '0:11:true2:3', 'aKey:-1', '[empty object]', 'false'],
['-1', 'Another String', '[empty array]', 'aDifferentKey:-1', '[empty array]', 'false'],
['-1', 'A String', '0:11:true2:3', 'aKey:-1', '[empty object]', 'true'],
['-1', 'Another String', '[empty array]', 'aDifferentKey:-1', '[empty array]', 'true'],
];
assertOutputTableContent(expected);
@ -110,8 +110,8 @@ describe('Sub-workflow creation and typed usage', () => {
clickExecuteNode();
const expected2 = [
['-1', '5', '0:11:true2:3', 'aKey:-1', '[empty object]', 'false'],
['-1', '5', '[empty array]', 'aDifferentKey:-1', '[empty array]', 'false'],
['-1', '5', '0:11:true2:3', 'aKey:-1', '[empty object]', 'true'],
['-1', '5', '[empty array]', 'aDifferentKey:-1', '[empty array]', 'true'],
];
assertOutputTableContent(expected2);
@ -167,8 +167,8 @@ describe('Sub-workflow creation and typed usage', () => {
);
assertOutputTableContent([
['[null]', '[null]', '[null]', '[null]', '[null]', 'false'],
['[null]', '[null]', '[null]', '[null]', '[null]', 'false'],
['[null]', '[null]', '[null]', '[null]', '[null]', 'true'],
['[null]', '[null]', '[null]', '[null]', '[null]', 'true'],
]);
clickExecuteNode();

View file

@ -7,6 +7,9 @@ import { WorkflowPage as WorkflowPageClass } from '../pages/workflow';
const WorkflowPage = new WorkflowPageClass();
const ndv = new NDV();
const getParameter = () => ndv.getters.parameterInput('jsCode').should('be.visible');
const getEditor = () => getParameter().find('.cm-content').should('exist');
describe('Code node', () => {
describe('Code editor', () => {
beforeEach(() => {
@ -40,10 +43,23 @@ describe('Code node', () => {
successToast().contains('Node executed successfully');
});
it('should show lint errors in `runOnceForAllItems` mode', () => {
const getParameter = () => ndv.getters.parameterInput('jsCode').should('be.visible');
const getEditor = () => getParameter().find('.cm-content').should('exist');
it('should allow switching between sibling code nodes', () => {
// Setup
getEditor().type('{selectall}').paste("console.log('code node 1')");
ndv.actions.close();
WorkflowPage.actions.addNodeToCanvas('Code', true, true);
getEditor().type('{selectall}').paste("console.log('code node 2')");
ndv.actions.close();
WorkflowPage.actions.openNode('Code');
ndv.actions.clickFloatingNode('Code1');
getEditor().should('have.text', "console.log('code node 2')");
ndv.actions.clickFloatingNode('Code');
getEditor().should('have.text', "console.log('code node 1')");
});
it('should show lint errors in `runOnceForAllItems` mode', () => {
getEditor()
.type('{selectall}')
.paste(`$input.itemMatching()
@ -66,9 +82,6 @@ return
});
it('should show lint errors in `runOnceForEachItem` mode', () => {
const getParameter = () => ndv.getters.parameterInput('jsCode').should('be.visible');
const getEditor = () => getParameter().find('.cm-content').should('exist');
ndv.getters.parameterInput('mode').click();
ndv.actions.selectOptionInParameterDropdown('mode', 'Run Once for Each Item');
getEditor()

View file

@ -0,0 +1,76 @@
{
"nodes": [
{
"parameters": {
"assignments": {
"assignments": [
{
"id": "6a8c3d85-26f8-4f28-ace9-55a196a23d37",
"name": "prevNode",
"value": "={{ $prevNode.name }}",
"type": "string"
}
]
},
"options": {}
},
"type": "n8n-nodes-base.set",
"typeVersion": 3.4,
"position": [200, -100],
"id": "351ce967-0399-4a78-848a-9cc69b831796",
"name": "Edit Fields"
},
{
"parameters": {
"rule": {
"interval": [{}]
}
},
"type": "n8n-nodes-base.scheduleTrigger",
"typeVersion": 1.2,
"position": [0, -100],
"id": "cf2f58a8-1fbb-4c70-b2b1-9e06bee7ec47",
"name": "Trigger A"
},
{
"parameters": {
"rule": {
"interval": [{}]
}
},
"type": "n8n-nodes-base.scheduleTrigger",
"typeVersion": 1.2,
"position": [0, 100],
"id": "4fade34e-2bfc-4a2e-a8ed-03ab2ed9c690",
"name": "Trigger B"
}
],
"connections": {
"Trigger A": {
"main": [
[
{
"node": "Edit Fields",
"type": "main",
"index": 0
}
]
]
},
"Trigger B": {
"main": [
[
{
"node": "Edit Fields",
"type": "main",
"index": 0
}
]
]
}
},
"pinData": {},
"meta": {
"instanceId": "0dd4627b77a5a795ab9bf073e5812be94dd8d1a5f012248ef2a4acac09be12cb"
}
}

View file

@ -151,6 +151,9 @@ export class NDV extends BasePage {
schemaViewNodeName: () => cy.getByTestId('run-data-schema-node-name'),
expressionExpanders: () => cy.getByTestId('expander'),
expressionModalOutput: () => cy.getByTestId('expression-modal-output'),
floatingNodes: () => cy.getByTestId('floating-node'),
floatingNodeByName: (name: string) =>
cy.getByTestId('floating-node').filter(`[data-node-name="${name}"]`),
};
actions = {
@ -201,7 +204,7 @@ export class NDV extends BasePage {
typeIntoParameterInput: (
parameterName: string,
content: string,
opts?: { parseSpecialCharSequences: boolean },
opts?: Partial<Cypress.TypeOptions>,
) => {
this.getters.parameterInput(parameterName).type(content, opts);
},
@ -339,6 +342,9 @@ export class NDV extends BasePage {
dragMainPanelToRight: () => {
cy.drag('[data-test-id=panel-drag-button]', [1000, 0], { moveTwice: true });
},
clickFloatingNode: (name: string) => {
this.getters.floatingNodeByName(name).realHover().click({ force: true });
},
};
}

View file

@ -47,6 +47,18 @@ export class WorkflowsPage extends BasePage {
workflowOwnershipDropdown: () => cy.getByTestId('user-select-trigger'),
workflowOwner: (email: string) => cy.getByTestId('user-email').contains(email),
workflowResetFilters: () => cy.getByTestId('workflows-filter-reset'),
workflowSortDropdown: () => cy.getByTestId('resources-list-sort'),
workflowSortItem: (sort: string) =>
cy.getByTestId('resources-list-sort-item').contains(sort).parent(),
workflowPagination: () => cy.getByTestId('resources-list-pagination'),
workflowListPageSizeDropdown: () => this.getters.workflowPagination().find('.select-trigger'),
workflowListPageSizeItem: (pageSize: string, visible: boolean = true) => {
if (visible) {
return cy.get('[role=option]').filter(':visible').contains(`${pageSize}/page`);
}
return cy.get('[role=option]').contains(`${pageSize}/page`).parent();
},
workflowsListContainer: () => cy.getByTestId('resources-list-wrapper'),
// Not yet implemented
// myWorkflows: () => cy.getByTestId('my-workflows'),
// allWorkflows: () => cy.getByTestId('all-workflows'),

View file

@ -16,7 +16,7 @@ RUN apk add --update git openssh graphicsmagick tini tzdata ca-certificates libc
# Update npm and install full-icu
COPY .npmrc /usr/local/etc/npmrc
RUN npm install -g npm@9.9.2 full-icu@1.5.0
RUN npm install -g npm@9.9.2 corepack@0.31 full-icu@1.5.0
# Activate corepack, and install pnpm
WORKDIR /tmp

View file

@ -18,7 +18,7 @@ RUN find . -type f -name "*.ts" -o -name "*.js.map" -o -name "*.vue" -o -name "t
# Deploy the `n8n` package into /compiled
RUN mkdir /compiled
RUN NODE_ENV=production DOCKER_BUILD=true pnpm --filter=n8n --prod --no-optional deploy /compiled
RUN NODE_ENV=production DOCKER_BUILD=true pnpm --filter=n8n --prod --no-optional --legacy deploy /compiled
# 2. Start with a new clean image with just the code that is needed to run n8n
FROM n8nio/base:${NODE_VERSION}

View file

@ -1,120 +1,112 @@
![n8n.io - Workflow Automation](https://user-images.githubusercontent.com/65276001/173571060-9f2f6d7b-bac0-43b6-bdb2-001da9694058.png)
# n8n - Workflow automation tool
# n8n - Secure Workflow Automation for Technical Teams
n8n is an extendable workflow automation tool. With a [fair-code](https://faircode.io) distribution model, n8n will always have visible source code, be available to self-host, and allow you to add your own custom functions, logic and apps. n8n's node-based approach makes it highly versatile, enabling you to connect anything to everything.
n8n is a workflow automation platform that gives technical teams the flexibility of code with the speed of no-code. With 400+ integrations, native AI capabilities, and a fair-code license, n8n lets you build powerful automations while maintaining full control over your data and deployments.
<a href="https://raw.githubusercontent.com/n8n-io/n8n/master/assets/n8n-screenshot.png"><img src="https://raw.githubusercontent.com/n8n-io/n8n/master/assets/n8n-screenshot.png" alt="n8n.io - Screenshot"></a>
![n8n.io - Screenshot](https://raw.githubusercontent.com/n8n-io/n8n/master/assets/n8n-screenshot-readme.png)
## Key Capabilities
- **Code When You Need It**: Write JavaScript/Python, add npm packages, or use the visual interface
- **AI-Native Platform**: Build AI agent workflows based on LangChain with your own data and models
- **Full Control**: Self-host with our fair-code license or use our [cloud offering](https://app.n8n.cloud/login)
- **Enterprise-Ready**: Advanced permissions, SSO, and air-gapped deployments
- **Active Community**: 400+ integrations and 900+ ready-to-use [templates](https://n8n.io/workflows)
## Contents
- [n8n - Workflow automation tool](#n8n---workflow-automation-tool)
- [Contents](#contents)
- [Demo](#demo)
- [Available integrations](#available-integrations)
- [Documentation](#documentation)
- [Start n8n in Docker](#start-n8n-in-docker)
- [Start with tunnel](#start-with-tunnel)
- [Persist data](#persist-data)
- [Start with other Database](#start-with-other-database)
- [Use with PostgresDB](#use-with-postgresdb)
- [Passing Sensitive Data via File](#passing-sensitive-data-via-file)
- [Example Setup with Lets Encrypt](#example-setup-with-lets-encrypt)
- [Updating a running docker-compose instance](#updating-a-running-docker-compose-instance)
- [Setting Timezone](#setting-timezone)
- [Build Docker-Image](#build-docker-image)
- [What does n8n mean and how do you pronounce it?](#what-does-n8n-mean-and-how-do-you-pronounce-it)
- [Support](#support)
- [Jobs](#jobs)
- [Upgrading](#upgrading)
- [License](#license)
- [Key Capabilities](#key-capabilities)
- [Contents](#contents)
- [Demo](#demo)
- [Available integrations](#available-integrations)
- [Documentation](#documentation)
- [Start n8n in Docker](#start-n8n-in-docker)
- [Start n8n with tunnel](#start-n8n-with-tunnel)
- [Use with PostgreSQL](#use-with-postgresql)
- [Passing sensitive data using files](#passing-sensitive-data-using-files)
- [Example server setups](#example-server-setups)
- [Updating](#updating)
- [Pull latest (stable) version](#pull-latest-stable-version)
- [Pull specific version](#pull-specific-version)
- [Pull next (unstable) version](#pull-next-unstable-version)
- [Updating with Docker Compose](#updating-with-docker-compose)
- [Setting Timezone](#setting-the-timezone)
- [Build Docker-Image](#build-docker-image)
- [What does n8n mean and how do you pronounce it?](#what-does-n8n-mean-and-how-do-you-pronounce-it)
- [Support](#support)
- [Jobs](#jobs)
- [License](#license)
## Demo
[:tv: A short video (< 4 min)](https://www.youtube.com/watch?v=RpjQTGKm-ok) that goes over key concepts of creating workflows in n8n.
This [:tv: short video (< 4 min)](https://www.youtube.com/watch?v=RpjQTGKm-ok) goes over key concepts of creating workflows in n8n.
## Available integrations
n8n has 200+ different nodes to automate workflows. The list can be found on: [https://n8n.io/nodes](https://n8n.io/nodes)
n8n has 200+ different nodes to automate workflows. A full list can be found at [https://n8n.io/integrations](https://n8n.io/integrations).
## Documentation
The official n8n documentation can be found under: [https://docs.n8n.io](https://docs.n8n.io)
The official n8n documentation can be found at [https://docs.n8n.io](https://docs.n8n.io).
Additional information and example workflows on the n8n.io website: [https://n8n.io](https://n8n.io)
Additional information and example workflows are available on the website at [https://n8n.io](https://n8n.io).
## Start n8n in Docker
In the terminal, enter the following:
```bash
docker volume create n8n_data
docker run -it --rm \
--name n8n \
-p 5678:5678 \
-v ~/.n8n:/home/node/.n8n \
-v n8n_data:/home/node/.n8n \
docker.n8n.io/n8nio/n8n
```
This command will download the required n8n image and start your container.
You can then access n8n by opening:
[http://localhost:5678](http://localhost:5678)
## Start with tunnel
To save your work between container restarts, it also mounts a docker volume, `n8n_data`. The workflow data gets saved in an SQLite database in the user folder (`/home/node/.n8n`). This folder also contains important data like the webhook URL and the encryption key used for securing credentials.
> **WARNING**: This is only meant for local development and testing. Should not be used in production!
If this data can't be found at startup n8n automatically creates a new key and any existing credentials can no longer be decrypted.
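Because that folder holds the encryption key, it is worth keeping a copy of the `n8n_data` volume. A minimal sketch using a throwaway Alpine container (the backup file name is illustrative):
```bash
# Archive the contents of the n8n_data volume (mounted at /home/node/.n8n in the n8n container)
docker run --rm \
 -v n8n_data:/data:ro \
 -v "$PWD":/backup \
 alpine tar czf /backup/n8n_data-backup.tgz -C /data .
```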
To be able to use webhooks which all triggers of external services like Github
rely on n8n has to be reachable from the web. To make that easy n8n has a
special tunnel service (uses this code: [https://github.com/n8n-io/localtunnel](https://github.com/n8n-io/localtunnel)) which redirects requests from our servers to your local
n8n instance.
## Start n8n with tunnel
> **WARNING**: This is only meant for local development and testing and should **NOT** be used in production!
n8n must be reachable from the internet to make use of webhooks - essential for triggering workflows from external web-based services such as GitHub. To make this easier, n8n has a special tunnel service which redirects requests from our servers to your local n8n instance. You can inspect the code running this service here: [https://github.com/n8n-io/localtunnel](https://github.com/n8n-io/localtunnel)
To use it simply start n8n with `--tunnel`
```bash
docker volume create n8n_data
docker run -it --rm \
--name n8n \
-p 5678:5678 \
-v ~/.n8n:/home/node/.n8n \
-v n8n_data:/home/node/.n8n \
docker.n8n.io/n8nio/n8n \
start --tunnel
```
## Persist data
## Use with PostgreSQL
The workflow data gets by default saved in an SQLite database in the user
folder (`/home/node/.n8n`). That folder also additionally contains the
settings like webhook URL and encryption key.
Note that the folder needs to be writable by user with UID/GID 1000.
By default, n8n uses SQLite to save credentials, past executions and workflows. However, n8n also supports using PostgreSQL.
> **WARNING**: Even when using a different database, it is still important to
persist the `/home/node/.n8n` folder, which also contains essential n8n
user data including the encryption key for the credentials.
In the following commands, replace the placeholders (depicted within angled brackets, e.g. `<POSTGRES_USER>`) with the actual data:
```bash
docker run -it --rm \
--name n8n \
-p 5678:5678 \
-v ~/.n8n:/home/node/.n8n \
docker.n8n.io/n8nio/n8n
```
docker volume create n8n_data
### Start with other Database
By default n8n uses SQLite to save credentials, past executions and workflows.
n8n however also supports PostgresDB.
It is important to still persist the data in the `/home/node/.n8n` folder. The reason
is that it contains n8n user data. That is the name of the webhook
(in case) the n8n tunnel gets used and even more important the encryption key
for the credentials. If none gets found n8n creates automatically one on
startup. In case credentials are already saved with a different encryption key
it can not be used anymore as encrypting it is not possible anymore.
#### Use with PostgresDB
Replace the following placeholders with the actual data:
- POSTGRES_DATABASE
- POSTGRES_HOST
- POSTGRES_PASSWORD
- POSTGRES_PORT
- POSTGRES_USER
- POSTGRES_SCHEMA
```bash
docker run -it --rm \
--name n8n \
-p 5678:5678 \
@ -125,18 +117,15 @@ docker run -it --rm \
-e DB_POSTGRESDB_USER=<POSTGRES_USER> \
-e DB_POSTGRESDB_SCHEMA=<POSTGRES_SCHEMA> \
-e DB_POSTGRESDB_PASSWORD=<POSTGRES_PASSWORD> \
-v ~/.n8n:/home/node/.n8n \
-v n8n_data:/home/node/.n8n \
docker.n8n.io/n8nio/n8n
```
A full working setup with docker-compose can be found [here](https://github.com/n8n-io/n8n-hosting/blob/main/docker-compose/withPostgres/README.md)
A full working setup with docker-compose can be found [here](https://github.com/n8n-io/n8n-hosting/blob/main/docker-compose/withPostgres/README.md).
## Passing Sensitive Data via File
## Passing sensitive data using files
To avoid passing sensitive information via environment variables "\_FILE" may be
appended to some environment variables. It will then load the data from a file
with the given name. That makes it possible to load data easily from
Docker and Kubernetes secrets.
To avoid passing sensitive information via environment variables, "\_FILE" may be appended to some environment variable names. n8n will then load the data from a file with the given name. This makes it possible to load data easily from Docker and Kubernetes secrets.
The following environment variables support file input:
@ -147,37 +136,86 @@ The following environment variables support file input:
- DB_POSTGRESDB_USER_FILE
- DB_POSTGRESDB_SCHEMA_FILE
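For instance, a Postgres password could be supplied from a mounted secret file instead of a plain environment variable. This is only a sketch: it assumes `DB_POSTGRESDB_PASSWORD_FILE` is among the supported `_FILE` variables, the host and secret paths are illustrative, and the remaining `DB_POSTGRESDB_*` connection settings are omitted for brevity.
```bash
docker run -it --rm \
 --name n8n \
 -p 5678:5678 \
 -v n8n_data:/home/node/.n8n \
 -v /path/on/host/pg-password.txt:/run/secrets/pg-password:ro \
 -e DB_TYPE=postgresdb \
 -e DB_POSTGRESDB_PASSWORD_FILE=/run/secrets/pg-password \
 docker.n8n.io/n8nio/n8n
```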
## Example Setup with Lets Encrypt
## Example server setups
Example server setups for a range of cloud providers and scenarios can be found in the [Server Setup documentation](https://docs.n8n.io/hosting/installation/server-setups/).
## Updating
Before you upgrade to the latest version, make sure to check here if there are any breaking changes which may affect you: [Breaking Changes](https://github.com/n8n-io/n8n/blob/master/packages/cli/BREAKING-CHANGES.md)
From your Docker Desktop, navigate to the Images tab and select Pull from the context menu to download the latest n8n image.
You can also use the command line to pull the latest, or a specific version:
### Pull latest (stable) version
```bash
docker pull docker.n8n.io/n8nio/n8n
```
### Pull specific version
```bash
docker pull docker.n8n.io/n8nio/n8n:0.220.1
```
### Pull next (unstable) version
```bash
docker pull docker.n8n.io/n8nio/n8n:next
```
Stop the container and start it again:
1. Get the container ID:
```bash
docker ps -a
```
2. Stop the container with ID container_id:
```bash
docker stop [container_id]
```
3. Remove the container (this does not remove your user data) with ID container_id:
```bash
docker rm [container_id]
```
4. Start the new container:
```bash
docker run --name=[container_name] [options] -d docker.n8n.io/n8nio/n8n
```
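For instance, assuming the container uses the named data volume from the examples above (the container name and port are illustrative):
```bash
docker run --name=n8n \
-p 5678:5678 \
-v n8n_data:/home/node/.n8n \
-d docker.n8n.io/n8nio/n8n
```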
### Updating with Docker Compose
If you run n8n using a Docker Compose file, follow these steps to update n8n:
```bash
# Pull latest version
docker compose pull
# Stop and remove older version
docker compose down
# Start the container
docker compose up -d
```
## Setting the timezone
To specify the timezone n8n should use, the environment variable `GENERIC_TIMEZONE` can
be set. One example where this variable has an effect is the Schedule node.
The system's timezone can be set separately with the environment variable `TZ`.
This controls the output of certain scripts and commands such as `$ date`.
For example, to use the same timezone for both:
```bash
docker run -it --rm \
@ -188,6 +226,8 @@ docker run -it --rm \
docker.n8n.io/n8nio/n8n
```
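A complete invocation might look like the following, using `Europe/Berlin` as an illustrative value for both variables:
```bash
docker run -it --rm \
--name n8n \
-p 5678:5678 \
-e GENERIC_TIMEZONE="Europe/Berlin" \
-e TZ="Europe/Berlin" \
-v n8n_data:/home/node/.n8n \
docker.n8n.io/n8nio/n8n
```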
For more information on configuration and environment variables, please see the [n8n documentation](https://docs.n8n.io/hosting/configuration/environment-variables/).
## Build Docker-Image
```bash
@ -201,33 +241,17 @@ docker buildx build --platform linux/amd64,linux/arm64 --build-arg N8N_VERSION=1
**Short answer:** It means "nodemation" and it is pronounced as n-eight-n.
**Long answer:** I get that question quite often (more often than I expected) so I decided it is probably best to answer it here. While looking for a good name for the project with a free domain I realized very quickly that all the good ones I could think of were already taken. So, in the end, I chose nodemation. "node-" in the sense that it uses a Node-View and that it uses Node.js and "-mation" for "automation" which is what the project is supposed to help with.
However, I did not like how long the name was and I could not imagine writing something that long every time in the CLI. That is when I then ended up on "n8n". Sure it does not work perfectly but neither does it for Kubernetes (k8s) and I did not hear anybody complain there. So I guess it should be ok.
## Support
If you need more help with n8n, you can ask for support in the [n8n community forum](https://community.n8n.io). This is the best source of answers, as both the n8n support team and community members can help.
## Jobs
If you are interested in working for n8n and helping to shape the future of the project, check out our [job posts](https://jobs.ashbyhq.com/n8n).
## License
You can find the license information [here](https://github.com/n8n-io/n8n/blob/master/README.md#license).

View file

@ -2,14 +2,14 @@ pre-commit:
commands:
biome_check:
glob: 'packages/**/*.{js,ts,json}'
run: ./node_modules/.bin/biome check --write --no-errors-on-unmatched --files-ignore-unknown=true --colors=off {staged_files}
run: pnpm biome check --write --no-errors-on-unmatched --files-ignore-unknown=true --colors=off {staged_files}
stage_fixed: true
skip:
- merge
- rebase
prettier_check:
glob: 'packages/**/*.{vue,yml,md,css,scss}'
run: ./node_modules/.bin/prettier --write --ignore-unknown --no-error-on-unmatched-pattern {staged_files}
run: pnpm prettier --write --ignore-unknown --no-error-on-unmatched-pattern {staged_files}
stage_fixed: true
skip:
- merge

View file

@ -1,12 +1,12 @@
{
"name": "n8n-monorepo",
"version": "1.77.0",
"version": "1.79.0",
"private": true,
"engines": {
"node": ">=20.15",
"pnpm": ">=9.15"
"pnpm": ">=10.2.1"
},
"packageManager": "pnpm@9.15.1",
"packageManager": "pnpm@10.2.1",
"scripts": {
"prepare": "node scripts/prepare.mjs",
"preinstall": "node scripts/block-npm-install.js",

View file

@ -1,6 +1,6 @@
{
"name": "@n8n/api-types",
"version": "0.13.0",
"version": "0.15.0",
"scripts": {
"clean": "rimraf dist .turbo",
"dev": "pnpm watch",

View file

@ -1,9 +1,14 @@
/** Unix timestamp. Seconds since epoch */
export type UnixTimestamp = number | null;
export type ApiKey = {
id: string;
label: string;
apiKey: string;
createdAt: string;
updatedAt: string;
/** Null if API key never expires */
expiresAt: UnixTimestamp | null;
};
export type ApiKeyWithRawValue = ApiKey & { rawApiKey: string };

View file

@ -0,0 +1,53 @@
import { CreateApiKeyRequestDto } from '../create-api-key-request.dto';
describe('CreateApiKeyRequestDto', () => {
describe('Valid requests', () => {
test.each([
{
name: 'expiresAt in the future',
expiresAt: Date.now() / 1000 + 1000,
},
{
name: 'expiresAt null',
expiresAt: null,
},
])('should succeed validation for $name', ({ expiresAt }) => {
const result = CreateApiKeyRequestDto.safeParse({ label: 'valid', expiresAt });
expect(result.success).toBe(true);
});
});
describe('Invalid requests', () => {
test.each([
{
name: 'expiresAt in the past',
expiresAt: Date.now() / 1000 - 1000,
expectedErrorPath: ['expiresAt'],
},
{
name: 'expiresAt with string',
expiresAt: 'invalid',
expectedErrorPath: ['expiresAt'],
},
{
name: 'expiresAt with []',
expiresAt: [],
expectedErrorPath: ['expiresAt'],
},
{
name: 'expiresAt with {}',
expiresAt: {},
expectedErrorPath: ['expiresAt'],
},
])('should fail validation for $name', ({ expiresAt, expectedErrorPath }) => {
const result = CreateApiKeyRequestDto.safeParse({ label: 'valid', expiresAt });
expect(result.success).toBe(false);
if (expectedErrorPath) {
expect(result.error?.issues[0].path).toEqual(expectedErrorPath);
}
});
});
});

View file

@ -1,9 +1,9 @@
import { CreateOrUpdateApiKeyRequestDto } from '../create-or-update-api-key-request.dto';
import { UpdateApiKeyRequestDto } from '../update-api-key-request.dto';
describe('CreateOrUpdateApiKeyRequestDto', () => {
describe('UpdateApiKeyRequestDto', () => {
describe('Valid requests', () => {
test('should allow valid label', () => {
const result = CreateOrUpdateApiKeyRequestDto.safeParse({
const result = UpdateApiKeyRequestDto.safeParse({
label: 'valid label',
});
expect(result.success).toBe(true);
@ -28,7 +28,7 @@ describe('CreateOrUpdateApiKeyRequestDto', () => {
expectedErrorPath: ['label'],
},
])('should fail validation for $name', ({ label, expectedErrorPath }) => {
const result = CreateOrUpdateApiKeyRequestDto.safeParse({ label });
const result = UpdateApiKeyRequestDto.safeParse({ label });
expect(result.success).toBe(false);

View file

@ -0,0 +1,15 @@
import { z } from 'zod';
import { UpdateApiKeyRequestDto } from './update-api-key-request.dto';
const isTimeNullOrInFuture = (value: number | null) => {
if (!value) return true;
return value > Date.now() / 1000;
};
export class CreateApiKeyRequestDto extends UpdateApiKeyRequestDto.extend({
expiresAt: z
.number()
.nullable()
.refine(isTimeNullOrInFuture, { message: 'Expiration date must be in the future or null' }),
}) {}

View file

@ -8,6 +8,6 @@ const xssCheck = (value: string) =>
whiteList: {},
});
export class CreateOrUpdateApiKeyRequestDto extends Z.class({
export class UpdateApiKeyRequestDto extends Z.class({
label: z.string().max(50).min(1).refine(xssCheck),
}) {}

View file

@ -0,0 +1,145 @@
import { CreateCredentialDto } from '../create-credential.dto';
describe('CreateCredentialDto', () => {
describe('Valid requests', () => {
test.each([
{
name: 'with required fields',
request: {
name: 'My API Credentials',
type: 'apiKey',
data: {},
},
},
{
name: 'with optional projectId',
request: {
name: 'My API Credentials',
type: 'apiKey',
data: {
apiKey: '123',
isAdmin: true,
},
projectId: 'project123',
},
},
{
name: 'with data object',
request: {
name: 'My API Credentials',
type: 'oauth2',
data: {
clientId: '123',
clientSecret: 'secret',
},
},
},
])('should validate $name', ({ request }) => {
const result = CreateCredentialDto.safeParse(request);
expect(result.success).toBe(true);
});
test('should not strip out properties from the data object', () => {
const result = CreateCredentialDto.safeParse({
name: 'My API Credentials',
type: 'apiKey',
data: {
apiKey: '123',
otherProperty: 'otherValue',
},
});
expect(result.success).toBe(true);
expect(result.data).toEqual({
name: 'My API Credentials',
type: 'apiKey',
data: {
apiKey: '123',
otherProperty: 'otherValue',
},
});
});
});
describe('Invalid requests', () => {
test.each([
{
name: 'missing name',
request: {
type: 'apiKey',
data: {},
},
expectedErrorPath: ['name'],
},
{
name: 'empty name',
request: {
name: '',
type: 'apiKey',
data: {},
},
expectedErrorPath: ['name'],
},
{
name: 'name too long',
request: {
name: 'a'.repeat(129),
type: 'apiKey',
data: {},
},
expectedErrorPath: ['name'],
},
{
name: 'missing type',
request: {
name: 'My API Credentials',
data: {},
},
expectedErrorPath: ['type'],
},
{
name: 'empty type',
request: {
name: 'My API Credentials',
type: '',
data: {},
},
expectedErrorPath: ['type'],
},
{
name: 'type too long',
request: {
name: 'My API Credentials',
type: 'a'.repeat(33),
data: {},
},
expectedErrorPath: ['type'],
},
{
name: 'missing data',
request: {
name: 'My API Credentials',
type: 'apiKey',
},
expectedErrorPath: ['data'],
},
{
name: 'invalid data type',
request: {
name: 'My API Credentials',
type: 'apiKey',
data: 'invalid',
},
expectedErrorPath: ['data'],
},
])('should fail validation for $name', ({ request, expectedErrorPath }) => {
const result = CreateCredentialDto.safeParse(request);
expect(result.success).toBe(false);
if (expectedErrorPath) {
expect(result.error?.issues[0].path).toEqual(expectedErrorPath);
}
});
});
});

View file

@ -0,0 +1,39 @@
import { GenerateCredentialNameRequestQuery } from '../generate-credential-name.dto';
describe('GenerateCredentialNameRequestQuery', () => {
describe('should pass validation', () => {
it('with empty object', () => {
const data = {};
const result = GenerateCredentialNameRequestQuery.safeParse(data);
expect(result.success).toBe(true);
expect(result.data?.name).toBeUndefined();
});
it('with valid name', () => {
const data = { name: 'My Credential' };
const result = GenerateCredentialNameRequestQuery.safeParse(data);
expect(result.success).toBe(true);
expect(result.data?.name).toBe('My Credential');
});
});
describe('should fail validation', () => {
test.each([
{ field: 'name', value: 123 },
{ field: 'name', value: true },
{ field: 'name', value: {} },
{ field: 'name', value: [] },
])('with invalid value $value for $field', ({ field, value }) => {
const data = { [field]: value };
const result = GenerateCredentialNameRequestQuery.safeParse(data);
expect(result.success).toBe(false);
expect(result.error?.issues[0].path[0]).toBe(field);
});
});
});

View file

@ -0,0 +1,9 @@
import { z } from 'zod';
import { Z } from 'zod-class';
export class CreateCredentialDto extends Z.class({
name: z.string().min(1).max(128),
type: z.string().min(1).max(32),
data: z.record(z.string(), z.unknown()),
projectId: z.string().optional(),
}) {}

View file

@ -0,0 +1,6 @@
import { z } from 'zod';
import { Z } from 'zod-class';
export class GenerateCredentialNameRequestQuery extends Z.class({
name: z.string().optional(),
}) {}

View file

@ -39,13 +39,17 @@ export { CommunityRegisteredRequestDto } from './license/community-registered-re
export { PullWorkFolderRequestDto } from './source-control/pull-work-folder-request.dto';
export { PushWorkFolderRequestDto } from './source-control/push-work-folder-request.dto';
export { CreateCredentialDto } from './credentials/create-credential.dto';
export { VariableListRequestDto } from './variables/variables-list-request.dto';
export { CredentialsGetOneRequestQuery } from './credentials/credentials-get-one-request.dto';
export { CredentialsGetManyRequestQuery } from './credentials/credentials-get-many-request.dto';
export { GenerateCredentialNameRequestQuery } from './credentials/generate-credential-name.dto';
export { ImportWorkflowFromUrlDto } from './workflows/import-workflow-from-url.dto';
export { ManualRunQueryDto } from './workflows/manual-run-query.dto';
export { CreateOrUpdateTagRequestDto } from './tag/create-or-update-tag-request.dto';
export { RetrieveTagQueryDto } from './tag/retrieve-tag-query.dto';
export { CreateOrUpdateApiKeyRequestDto } from './api-keys/create-or-update-api-key-request.dto';
export { UpdateApiKeyRequestDto } from './api-keys/update-api-key-request.dto';
export { CreateApiKeyRequestDto } from './api-keys/create-api-key-request.dto';

View file

@ -0,0 +1,47 @@
import { ManualRunQueryDto } from '../manual-run-query.dto';
describe('ManualRunQueryDto', () => {
describe('Valid requests', () => {
test.each([
{ name: 'version number 1', partialExecutionVersion: '1' },
{ name: 'version number 2', partialExecutionVersion: '2' },
{ name: 'missing version' },
])('should validate $name', ({ partialExecutionVersion }) => {
const result = ManualRunQueryDto.safeParse({ partialExecutionVersion });
if (!result.success) {
return fail('expected validation to succeed');
}
expect(result.success).toBe(true);
expect(typeof result.data.partialExecutionVersion).toBe('number');
});
});
describe('Invalid requests', () => {
test.each([
{
name: 'invalid version 0',
partialExecutionVersion: '0',
expectedErrorPath: ['partialExecutionVersion'],
},
{
name: 'invalid type (boolean)',
partialExecutionVersion: true,
expectedErrorPath: ['partialExecutionVersion'],
},
{
name: 'invalid type (number)',
partialExecutionVersion: 1,
expectedErrorPath: ['partialExecutionVersion'],
},
])('should fail validation for $name', ({ partialExecutionVersion, expectedErrorPath }) => {
const result = ManualRunQueryDto.safeParse({ partialExecutionVersion });
if (result.success) {
return fail('expected validation to fail');
}
expect(result.error.issues[0].path).toEqual(expectedErrorPath);
});
});
});

View file

@ -0,0 +1,9 @@
import { z } from 'zod';
import { Z } from 'zod-class';
export class ManualRunQueryDto extends Z.class({
partialExecutionVersion: z
.enum(['1', '2'])
.default('1')
.transform((version) => Number.parseInt(version) as 1 | 2),
}) {}

View file

@ -1,4 +1,3 @@
import type { FrontendBetaFeatures } from '@n8n/config';
import type { ExpressionEvaluatorType, LogLevel, WorkflowSettings } from 'n8n-workflow';
export interface IVersionNotificationSettings {
@ -176,6 +175,9 @@ export interface FrontendSettings {
security: {
blockFileAccessToN8nFiles: boolean;
};
betaFeatures: FrontendBetaFeatures[];
easyAIWorkflowOnboarded: boolean;
partialExecution: {
version: 1 | 2;
enforce: boolean;
};
}

View file

@ -7,6 +7,8 @@ export type * from './user';
export type * from './api-keys';
export type { Collaborator } from './push/collaboration';
export type { HeartbeatMessage } from './push/heartbeat';
export { createHeartbeatMessage, heartbeatMessageSchema } from './push/heartbeat';
export type { SendWorkerStatusMessage } from './push/worker';
export type { BannerName } from './schemas/bannerName.schema';

View file

@ -0,0 +1,11 @@
import { z } from 'zod';
export const heartbeatMessageSchema = z.object({
type: z.literal('heartbeat'),
});
export type HeartbeatMessage = z.infer<typeof heartbeatMessageSchema>;
export const createHeartbeatMessage = (): HeartbeatMessage => ({
type: 'heartbeat',
});

View file

@ -6,7 +6,7 @@ export type RunningJobSummary = {
workflowName: string;
mode: WorkflowExecuteMode;
startedAt: Date;
retryOf: string;
retryOf?: string;
status: ExecutionStatus;
};

View file

@ -18,7 +18,7 @@ RUN apt-get update && \
ENV PNPM_HOME="/pnpm"
ENV PATH="$PNPM_HOME:$PATH"
RUN corepack enable
RUN npm install -g corepack@0.31 && corepack enable
#
# Builder

View file

@ -1,6 +1,6 @@
{
"name": "@n8n/n8n-benchmark",
"version": "1.10.0",
"version": "1.11.0",
"description": "Cli for running benchmark tests for n8n",
"main": "dist/index",
"scripts": {

View file

@ -112,6 +112,7 @@ createChat({
mode: 'window',
chatInputKey: 'chatInput',
chatSessionKey: 'sessionId',
loadPreviousSession: true,
metadata: {},
showWelcomeScreen: false,
defaultLanguage: 'en',
@ -161,15 +162,20 @@ createChat({
- **Default**: `false`
- **Description**: Whether to show the welcome screen when the Chat window is opened.
### `chatSessionKey`
- **Type**: `string`
- **Default**: `'sessionId'`
- **Description**: The key to use for sending the chat history session ID for the AI Memory node.
### `chatInputKey`
- **Type**: `string`
- **Default**: `'chatInput'`
- **Description**: The key to use for sending the chat input for the AI Agent node.
### `loadPreviousSession`
- **Type**: `boolean`
- **Default**: `true`
- **Description**: Whether to load previous messages (chat context).
### `defaultLanguage`
- **Type**: `string`

View file

@ -1,6 +1,6 @@
{
"name": "@n8n/chat",
"version": "0.33.0",
"version": "0.34.0",
"scripts": {
"dev": "pnpm run storybook",
"build": "pnpm build:vite && pnpm build:bundle",

View file

@ -1,6 +1,6 @@
{
"name": "@n8n/config",
"version": "1.27.0",
"version": "1.29.0",
"scripts": {
"clean": "rimraf dist .turbo",
"dev": "pnpm watch",

View file

@ -1,11 +0,0 @@
import { Config, Env } from '../decorators';
import { StringArray } from '../utils';
export type FrontendBetaFeatures = 'canvas_v2';
@Config
export class FrontendConfig {
/** Which UI experiments to enable. Separate multiple values with a comma `,` */
@Env('N8N_UI_BETA_FEATURES')
betaFeatures: StringArray<FrontendBetaFeatures> = ['canvas_v2'];
}

View file

@ -33,6 +33,10 @@ class CommunityPackagesConfig {
/** Whether to reinstall any missing community packages */
@Env('N8N_REINSTALL_MISSING_PACKAGES')
reinstallMissing: boolean = false;
/** Whether to allow community packages as tools for AI agents */
@Env('N8N_COMMUNITY_PACKAGES_ALLOW_TOOL_USAGE')
allowToolUsage: boolean = false;
}
@Config

View file

@ -0,0 +1,12 @@
import { Config, Env } from '../decorators';
@Config
export class PartialExecutionsConfig {
/** Partial execution logic version to use by default. */
@Env('N8N_PARTIAL_EXECUTION_VERSION_DEFAULT')
version: 1 | 2 = 1;
/** Set this to true to enforce using the default version. Users cannot use the other version then by setting a local storage key. */
@Env('N8N_PARTIAL_EXECUTION_ENFORCE_VERSION')
enforce: boolean = false;
}

View file

@ -52,6 +52,10 @@ class RedisConfig {
/** Whether to enable TLS on Redis connections. */
@Env('QUEUE_BULL_REDIS_TLS')
tls: boolean = false;
/** Whether to enable dual-stack hostname resolution for Redis connections. */
@Env('QUEUE_BULL_REDIS_DUALSTACK')
dualStack: boolean = false;
}
@Config

View file

@ -10,4 +10,8 @@ export class WorkflowsConfig {
@Env('N8N_WORKFLOW_CALLER_POLICY_DEFAULT_OPTION')
callerPolicyDefaultOption: 'any' | 'none' | 'workflowsFromAList' | 'workflowsFromSameOwner' =
'workflowsFromSameOwner';
/** How many workflows to activate simultaneously during startup. */
@Env('N8N_WORKFLOW_ACTIVATION_BATCH_SIZE')
activationBatchSize: number = 1;
}

View file

@ -14,6 +14,7 @@ import { LicenseConfig } from './configs/license.config';
import { LoggingConfig } from './configs/logging.config';
import { MultiMainSetupConfig } from './configs/multi-main-setup.config';
import { NodesConfig } from './configs/nodes.config';
import { PartialExecutionsConfig } from './configs/partial-executions.config';
import { PublicApiConfig } from './configs/public-api.config';
import { TaskRunnersConfig } from './configs/runners.config';
import { ScalingModeConfig } from './configs/scaling-mode.config';
@ -30,10 +31,10 @@ export { Config, Env, Nested } from './decorators';
export { TaskRunnersConfig } from './configs/runners.config';
export { SecurityConfig } from './configs/security.config';
export { ExecutionsConfig } from './configs/executions.config';
export { FrontendBetaFeatures, FrontendConfig } from './configs/frontend.config';
export { S3Config } from './configs/external-storage.config';
export { LOG_SCOPES } from './configs/logging.config';
export type { LogScope } from './configs/logging.config';
export { WorkflowsConfig } from './configs/workflows.config';
@Config
export class GlobalConfig {
@ -134,4 +135,7 @@ export class GlobalConfig {
@Nested
tags: TagsConfig;
@Nested
partialExecutions: PartialExecutionsConfig;
}

View file

@ -119,6 +119,7 @@ describe('GlobalConfig', () => {
enabled: true,
registry: 'https://registry.npmjs.org',
reinstallMissing: false,
allowToolUsage: false,
},
errorTriggerType: 'n8n-nodes-base.errorTrigger',
include: [],
@ -155,6 +156,7 @@ describe('GlobalConfig', () => {
workflows: {
defaultName: 'My workflow',
callerPolicyDefaultOption: 'workflowsFromSameOwner',
activationBatchSize: 1,
},
endpoints: {
metrics: {
@ -213,6 +215,7 @@ describe('GlobalConfig', () => {
username: '',
clusterNodes: '',
tls: false,
dualStack: false,
},
gracefulShutdownTimeout: 30,
prefix: 'bull',
@ -302,6 +305,10 @@ describe('GlobalConfig', () => {
tags: {
disabled: false,
},
partialExecutions: {
version: 1,
enforce: false,
},
};
it('should use all default values when no env variables are defined', () => {

View file

@ -0,0 +1,156 @@
import { mock } from 'jest-mock-extended';
import type { PostgresNodeCredentials } from 'n8n-nodes-base/nodes/Postgres/v2/helpers/interfaces';
import type { IExecuteFunctions } from 'n8n-workflow';
import { getPostgresDataSource } from './postgres';
describe('Postgres SSL settings', () => {
const credentials = mock<PostgresNodeCredentials>({
host: 'localhost',
port: 5432,
user: 'user',
password: 'password',
database: 'database',
});
test('ssl is disabled + allowUnauthorizedCerts is false', async () => {
const context = mock<IExecuteFunctions>({
getCredentials: jest.fn().mockReturnValue({
...credentials,
ssl: 'disable',
allowUnauthorizedCerts: false,
}),
});
const dataSource = await getPostgresDataSource.call(context);
expect(dataSource.options).toMatchObject({
ssl: false,
});
});
test('ssl is disabled + allowUnauthorizedCerts is true', async () => {
const context = mock<IExecuteFunctions>({
getCredentials: jest.fn().mockReturnValue({
...credentials,
ssl: 'disable',
allowUnauthorizedCerts: true,
}),
});
const dataSource = await getPostgresDataSource.call(context);
expect(dataSource.options).toMatchObject({
ssl: false,
});
});
test('ssl is disabled + allowUnauthorizedCerts is undefined', async () => {
const context = mock<IExecuteFunctions>({
getCredentials: jest.fn().mockReturnValue({
...credentials,
ssl: 'disable',
}),
});
const dataSource = await getPostgresDataSource.call(context);
expect(dataSource.options).toMatchObject({
ssl: false,
});
});
test('ssl is allow + allowUnauthorizedCerts is false', async () => {
const context = mock<IExecuteFunctions>({
getCredentials: jest.fn().mockReturnValue({
...credentials,
ssl: 'allow',
allowUnauthorizedCerts: false,
}),
});
const dataSource = await getPostgresDataSource.call(context);
expect(dataSource.options).toMatchObject({
ssl: true,
});
});
test('ssl is allow + allowUnauthorizedCerts is true', async () => {
const context = mock<IExecuteFunctions>({
getCredentials: jest.fn().mockReturnValue({
...credentials,
ssl: 'allow',
allowUnauthorizedCerts: true,
}),
});
const dataSource = await getPostgresDataSource.call(context);
expect(dataSource.options).toMatchObject({
ssl: { rejectUnauthorized: false },
});
});
test('ssl is allow + allowUnauthorizedCerts is undefined', async () => {
const context = mock<IExecuteFunctions>({
getCredentials: jest.fn().mockReturnValue({
...credentials,
ssl: 'allow',
}),
});
const dataSource = await getPostgresDataSource.call(context);
expect(dataSource.options).toMatchObject({
ssl: true,
});
});
test('ssl is require + allowUnauthorizedCerts is false', async () => {
const context = mock<IExecuteFunctions>({
getCredentials: jest.fn().mockReturnValue({
...credentials,
ssl: 'require',
allowUnauthorizedCerts: false,
}),
});
const dataSource = await getPostgresDataSource.call(context);
expect(dataSource.options).toMatchObject({
ssl: true,
});
});
test('ssl is require + allowUnauthorizedCerts is true', async () => {
const context = mock<IExecuteFunctions>({
getCredentials: jest.fn().mockReturnValue({
...credentials,
ssl: 'require',
allowUnauthorizedCerts: true,
}),
});
const dataSource = await getPostgresDataSource.call(context);
expect(dataSource.options).toMatchObject({
ssl: { rejectUnauthorized: false },
});
});
test('ssl is require + allowUnauthorizedCerts is undefined', async () => {
const context = mock<IExecuteFunctions>({
getCredentials: jest.fn().mockReturnValue({
...credentials,
ssl: 'require',
}),
});
const dataSource = await getPostgresDataSource.call(context);
expect(dataSource.options).toMatchObject({
ssl: true,
});
});
});

View file

@ -1,29 +1,23 @@
import { DataSource } from '@n8n/typeorm';
import type { PostgresNodeCredentials } from 'n8n-nodes-base/dist/nodes/Postgres/v2/helpers/interfaces';
import { type IExecuteFunctions } from 'n8n-workflow';
import type { TlsOptions } from 'tls';
export async function getPostgresDataSource(this: IExecuteFunctions): Promise<DataSource> {
const credentials = await this.getCredentials('postgres');
const credentials = await this.getCredentials<PostgresNodeCredentials>('postgres');
const dataSource = new DataSource({
type: 'postgres',
host: credentials.host as string,
port: credentials.port as number,
username: credentials.user as string,
password: credentials.password as string,
database: credentials.database as string,
});
if (credentials.allowUnauthorizedCerts === true) {
dataSource.setOptions({
ssl: {
rejectUnauthorized: true,
},
});
} else {
dataSource.setOptions({
ssl: !['disable', undefined].includes(credentials.ssl as string | undefined),
});
let ssl: TlsOptions | boolean = !['disable', undefined].includes(credentials.ssl);
if (credentials.allowUnauthorizedCerts && ssl) {
ssl = { rejectUnauthorized: false };
}
return dataSource;
return new DataSource({
type: 'postgres',
host: credentials.host,
port: credentials.port,
username: credentials.user,
password: credentials.password,
database: credentials.database,
ssl,
});
}

View file

@ -1,4 +1,5 @@
import type { BaseChatMemory } from '@langchain/community/memory/chat_memory';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { HumanMessage } from '@langchain/core/messages';
import type { BaseMessage } from '@langchain/core/messages';
import type { BaseMessagePromptTemplateLike } from '@langchain/core/prompts';
@ -8,6 +9,7 @@ import type { Tool } from '@langchain/core/tools';
import { DynamicStructuredTool } from '@langchain/core/tools';
import type { AgentAction, AgentFinish } from 'langchain/agents';
import { AgentExecutor, createToolCallingAgent } from 'langchain/agents';
import type { ToolsAgentAction } from 'langchain/dist/agents/tool_calling/output_parser';
import { omit } from 'lodash';
import { BINARY_ENCODING, jsonParse, NodeConnectionType, NodeOperationError } from 'n8n-workflow';
import type { IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';
@ -22,28 +24,53 @@ import {
import { SYSTEM_MESSAGE } from './prompt';
function getOutputParserSchema(outputParser: N8nOutputParser): ZodObject<any, any, any, any> {
/* -----------------------------------------------------------
Output Parser Helper
----------------------------------------------------------- */
/**
* Retrieve the output parser schema.
* If the parser does not return a valid schema, default to a schema with a single text field.
*/
export function getOutputParserSchema(
outputParser: N8nOutputParser,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
): ZodObject<any, any, any, any> {
const schema =
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(outputParser.getSchema() as ZodObject<any, any, any, any>) ?? z.object({ text: z.string() });
return schema;
}
async function extractBinaryMessages(ctx: IExecuteFunctions) {
const binaryData = ctx.getInputData()?.[0]?.binary ?? {};
/* -----------------------------------------------------------
Binary Data Helpers
----------------------------------------------------------- */
/**
* Extracts binary image messages from the input data.
* When operating in filesystem mode, the binary stream is first converted to a buffer.
*
* @param ctx - The execution context
* @param itemIndex - The current item index
* @returns A HumanMessage containing the binary image messages.
*/
export async function extractBinaryMessages(
ctx: IExecuteFunctions,
itemIndex: number,
): Promise<HumanMessage> {
const binaryData = ctx.getInputData()?.[itemIndex]?.binary ?? {};
const binaryMessages = await Promise.all(
Object.values(binaryData)
.filter((data) => data.mimeType.startsWith('image/'))
.map(async (data) => {
let binaryUrlString;
let binaryUrlString: string;
// In filesystem mode we need to get binary stream by id before converting it to buffer
if (data.id) {
const binaryBuffer = await ctx.helpers.binaryToBuffer(
await ctx.helpers.getBinaryStream(data.id),
);
binaryUrlString = `data:${data.mimeType};base64,${Buffer.from(binaryBuffer).toString(BINARY_ENCODING)}`;
binaryUrlString = `data:${data.mimeType};base64,${Buffer.from(binaryBuffer).toString(
BINARY_ENCODING,
)}`;
} else {
binaryUrlString = data.data.includes('base64')
? data.data
@ -62,6 +89,10 @@ async function extractBinaryMessages(ctx: IExecuteFunctions) {
content: [...binaryMessages],
});
}
/* -----------------------------------------------------------
Agent Output Format Helpers
----------------------------------------------------------- */
/**
* Fixes empty content messages in agent steps.
*
@ -73,7 +104,9 @@ async function extractBinaryMessages(ctx: IExecuteFunctions) {
* @param steps - The agent steps to fix
* @returns The fixed agent steps
*/
function fixEmptyContentMessage(steps: AgentFinish | AgentAction[]) {
export function fixEmptyContentMessage(
steps: AgentFinish | ToolsAgentAction[],
): AgentFinish | ToolsAgentAction[] {
if (!Array.isArray(steps)) return steps;
steps.forEach((step) => {
@ -96,111 +129,111 @@ function fixEmptyContentMessage(steps: AgentFinish | AgentAction[]) {
return steps;
}
export async function toolsAgentExecute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {
this.logger.debug('Executing Tools Agent');
const model = await this.getInputConnectionData(NodeConnectionType.AiLanguageModel, 0);
/**
* Ensures consistent handling of outputs regardless of the model used,
* providing a unified output format for further processing.
*
* This method is necessary to handle different output formats from various language models.
* Specifically, it checks if the agent step is the final step (contains returnValues) and determines
* if the output is a simple string (e.g., from OpenAI models) or an array of outputs (e.g., from Anthropic models).
*
* Examples:
* 1. Anthropic model output:
* ```json
* {
* "output": [
* {
* "index": 0,
* "type": "text",
* "text": "The result of the calculation is approximately 1001.8166..."
* }
* ]
* }
*```
* 2. OpenAI model output:
* ```json
* {
* "output": "The result of the calculation is approximately 1001.82..."
* }
* ```
*
* @param steps - The agent finish or agent action steps.
* @returns The modified agent finish steps or the original steps.
*/
export function handleAgentFinishOutput(
steps: AgentFinish | AgentAction[],
): AgentFinish | AgentAction[] {
type AgentMultiOutputFinish = AgentFinish & {
returnValues: { output: Array<{ text: string; type: string; index: number }> };
};
const agentFinishSteps = steps as AgentMultiOutputFinish | AgentFinish;
if (!isChatInstance(model) || !model.bindTools) {
throw new NodeOperationError(
this.getNode(),
'Tools Agent requires Chat Model which supports Tools calling',
);
}
const memory = (await this.getInputConnectionData(NodeConnectionType.AiMemory, 0)) as
| BaseChatMemory
| undefined;
const tools = (await getConnectedTools(this, true, false)) as Array<DynamicStructuredTool | Tool>;
const outputParser = (await getOptionalOutputParsers(this))?.[0];
let structuredOutputParserTool: DynamicStructuredTool | undefined;
/**
* Ensures consistent handling of outputs regardless of the model used,
* providing a unified output format for further processing.
*
* This method is necessary to handle different output formats from various language models.
* Specifically, it checks if the agent step is the final step (contains returnValues) and determines
* if the output is a simple string (e.g., from OpenAI models) or an array of outputs (e.g., from Anthropic models).
*
* Examples:
* 1. Anthropic model output:
* ```json
* {
* "output": [
* {
* "index": 0,
* "type": "text",
* "text": "The result of the calculation is approximately 1001.8166..."
* }
* ]
* }
*```
* 2. OpenAI model output:
* ```json
* {
* "output": "The result of the calculation is approximately 1001.82..."
* }
* ```
*
* @param steps - The agent finish or agent action steps.
* @returns The modified agent finish steps or the original steps.
*/
function handleAgentFinishOutput(steps: AgentFinish | AgentAction[]) {
// Check if the steps contain multiple outputs
type AgentMultiOutputFinish = AgentFinish & {
returnValues: { output: Array<{ text: string; type: string; index: number }> };
};
const agentFinishSteps = steps as AgentMultiOutputFinish | AgentFinish;
if (agentFinishSteps.returnValues) {
const isMultiOutput = Array.isArray(agentFinishSteps.returnValues?.output);
if (isMultiOutput) {
// Define the type for each item in the multi-output array
type MultiOutputItem = { index: number; type: string; text: string };
const multiOutputSteps = agentFinishSteps.returnValues.output as MultiOutputItem[];
// Check if all items in the multi-output array are of type 'text'
const isTextOnly = (multiOutputSteps ?? []).every((output) => 'text' in output);
if (isTextOnly) {
// If all items are of type 'text', merge them into a single string
agentFinishSteps.returnValues.output = multiOutputSteps
.map((output) => output.text)
.join('\n')
.trim();
}
return agentFinishSteps;
if (agentFinishSteps.returnValues) {
const isMultiOutput = Array.isArray(agentFinishSteps.returnValues?.output);
if (isMultiOutput) {
// If all items in the multi-output array are of type 'text', merge them into a single string
const multiOutputSteps = agentFinishSteps.returnValues.output as Array<{
index: number;
type: string;
text: string;
}>;
const isTextOnly = multiOutputSteps.every((output) => 'text' in output);
if (isTextOnly) {
agentFinishSteps.returnValues.output = multiOutputSteps
.map((output) => output.text)
.join('\n')
.trim();
}
return agentFinishSteps;
}
// If the steps do not contain multiple outputs, return them as is
return agentFinishSteps;
}
// If memory is connected we need to stringify the returnValues so that it can be saved in the memory as a string
function handleParsedStepOutput(output: Record<string, unknown>) {
return {
returnValues: memory ? { output: JSON.stringify(output) } : output,
log: 'Final response formatted',
};
}
async function agentStepsParser(
steps: AgentFinish | AgentAction[],
): Promise<AgentFinish | AgentAction[]> {
return agentFinishSteps;
}
/**
* Wraps the parsed output so that it can be stored in memory.
* If memory is connected, the output is stringified.
*
* @param output - The parsed output object
* @param memory - The connected memory (if any)
* @returns The formatted output object
*/
export function handleParsedStepOutput(
output: Record<string, unknown>,
memory?: BaseChatMemory,
): { returnValues: Record<string, unknown>; log: string } {
return {
returnValues: memory ? { output: JSON.stringify(output) } : output,
log: 'Final response formatted',
};
}
/**
* Parses agent steps using the provided output parser.
* If the agent used the 'format_final_response' tool, the output is parsed accordingly.
*
* @param steps - The agent finish or action steps
* @param outputParser - The output parser (if defined)
* @param memory - The connected memory (if any)
* @returns The parsed steps with the final output
*/
export const getAgentStepsParser =
(outputParser?: N8nOutputParser, memory?: BaseChatMemory) =>
async (steps: AgentFinish | AgentAction[]): Promise<AgentFinish | AgentAction[]> => {
// Check if the steps contain the 'format_final_response' tool invocation.
if (Array.isArray(steps)) {
const responseParserTool = steps.find((step) => step.tool === 'format_final_response');
if (responseParserTool) {
const toolInput = responseParserTool?.toolInput;
// Check if the tool input is a string or an object and convert it to a string
if (responseParserTool && outputParser) {
const toolInput = responseParserTool.toolInput;
// Ensure the tool input is a string
const parserInput = toolInput instanceof Object ? JSON.stringify(toolInput) : toolInput;
const returnValues = (await outputParser.parse(parserInput)) as Record<string, unknown>;
return handleParsedStepOutput(returnValues);
return handleParsedStepOutput(returnValues, memory);
}
}
// If the steps are an AgentFinish and the outputParser is defined it must mean that the LLM didn't use `format_final_response` tool so we will try to parse the output manually
// Otherwise, if the steps contain a returnValues field, try to parse them manually.
if (outputParser && typeof steps === 'object' && (steps as AgentFinish).returnValues) {
const finalResponse = (steps as AgentFinish).returnValues;
let parserInput: string;
@ -213,7 +246,7 @@ export async function toolsAgentExecute(this: IExecuteFunctions): Promise<INodeE
// so we try to parse the output before wrapping it and then stringify it
parserInput = JSON.stringify({ output: jsonParse(finalResponse.output) });
} catch (error) {
// If parsing of the output fails, we will use the raw output
// Fallback to the raw output if parsing fails.
parserInput = finalResponse.output;
}
} else {
@ -225,88 +258,207 @@ export async function toolsAgentExecute(this: IExecuteFunctions): Promise<INodeE
}
const returnValues = (await outputParser.parse(parserInput)) as Record<string, unknown>;
return handleParsedStepOutput(returnValues);
return handleParsedStepOutput(returnValues, memory);
}
return handleAgentFinishOutput(steps);
}
return handleAgentFinishOutput(steps);
};
/* -----------------------------------------------------------
Agent Setup Helpers
----------------------------------------------------------- */
/**
* Retrieves the language model from the input connection.
* Throws an error if the model is not a valid chat instance or does not support tools.
*
* @param ctx - The execution context
* @returns The validated chat model
*/
export async function getChatModel(ctx: IExecuteFunctions): Promise<BaseChatModel> {
const model = await ctx.getInputConnectionData(NodeConnectionType.AiLanguageModel, 0);
if (!isChatInstance(model) || !model.bindTools) {
throw new NodeOperationError(
ctx.getNode(),
'Tools Agent requires Chat Model which supports Tools calling',
);
}
return model;
}
/**
* Retrieves the memory instance from the input connection if it is connected
*
* @param ctx - The execution context
* @returns The connected memory (if any)
*/
export async function getOptionalMemory(
ctx: IExecuteFunctions,
): Promise<BaseChatMemory | undefined> {
return (await ctx.getInputConnectionData(NodeConnectionType.AiMemory, 0)) as
| BaseChatMemory
| undefined;
}
/**
* Retrieves the connected tools and (if an output parser is defined)
* appends a structured output parser tool.
*
* @param ctx - The execution context
* @param outputParser - The optional output parser
* @returns The array of connected tools
*/
export async function getTools(
ctx: IExecuteFunctions,
outputParser?: N8nOutputParser,
): Promise<Array<DynamicStructuredTool | Tool>> {
const tools = (await getConnectedTools(ctx, true, false)) as Array<DynamicStructuredTool | Tool>;
// If an output parser is available, create a dynamic tool to validate the final output.
if (outputParser) {
const schema = getOutputParserSchema(outputParser);
structuredOutputParserTool = new DynamicStructuredTool({
const structuredOutputParserTool = new DynamicStructuredTool({
schema,
name: 'format_final_response',
description:
'Always use this tool for the final output to the user. It validates the output so only use it when you are sure the output is final.',
// We will not use the function here as we will use the parser to intercept & parse the output in the agentStepsParser
// We do not use a function here because we intercept the output with the parser.
func: async () => '',
});
tools.push(structuredOutputParserTool);
}
return tools;
}
const options = this.getNodeParameter('options', 0, {}) as {
/**
* Prepares the prompt messages for the agent.
*
* @param ctx - The execution context
* @param itemIndex - The current item index
* @param options - Options containing systemMessage and other parameters
* @returns The array of prompt messages
*/
export async function prepareMessages(
ctx: IExecuteFunctions,
itemIndex: number,
options: {
systemMessage?: string;
maxIterations?: number;
returnIntermediateSteps?: boolean;
};
const passthroughBinaryImages = this.getNodeParameter('options.passthroughBinaryImages', 0, true);
passthroughBinaryImages?: boolean;
outputParser?: N8nOutputParser;
},
): Promise<BaseMessagePromptTemplateLike[]> {
const messages: BaseMessagePromptTemplateLike[] = [
['system', `{system_message}${outputParser ? '\n\n{formatting_instructions}' : ''}`],
['system', `{system_message}${options.outputParser ? '\n\n{formatting_instructions}' : ''}`],
['placeholder', '{chat_history}'],
['human', '{input}'],
];
const hasBinaryData = this.getInputData()?.[0]?.binary !== undefined;
if (hasBinaryData && passthroughBinaryImages) {
const binaryMessage = await extractBinaryMessages(this);
// If there is binary data and the node option permits it, add a binary message
const hasBinaryData = ctx.getInputData()?.[itemIndex]?.binary !== undefined;
if (hasBinaryData && options.passthroughBinaryImages) {
const binaryMessage = await extractBinaryMessages(ctx, itemIndex);
messages.push(binaryMessage);
}
// We add the agent scratchpad last, so that the agent will not run in loops
// by adding binary messages between each interaction
messages.push(['placeholder', '{agent_scratchpad}']);
const prompt = ChatPromptTemplate.fromMessages(messages);
return messages;
}
const agent = createToolCallingAgent({
llm: model,
tools,
prompt,
streamRunnable: false,
});
agent.streamRunnable = false;
/**
* Creates the chat prompt from messages.
*
* @param messages - The messages array
* @returns The ChatPromptTemplate instance
*/
export function preparePrompt(messages: BaseMessagePromptTemplateLike[]): ChatPromptTemplate {
return ChatPromptTemplate.fromMessages(messages);
}
const runnableAgent = RunnableSequence.from([agent, agentStepsParser, fixEmptyContentMessage]);
/* -----------------------------------------------------------
Main Executor Function
----------------------------------------------------------- */
/**
* The main executor method for the Tools Agent.
*
* This function retrieves necessary components (model, memory, tools), prepares the prompt,
* creates the agent, and processes each input item. The error handling for each item is also
* managed here based on the node's continueOnFail setting.
*
* @returns The array of execution data for all processed items
*/
export async function toolsAgentExecute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {
this.logger.debug('Executing Tools Agent');
const executor = AgentExecutor.fromAgentAndTools({
agent: runnableAgent,
memory,
tools,
returnIntermediateSteps: options.returnIntermediateSteps === true,
maxIterations: options.maxIterations ?? 10,
});
const returnData: INodeExecutionData[] = [];
const items = this.getInputData();
for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
try {
const model = await getChatModel(this);
const memory = await getOptionalMemory(this);
const outputParsers = await getOptionalOutputParsers(this);
const outputParser = outputParsers?.[0];
const tools = await getTools(this, outputParser);
const input = getPromptInputByType({
ctx: this,
i: itemIndex,
inputKey: 'text',
promptTypeKey: 'promptType',
});
if (input === undefined) {
throw new NodeOperationError(this.getNode(), 'The text parameter is empty.');
throw new NodeOperationError(this.getNode(), 'The “text” parameter is empty.');
}
const response = await executor.invoke({
input,
system_message: options.systemMessage ?? SYSTEM_MESSAGE,
formatting_instructions:
'IMPORTANT: Always call `format_final_response` to format your final response!',
const options = this.getNodeParameter('options', itemIndex, {}) as {
systemMessage?: string;
maxIterations?: number;
returnIntermediateSteps?: boolean;
passthroughBinaryImages?: boolean;
};
// Prepare the prompt messages and prompt template.
const messages = await prepareMessages(this, itemIndex, {
systemMessage: options.systemMessage,
passthroughBinaryImages: options.passthroughBinaryImages ?? true,
outputParser,
});
const prompt = preparePrompt(messages);
// Create the base agent that calls tools.
const agent = createToolCallingAgent({
llm: model,
tools,
prompt,
streamRunnable: false,
});
agent.streamRunnable = false;
// Wrap the agent with parsers and fixes.
const runnableAgent = RunnableSequence.from([
agent,
getAgentStepsParser(outputParser, memory),
fixEmptyContentMessage,
]);
const executor = AgentExecutor.fromAgentAndTools({
agent: runnableAgent,
memory,
tools,
returnIntermediateSteps: options.returnIntermediateSteps === true,
maxIterations: options.maxIterations ?? 10,
});
// Invoke the executor with the given input and system message.
const response = await executor.invoke(
{
input,
system_message: options.systemMessage ?? SYSTEM_MESSAGE,
formatting_instructions:
'IMPORTANT: Always call `format_final_response` to format your final response!',
},
{ signal: this.getExecutionCancelSignal() },
);
// If memory and outputParser are connected, parse the output.
if (memory && outputParser) {
const parsedOutput = jsonParse<{ output: Record<string, unknown> }>(
response.output as string,
@ -314,7 +466,8 @@ export async function toolsAgentExecute(this: IExecuteFunctions): Promise<INodeE
response.output = parsedOutput?.output ?? parsedOutput;
}
returnData.push({
// Omit internal keys before returning the result.
const itemResult = {
json: omit(
response,
'system_message',
@ -323,7 +476,9 @@ export async function toolsAgentExecute(this: IExecuteFunctions): Promise<INodeE
'chat_history',
'agent_scratchpad',
),
});
};
returnData.push(itemResult);
} catch (error) {
if (this.continueOnFail()) {
returnData.push({
@ -332,7 +487,6 @@ export async function toolsAgentExecute(this: IExecuteFunctions): Promise<INodeE
});
continue;
}
throw error;
}
}

View file

@ -0,0 +1,273 @@
// ToolsAgent.test.ts
import type { BaseChatMemory } from '@langchain/community/memory/chat_memory';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { HumanMessage } from '@langchain/core/messages';
import type { BaseMessagePromptTemplateLike } from '@langchain/core/prompts';
import { FakeTool } from '@langchain/core/utils/testing';
import { Buffer } from 'buffer';
import { mock } from 'jest-mock-extended';
import type { ToolsAgentAction } from 'langchain/dist/agents/tool_calling/output_parser';
import type { Tool } from 'langchain/tools';
import type { IExecuteFunctions } from 'n8n-workflow';
import { NodeOperationError, BINARY_ENCODING } from 'n8n-workflow';
import type { ZodType } from 'zod';
import { z } from 'zod';
import * as helpersModule from '@utils/helpers';
import type { N8nOutputParser } from '@utils/output_parsers/N8nOutputParser';
import {
getOutputParserSchema,
extractBinaryMessages,
fixEmptyContentMessage,
handleParsedStepOutput,
getChatModel,
getOptionalMemory,
prepareMessages,
preparePrompt,
getTools,
} from '../agents/ToolsAgent/execute';
// We need to override the imported getConnectedTools so that we control its output.
jest.spyOn(helpersModule, 'getConnectedTools').mockResolvedValue([FakeTool as unknown as Tool]);
function getFakeOutputParser(returnSchema?: ZodType): N8nOutputParser {
const fakeOutputParser = mock<N8nOutputParser>();
(fakeOutputParser.getSchema as jest.Mock).mockReturnValue(returnSchema);
return fakeOutputParser;
}
function createFakeExecuteFunctions(overrides: Partial<IExecuteFunctions> = {}): IExecuteFunctions {
return {
getNodeParameter: jest
.fn()
.mockImplementation((_arg1: string, _arg2: number, defaultValue?: unknown) => {
return defaultValue;
}),
getNode: jest.fn().mockReturnValue({}),
getInputConnectionData: jest.fn().mockResolvedValue({}),
getInputData: jest.fn().mockReturnValue([]),
continueOnFail: jest.fn().mockReturnValue(false),
logger: { debug: jest.fn() },
helpers: {},
...overrides,
} as unknown as IExecuteFunctions;
}
describe('getOutputParserSchema', () => {
it('should return a default schema if getSchema returns undefined', () => {
const schema = getOutputParserSchema(getFakeOutputParser(undefined));
// The default schema requires a "text" field.
expect(() => schema.parse({})).toThrow();
expect(schema.parse({ text: 'hello' })).toEqual({ text: 'hello' });
});
it('should return the custom schema if provided', () => {
const customSchema = z.object({ custom: z.number() });
const schema = getOutputParserSchema(getFakeOutputParser(customSchema));
expect(() => schema.parse({ custom: 'not a number' })).toThrow();
expect(schema.parse({ custom: 123 })).toEqual({ custom: 123 });
});
});
describe('extractBinaryMessages', () => {
it('should extract a binary message from the input data when no id is provided', async () => {
const fakeItem = {
binary: {
img1: {
mimeType: 'image/png',
// simulate that data already includes 'base64'
data: 'data:image/png;base64,sampledata',
},
},
};
const ctx = createFakeExecuteFunctions({
getInputData: jest.fn().mockReturnValue([fakeItem]),
});
const humanMsg: HumanMessage = await extractBinaryMessages(ctx, 0);
// Expect the HumanMessage's content to be an array containing one binary message.
expect(Array.isArray(humanMsg.content)).toBe(true);
expect(humanMsg.content[0]).toEqual({
type: 'image_url',
image_url: { url: 'data:image/png;base64,sampledata' },
});
});
it('should extract a binary message using binary stream if id is provided', async () => {
const fakeItem = {
binary: {
img2: {
mimeType: 'image/jpeg',
id: '1234',
data: 'nonsense',
},
},
};
// Cast fakeHelpers as any to satisfy type requirements.
const fakeHelpers = {
getBinaryStream: jest.fn().mockResolvedValue('stream'),
binaryToBuffer: jest.fn().mockResolvedValue(Buffer.from('fakebufferdata')),
} as unknown as IExecuteFunctions['helpers'];
const ctx = createFakeExecuteFunctions({
getInputData: jest.fn().mockReturnValue([fakeItem]),
helpers: fakeHelpers,
});
const humanMsg: HumanMessage = await extractBinaryMessages(ctx, 0);
// eslint-disable-next-line @typescript-eslint/unbound-method
expect(fakeHelpers.getBinaryStream).toHaveBeenCalledWith('1234');
// eslint-disable-next-line @typescript-eslint/unbound-method
expect(fakeHelpers.binaryToBuffer).toHaveBeenCalled();
const expectedUrl = `data:image/jpeg;base64,${Buffer.from('fakebufferdata').toString(
BINARY_ENCODING,
)}`;
expect(humanMsg.content[0]).toEqual({
type: 'image_url',
image_url: { url: expectedUrl },
});
});
});
describe('fixEmptyContentMessage', () => {
it('should replace empty string inputs with empty objects', () => {
// Cast to any to bypass type issues with AgentFinish/AgentAction.
const fakeSteps: ToolsAgentAction[] = [
{
messageLog: [
{
content: [{ input: '' }, { input: { already: 'object' } }],
},
],
},
] as unknown as ToolsAgentAction[];
const fixed = fixEmptyContentMessage(fakeSteps) as ToolsAgentAction[];
const messageContent = fixed?.[0]?.messageLog?.[0].content;
// Type assertion needed since we're extending MessageContentComplex
expect((messageContent?.[0] as { input: unknown })?.input).toEqual({});
expect((messageContent?.[1] as { input: unknown })?.input).toEqual({ already: 'object' });
});
});
describe('handleParsedStepOutput', () => {
it('should stringify the output if memory is provided', () => {
const output = { key: 'value' };
const fakeMemory = mock<BaseChatMemory>();
const result = handleParsedStepOutput(output, fakeMemory);
expect(result.returnValues).toEqual({ output: JSON.stringify(output) });
expect(result.log).toEqual('Final response formatted');
});
it('should not stringify the output if memory is not provided', () => {
const output = { key: 'value' };
const result = handleParsedStepOutput(output);
expect(result.returnValues).toEqual(output);
});
});
describe('getChatModel', () => {
it('should return the model if it is a valid chat model', async () => {
// Cast fakeChatModel as any
const fakeChatModel = mock<BaseChatModel>();
fakeChatModel.bindTools = jest.fn();
fakeChatModel.lc_namespace = ['chat_models'];
const ctx = createFakeExecuteFunctions({
getInputConnectionData: jest.fn().mockResolvedValue(fakeChatModel),
});
const model = await getChatModel(ctx);
expect(model).toEqual(fakeChatModel);
});
it('should throw if the model is not a valid chat model', async () => {
const fakeInvalidModel = mock<BaseChatModel>(); // missing bindTools & lc_namespace
fakeInvalidModel.lc_namespace = [];
const ctx = createFakeExecuteFunctions({
getInputConnectionData: jest.fn().mockResolvedValue(fakeInvalidModel),
getNode: jest.fn().mockReturnValue({}),
});
await expect(getChatModel(ctx)).rejects.toThrow(NodeOperationError);
});
});
describe('getOptionalMemory', () => {
it('should return the memory if available', async () => {
const fakeMemory = { some: 'memory' };
const ctx = createFakeExecuteFunctions({
getInputConnectionData: jest.fn().mockResolvedValue(fakeMemory),
});
const memory = await getOptionalMemory(ctx);
expect(memory).toEqual(fakeMemory);
});
});
describe('getTools', () => {
it('should retrieve tools without appending if outputParser is not provided', async () => {
const ctx = createFakeExecuteFunctions();
const tools = await getTools(ctx);
expect(tools.length).toEqual(1);
});
it('should retrieve tools and append the structured output parser tool if outputParser is provided', async () => {
const fakeOutputParser = getFakeOutputParser(z.object({ text: z.string() }));
const ctx = createFakeExecuteFunctions();
const tools = await getTools(ctx, fakeOutputParser);
// Our fake getConnectedTools returns one tool; with outputParser, one extra is appended.
expect(tools.length).toEqual(2);
const dynamicTool = tools.find((t) => t.name === 'format_final_response');
expect(dynamicTool).toBeDefined();
});
});
describe('prepareMessages', () => {
it('should include a binary message if binary data is present and passthroughBinaryImages is true', async () => {
const fakeItem = {
binary: {
img1: {
mimeType: 'image/png',
data: 'data:image/png;base64,sampledata',
},
},
};
const ctx = createFakeExecuteFunctions({
getInputData: jest.fn().mockReturnValue([fakeItem]),
});
const messages = await prepareMessages(ctx, 0, {
systemMessage: 'Test system',
passthroughBinaryImages: true,
});
// Check if any message is an instance of HumanMessage
const hasBinaryMessage = messages.some(
(m) => typeof m === 'object' && m instanceof HumanMessage,
);
expect(hasBinaryMessage).toBe(true);
});
it('should not include a binary message if no binary data is present', async () => {
const fakeItem = { json: {} }; // no binary key
const ctx = createFakeExecuteFunctions({
getInputData: jest.fn().mockReturnValue([fakeItem]),
});
const messages = await prepareMessages(ctx, 0, {
systemMessage: 'Test system',
passthroughBinaryImages: true,
});
const hasHumanMessage = messages.some((m) => m instanceof HumanMessage);
expect(hasHumanMessage).toBe(false);
});
});
describe('preparePrompt', () => {
it('should return a ChatPromptTemplate instance', () => {
const sampleMessages: BaseMessagePromptTemplateLike[] = [
['system', 'Test'],
['human', 'Hello'],
];
const prompt = preparePrompt(sampleMessages);
expect(prompt).toBeDefined();
});
});

View file

@ -34,6 +34,7 @@ import { getOptionalOutputParsers } from '@utils/output_parsers/N8nOutputParser'
import { getTemplateNoticeField } from '@utils/sharedFields';
import { getTracingConfig } from '@utils/tracing';
import { dataUriFromImageData, UnsupportedMimeTypeError } from './utils';
import {
getCustomErrorMessage as getCustomOpenAiErrorMessage,
isOpenAiError,
@ -88,21 +89,28 @@ async function getImageMessage(
NodeConnectionType.AiLanguageModel,
0,
)) as BaseLanguageModel;
const dataURI = `data:image/jpeg;base64,${bufferData.toString('base64')}`;
const directUriModels = [ChatGoogleGenerativeAI, ChatOllama];
const imageUrl = directUriModels.some((i) => model instanceof i)
? dataURI
: { url: dataURI, detail };
try {
const dataURI = dataUriFromImageData(binaryData, bufferData);
return new HumanMessage({
content: [
{
type: 'image_url',
image_url: imageUrl,
},
],
});
const directUriModels = [ChatGoogleGenerativeAI, ChatOllama];
const imageUrl = directUriModels.some((i) => model instanceof i)
? dataURI
: { url: dataURI, detail };
return new HumanMessage({
content: [
{
type: 'image_url',
image_url: imageUrl,
},
],
});
} catch (error) {
if (error instanceof UnsupportedMimeTypeError)
throw new NodeOperationError(context.getNode(), error.message);
throw error;
}
}
async function getChainPromptTemplate(
@ -516,16 +524,16 @@ export class ChainLlm implements INodeType {
const items = this.getInputData();
const returnData: INodeExecutionData[] = [];
const llm = (await this.getInputConnectionData(
NodeConnectionType.AiLanguageModel,
0,
)) as BaseLanguageModel;
const outputParsers = await getOptionalOutputParsers(this);
for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
try {
let prompt: string;
const llm = (await this.getInputConnectionData(
NodeConnectionType.AiLanguageModel,
0,
)) as BaseLanguageModel;
const outputParsers = await getOptionalOutputParsers(this);
if (this.getNode().typeVersion <= 1.3) {
prompt = this.getNodeParameter('prompt', itemIndex) as string;
} else {

View file

@ -0,0 +1,23 @@
import { mock } from 'jest-mock-extended';
import type { IBinaryData } from 'n8n-workflow';
import { dataUriFromImageData, UnsupportedMimeTypeError } from '../utils';
describe('dataUriFromImageData', () => {
it('should not throw an error on images', async () => {
const mockBuffer = Buffer.from('Test data');
const mockBinaryData = mock<IBinaryData>({ mimeType: 'image/jpeg' });
const dataUri = dataUriFromImageData(mockBinaryData, mockBuffer);
expect(dataUri).toBe('data:image/jpeg;base64,VGVzdCBkYXRh');
});
it('should throw an UnsupportedMimeTypeError on non-images', async () => {
const mockBuffer = Buffer.from('Test data');
const mockBinaryData = mock<IBinaryData>({ mimeType: 'text/plain' });
expect(() => {
dataUriFromImageData(mockBinaryData, mockBuffer);
}).toThrow(UnsupportedMimeTypeError);
});
});

View file

@ -0,0 +1,12 @@
import type { IBinaryData } from 'n8n-workflow';
import { ApplicationError } from 'n8n-workflow';
export class UnsupportedMimeTypeError extends ApplicationError {}
export function dataUriFromImageData(binaryData: IBinaryData, bufferData: Buffer) {
if (!binaryData.mimeType?.startsWith('image/'))
throw new UnsupportedMimeTypeError(
`${binaryData.mimeType} is not a supported type of binary data. Only images are supported.`,
);
return `data:${binaryData.mimeType};base64,${bufferData.toString('base64')}`;
}
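A minimal usage sketch of the new helper (illustrative only, not part of the commit; the buffer contents and import path are assumed from the test file above):

// Assumed imports for illustration; the test above imports from '../utils'.
import type { IBinaryData } from 'n8n-workflow';
import { dataUriFromImageData, UnsupportedMimeTypeError } from './utils';

const png = { mimeType: 'image/png' } as IBinaryData;
dataUriFromImageData(png, Buffer.from('example')); // "data:image/png;base64,ZXhhbXBsZQ=="
// Non-image mime types throw UnsupportedMimeTypeError instead of producing a data URI:
// dataUriFromImageData({ mimeType: 'text/plain' } as IBinaryData, Buffer.from('example'));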

View file

@ -14,6 +14,7 @@ import {
type INodeType,
type INodeTypeDescription,
NodeOperationError,
parseErrorMetadata,
} from 'n8n-workflow';
import { promptTypeOptions, textFromPreviousNode } from '@utils/descriptions';
@ -162,23 +163,21 @@ export class ChainRetrievalQa implements INodeType {
async execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {
this.logger.debug('Executing Retrieval QA Chain');
const model = (await this.getInputConnectionData(
NodeConnectionType.AiLanguageModel,
0,
)) as BaseLanguageModel;
const retriever = (await this.getInputConnectionData(
NodeConnectionType.AiRetriever,
0,
)) as BaseRetriever;
const items = this.getInputData();
const returnData: INodeExecutionData[] = [];
// Run for each item
for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
try {
const model = (await this.getInputConnectionData(
NodeConnectionType.AiLanguageModel,
0,
)) as BaseLanguageModel;
const retriever = (await this.getInputConnectionData(
NodeConnectionType.AiRetriever,
0,
)) as BaseRetriever;
let query;
if (this.getNode().typeVersion <= 1.2) {
@ -225,11 +224,18 @@ export class ChainRetrievalQa implements INodeType {
const chain = RetrievalQAChain.fromLLM(model, retriever, chainParameters);
const response = await chain.withConfig(getTracingConfig(this)).invoke({ query });
const response = await chain
.withConfig(getTracingConfig(this))
.invoke({ query }, { signal: this.getExecutionCancelSignal() });
returnData.push({ json: { response } });
} catch (error) {
if (this.continueOnFail()) {
returnData.push({ json: { error: error.message }, pairedItem: { item: itemIndex } });
const metadata = parseErrorMetadata(error);
returnData.push({
json: { error: error.message },
pairedItem: { item: itemIndex },
metadata,
});
continue;
}

View file

@ -321,16 +321,16 @@ export class ChainSummarizationV2 implements INodeType {
| 'simple'
| 'advanced';
const model = (await this.getInputConnectionData(
NodeConnectionType.AiLanguageModel,
0,
)) as BaseLanguageModel;
const items = this.getInputData();
const returnData: INodeExecutionData[] = [];
for (let itemIndex = 0; itemIndex < items.length; itemIndex++) {
try {
const model = (await this.getInputConnectionData(
NodeConnectionType.AiLanguageModel,
0,
)) as BaseLanguageModel;
const summarizationMethodAndPrompts = this.getNodeParameter(
'options.summarizationMethodAndPrompts.values',
itemIndex,
@ -411,9 +411,12 @@ export class ChainSummarizationV2 implements INodeType {
}
const processedItem = await processor.processItem(item, itemIndex);
const response = await chain.call({
input_documents: processedItem,
});
const response = await chain.invoke(
{
input_documents: processedItem,
},
{ signal: this.getExecutionCancelSignal() },
);
returnData.push({ json: { response } });
}
} catch (error) {

View file

@ -1,7 +1,7 @@
import type { BaseLanguageModel } from '@langchain/core/language_models/base';
import { FakeLLM, FakeListChatModel } from '@langchain/core/utils/testing';
import get from 'lodash/get';
import type { IDataObject, IExecuteFunctions } from 'n8n-workflow/src';
import type { IDataObject, IExecuteFunctions } from 'n8n-workflow';
import { makeZodSchemaFromAttributes } from '../helpers';
import { InformationExtractor } from '../InformationExtractor.node';

View file

@ -108,6 +108,7 @@ export class LmChatOpenAi implements INodeType {
($credentials?.url && !$credentials.url.includes('api.openai.com')) ||
$responseItem.id.startsWith('ft:') ||
$responseItem.id.startsWith('o1') ||
$responseItem.id.startsWith('o3') ||
($responseItem.id.startsWith('gpt-') && !$responseItem.id.includes('instruct'))
}}`,
},
@ -263,6 +264,38 @@ export class LmChatOpenAi implements INodeType {
'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
type: 'number',
},
{
displayName: 'Reasoning Effort',
name: 'reasoningEffort',
default: 'medium',
description:
'Controls the amount of reasoning tokens to use. A value of "low" will favor speed and economical token usage, "high" will favor more complete reasoning at the cost of more tokens generated and slower responses.',
type: 'options',
options: [
{
name: 'Low',
value: 'low',
description: 'Favors speed and economical token usage',
},
{
name: 'Medium',
value: 'medium',
description: 'Balance between speed and reasoning accuracy',
},
{
name: 'High',
value: 'high',
description:
'Favors more complete reasoning at the cost of more tokens generated and slower responses',
},
],
displayOptions: {
show: {
// reasoning_effort is only available on o1, o1-versioned, or on o3-mini and beyond. Not on o1-mini or other GPT models.
'/model': [{ _cnd: { regex: '(^o1([-\\d]+)?$)|(^o[3-9].*)' } }],
},
},
},
{
displayName: 'Timeout',
name: 'timeout',
@ -310,6 +343,7 @@ export class LmChatOpenAi implements INodeType {
temperature?: number;
topP?: number;
responseFormat?: 'text' | 'json_object';
reasoningEffort?: 'low' | 'medium' | 'high';
};
const configuration: ClientOptions = {};
@ -319,6 +353,15 @@ export class LmChatOpenAi implements INodeType {
configuration.baseURL = credentials.url as string;
}
// Extra options to send to OpenAI, that are not directly supported by LangChain
const modelKwargs: {
response_format?: object;
reasoning_effort?: 'low' | 'medium' | 'high';
} = {};
if (options.responseFormat) modelKwargs.response_format = { type: options.responseFormat };
if (options.reasoningEffort && ['low', 'medium', 'high'].includes(options.reasoningEffort))
modelKwargs.reasoning_effort = options.reasoningEffort;
const model = new ChatOpenAI({
openAIApiKey: credentials.apiKey as string,
modelName,
@ -327,11 +370,7 @@ export class LmChatOpenAi implements INodeType {
maxRetries: options.maxRetries ?? 2,
configuration,
callbacks: [new N8nLlmTracing(this)],
modelKwargs: options.responseFormat
? {
response_format: { type: options.responseFormat },
}
: undefined,
modelKwargs,
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this, openAiFailedAttemptHandler),
});
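For orientation only (not part of the commit): the displayOptions regex on the Reasoning Effort option added earlier in this file is meant to match o1, versioned o1, and o3-and-later models while excluding o1-mini and GPT models. A quick sketch with assumed model IDs:

// The regex below is copied verbatim from the Reasoning Effort displayOptions above.
const reasoningEffortModels = /(^o1([-\d]+)?$)|(^o[3-9].*)/;
// Model IDs are illustrative assumptions, not an exhaustive list.
console.log(reasoningEffortModels.test('o1')); // true  (bare o1)
console.log(reasoningEffortModels.test('o1-2024-12-17')); // true  (versioned o1)
console.log(reasoningEffortModels.test('o3-mini')); // true  (o3 and beyond)
console.log(reasoningEffortModels.test('o1-mini')); // false (explicitly excluded)
console.log(reasoningEffortModels.test('gpt-4o')); // false (non-reasoning model)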

View file

@ -19,6 +19,7 @@ export async function searchModels(
(baseURL && !baseURL.includes('api.openai.com')) ||
model.id.startsWith('ft:') ||
model.id.startsWith('o1') ||
model.id.startsWith('o3') ||
(model.id.startsWith('gpt-') && !model.id.includes('instruct'));
if (!filter) return isValidModel;

View file

@ -1,12 +1,13 @@
/* eslint-disable n8n-nodes-base/node-dirname-against-convention */
import type { SafetySetting } from '@google/generative-ai';
import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
import {
NodeConnectionType,
type INodeType,
type INodeTypeDescription,
type ISupplyDataFunctions,
type SupplyData,
import { NodeConnectionType } from 'n8n-workflow';
import type {
NodeError,
INodeType,
INodeTypeDescription,
ISupplyDataFunctions,
SupplyData,
} from 'n8n-workflow';
import { getConnectionHintNoticeField } from '@utils/sharedFields';
@ -15,6 +16,13 @@ import { additionalOptions } from '../gemini-common/additional-options';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
import { N8nLlmTracing } from '../N8nLlmTracing';
function errorDescriptionMapper(error: NodeError) {
if (error.description?.includes('properties: should be non-empty for OBJECT type')) {
return 'Google Gemini requires at least one <a href="https://docs.n8n.io/advanced-ai/examples/using-the-fromai-function/" target="_blank">dynamic parameter</a> when using tools';
}
return error.description ?? 'Unknown error';
}
export class LmChatGoogleGemini implements INodeType {
description: INodeTypeDescription = {
displayName: 'Google Gemini Chat Model',
@ -147,7 +155,7 @@ export class LmChatGoogleGemini implements INodeType {
temperature: options.temperature,
maxOutputTokens: options.maxOutputTokens,
safetySettings,
callbacks: [new N8nLlmTracing(this)],
callbacks: [new N8nLlmTracing(this, { errorDescriptionMapper })],
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this),
});

View file

@ -61,11 +61,15 @@ export class N8nLlmTracing extends BaseCallbackHandler {
totalTokens: completionTokens + promptTokens,
};
},
errorDescriptionMapper: (error: NodeError) => error.description,
};
constructor(
private executionFunctions: ISupplyDataFunctions,
options?: { tokensUsageParser: TokensUsageParser },
options?: {
tokensUsageParser?: TokensUsageParser;
errorDescriptionMapper?: (error: NodeError) => string;
},
) {
super();
this.options = { ...this.options, ...options };
@ -192,6 +196,10 @@ export class N8nLlmTracing extends BaseCallbackHandler {
}
if (error instanceof NodeError) {
if (this.options.errorDescriptionMapper) {
error.description = this.options.errorDescriptionMapper(error);
}
this.executionFunctions.addOutputData(this.connectionType, runDetails.index, error);
} else {
// If the error is not a NodeError, we wrap it in a NodeOperationError

View file

@ -15,7 +15,11 @@ import type {
} from 'n8n-workflow';
import { jsonParse, NodeConnectionType, NodeOperationError } from 'n8n-workflow';
import { inputSchemaField, jsonSchemaExampleField, schemaTypeField } from '@utils/descriptions';
import {
buildInputSchemaField,
buildJsonSchemaExampleField,
schemaTypeField,
} from '@utils/descriptions';
import { convertJsonSchemaToZod, generateSchema } from '@utils/schemaParsing';
import { getConnectionHintNoticeField } from '@utils/sharedFields';
@ -168,8 +172,8 @@ export class ToolCode implements INodeType {
default: false,
},
{ ...schemaTypeField, displayOptions: { show: { specifyInputSchema: [true] } } },
jsonSchemaExampleField,
inputSchemaField,
buildJsonSchemaExampleField({ showExtraProps: { specifyInputSchema: [true] } }),
buildInputSchemaField({ showExtraProps: { specifyInputSchema: [true] } }),
],
};

View file

@ -237,6 +237,69 @@ describe('ToolHttpRequest', () => {
}),
);
});
it('should return the error when receiving text that contains a null character', async () => {
helpers.httpRequest.mockResolvedValue({
body: 'Hello\0World',
headers: {
'content-type': 'text/plain',
},
});
executeFunctions.getNodeParameter.mockImplementation((paramName: string) => {
switch (paramName) {
case 'method':
return 'GET';
case 'url':
return 'https://httpbin.org/text/plain';
case 'options':
return {};
case 'placeholderDefinitions.values':
return [];
default:
return undefined;
}
});
const { response } = await httpTool.supplyData.call(executeFunctions, 0);
const res = await (response as N8nTool).invoke({});
expect(helpers.httpRequest).toHaveBeenCalled();
// Check that the returned string is formatted as an error message.
expect(res).toContain('error');
expect(res).toContain('Binary data is not supported');
});
it('should return the error when receiving a JSON response containing a null character', async () => {
// Provide a raw JSON string with a literal null character.
helpers.httpRequest.mockResolvedValue({
body: '{"message":"hello\0world"}',
headers: {
'content-type': 'application/json',
},
});
executeFunctions.getNodeParameter.mockImplementation((paramName: string) => {
switch (paramName) {
case 'method':
return 'GET';
case 'url':
return 'https://httpbin.org/json';
case 'options':
return {};
case 'placeholderDefinitions.values':
return [];
default:
return undefined;
}
});
const { response } = await httpTool.supplyData.call(executeFunctions, 0);
const res = await (response as N8nTool).invoke({});
expect(helpers.httpRequest).toHaveBeenCalled();
// Check that the tool returns an error string rather than resolving to valid JSON.
expect(res).toContain('error');
expect(res).toContain('Binary data is not supported');
});
});
describe('Optimize response', () => {

View file

@ -5,7 +5,6 @@ import { JSDOM } from 'jsdom';
import get from 'lodash/get';
import set from 'lodash/set';
import unset from 'lodash/unset';
import * as mime from 'mime-types';
import { getOAuth2AdditionalParameters } from 'n8n-nodes-base/dist/nodes/HttpRequest/GenericFunctions';
import type {
IDataObject,
@ -146,6 +145,25 @@ const defaultOptimizer = <T>(response: T) => {
return String(response);
};
function isBinary(data: unknown) {
// Check if data is a Buffer
if (Buffer.isBuffer(data)) {
return true;
}
// If data is a string, assume it's text unless it contains null characters.
if (typeof data === 'string') {
// If the string contains a null character, it's likely binary.
if (data.includes('\0')) {
return true;
}
return false;
}
// For any other type, assume it's not binary.
return false;
}
const htmlOptimizer = (ctx: ISupplyDataFunctions, itemIndex: number, maxLength: number) => {
const cssSelector = ctx.getNodeParameter('cssSelector', itemIndex, '') as string;
const onlyContent = ctx.getNodeParameter('onlyContent', itemIndex, false) as boolean;
@ -755,13 +773,8 @@ export const configureToolFunction = (
if (!response) {
try {
// Check if the response is binary data
if (fullResponse?.headers?.['content-type']) {
const contentType = fullResponse.headers['content-type'] as string;
const mimeType = contentType.split(';')[0].trim();
if (mime.charset(mimeType) !== 'UTF-8') {
throw new NodeOperationError(ctx.getNode(), 'Binary data is not supported');
}
if (fullResponse.body && isBinary(fullResponse.body)) {
throw new NodeOperationError(ctx.getNode(), 'Binary data is not supported');
}
response = optimizeResponse(fullResponse.body);
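A hedged sketch of how the new isBinary check behaves, replacing the previous mime-charset test (inputs assumed; the function is module-local, so these are not literal imports):

isBinary(Buffer.from([0xff, 0xd8, 0xff])); // true  – Buffers are always treated as binary
isBinary('Hello\0World'); // true  – a NUL byte marks a string as binary
isBinary('{"message":"plain json"}'); // false – ordinary text passes through
isBinary(12345); // false – any other type is assumed to be non-binary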

View file

@ -11,9 +11,14 @@ import type {
import { WorkflowToolService } from './utils/WorkflowToolService';
type ISupplyDataFunctionsWithRunIndex = ISupplyDataFunctions & { runIndex: number };
// Mock ISupplyDataFunctions interface
function createMockContext(overrides?: Partial<ISupplyDataFunctions>): ISupplyDataFunctions {
function createMockContext(
overrides?: Partial<ISupplyDataFunctions>,
): ISupplyDataFunctionsWithRunIndex {
return {
runIndex: 0,
getNodeParameter: jest.fn(),
getWorkflowDataProxy: jest.fn(),
getNode: jest.fn(),
@ -35,11 +40,11 @@ function createMockContext(overrides?: Partial<ISupplyDataFunctions>): ISupplyDa
warn: jest.fn(),
},
...overrides,
} as ISupplyDataFunctions;
} as ISupplyDataFunctionsWithRunIndex;
}
describe('WorkflowTool::WorkflowToolService', () => {
let context: ISupplyDataFunctions;
let context: ISupplyDataFunctionsWithRunIndex;
let service: WorkflowToolService;
beforeEach(() => {
@ -93,6 +98,7 @@ describe('WorkflowTool::WorkflowToolService', () => {
expect(result).toBe(JSON.stringify(TEST_RESPONSE, null, 2));
expect(context.addOutputData).toHaveBeenCalled();
expect(context.runIndex).toBe(1);
});
it('should handle errors during tool execution', async () => {

View file

@ -4,14 +4,21 @@ import type { ILocalLoadOptionsFunctions, ResourceMapperFields } from 'n8n-workf
export async function loadSubWorkflowInputs(
this: ILocalLoadOptionsFunctions,
): Promise<ResourceMapperFields> {
const { fields, subworkflowInfo } = await loadWorkflowInputMappings.bind(this)();
const { fields, subworkflowInfo, dataMode } = await loadWorkflowInputMappings.bind(this)();
let emptyFieldsNotice: string | undefined;
if (fields.length === 0) {
const subworkflowLink = subworkflowInfo?.id
? `<a href="/workflow/${subworkflowInfo?.id}" target="_blank">sub-workflows trigger</a>`
: 'sub-workflows trigger';
emptyFieldsNotice = `This sub-workflow will not receive any input when called by your AI node. Define your expected input in the ${subworkflowLink}.`;
switch (dataMode) {
case 'passthrough':
emptyFieldsNotice = `This sub-workflow is set up to receive all input data, without specific inputs the Agent will not be able to pass data to this tool. You can define specific inputs in the ${subworkflowLink}.`;
break;
default:
emptyFieldsNotice = `This sub-workflow will not receive any input when called by your AI node. Define your expected input in the ${subworkflowLink}.`;
break;
}
}
return { fields, emptyFieldsNotice };
}

View file

@ -1,284 +0,0 @@
import type { ISupplyDataFunctions } from 'n8n-workflow';
import { jsonParse, NodeOperationError } from 'n8n-workflow';
import { z } from 'zod';
type AllowedTypes = 'string' | 'number' | 'boolean' | 'json';
export interface FromAIArgument {
key: string;
description?: string;
type?: AllowedTypes;
defaultValue?: string | number | boolean | Record<string, unknown>;
}
// TODO: We copied this class from the core package, once the new nodes context work is merged, this should be available in root node context and this file can be removed.
// Please apply any changes to both files
/**
* AIParametersParser
*
* This class encapsulates the logic for parsing node parameters, extracting $fromAI calls,
* generating Zod schemas, and creating LangChain tools.
*/
export class AIParametersParser {
private ctx: ISupplyDataFunctions;
/**
* Constructs an instance of AIParametersParser.
* @param ctx The execution context.
*/
constructor(ctx: ISupplyDataFunctions) {
this.ctx = ctx;
}
/**
* Generates a Zod schema based on the provided FromAIArgument placeholder.
* @param placeholder The FromAIArgument object containing key, type, description, and defaultValue.
* @returns A Zod schema corresponding to the placeholder's type and constraints.
*/
generateZodSchema(placeholder: FromAIArgument): z.ZodTypeAny {
let schema: z.ZodTypeAny;
switch (placeholder.type?.toLowerCase()) {
case 'string':
schema = z.string();
break;
case 'number':
schema = z.number();
break;
case 'boolean':
schema = z.boolean();
break;
case 'json':
schema = z.record(z.any());
break;
default:
schema = z.string();
}
if (placeholder.description) {
schema = schema.describe(`${schema.description ?? ''} ${placeholder.description}`.trim());
}
if (placeholder.defaultValue !== undefined) {
schema = schema.default(placeholder.defaultValue);
}
return schema;
}
/**
* Recursively traverses the nodeParameters object to find all $fromAI calls.
* @param payload The current object or value being traversed.
* @param collectedArgs The array collecting FromAIArgument objects.
*/
traverseNodeParameters(payload: unknown, collectedArgs: FromAIArgument[]) {
if (typeof payload === 'string') {
const fromAICalls = this.extractFromAICalls(payload);
fromAICalls.forEach((call) => collectedArgs.push(call));
} else if (Array.isArray(payload)) {
payload.forEach((item: unknown) => this.traverseNodeParameters(item, collectedArgs));
} else if (typeof payload === 'object' && payload !== null) {
Object.values(payload).forEach((value) => this.traverseNodeParameters(value, collectedArgs));
}
}
/**
* Extracts all $fromAI calls from a given string
* @param str The string to search for $fromAI calls.
* @returns An array of FromAIArgument objects.
*
* This method uses a regular expression to find the start of each $fromAI function call
* in the input string. It then employs a character-by-character parsing approach to
* accurately extract the arguments of each call, handling nested parentheses and quoted strings.
*
* The parsing process:
* 1. Finds the starting position of a $fromAI call using regex.
* 2. Iterates through characters, keeping track of parentheses depth and quote status.
* 3. Handles escaped characters within quotes to avoid premature quote closing.
* 4. Builds the argument string until the matching closing parenthesis is found.
* 5. Parses the extracted argument string into a FromAIArgument object.
* 6. Repeats the process for all $fromAI calls in the input string.
*
*/
extractFromAICalls(str: string): FromAIArgument[] {
const args: FromAIArgument[] = [];
// Regular expression to match the start of a $fromAI function call
const pattern = /\$fromAI\s*\(\s*/gi;
let match: RegExpExecArray | null;
while ((match = pattern.exec(str)) !== null) {
const startIndex = match.index + match[0].length;
let current = startIndex;
let inQuotes = false;
let quoteChar = '';
let parenthesesCount = 1;
let argsString = '';
// Parse the arguments string, handling nested parentheses and quotes
while (current < str.length && parenthesesCount > 0) {
const char = str[current];
if (inQuotes) {
// Handle characters inside quotes, including escaped characters
if (char === '\\' && current + 1 < str.length) {
argsString += char + str[current + 1];
current += 2;
continue;
}
if (char === quoteChar) {
inQuotes = false;
quoteChar = '';
}
argsString += char;
} else {
// Handle characters outside quotes
if (['"', "'", '`'].includes(char)) {
inQuotes = true;
quoteChar = char;
} else if (char === '(') {
parenthesesCount++;
} else if (char === ')') {
parenthesesCount--;
}
// Only add characters if we're still inside the main parentheses
if (parenthesesCount > 0 || char !== ')') {
argsString += char;
}
}
current++;
}
// If parentheses are balanced, parse the arguments
if (parenthesesCount === 0) {
try {
const parsedArgs = this.parseArguments(argsString);
args.push(parsedArgs);
} catch (error) {
// If parsing fails, throw an ApplicationError with details
throw new NodeOperationError(
this.ctx.getNode(),
`Failed to parse $fromAI arguments: ${argsString}: ${error}`,
);
}
} else {
// Log an error if parentheses are unbalanced
throw new NodeOperationError(
this.ctx.getNode(),
`Unbalanced parentheses while parsing $fromAI call: ${str.slice(startIndex)}`,
);
}
}
return args;
}
/**
* Parses the arguments of a single $fromAI function call.
* @param argsString The string containing the function arguments.
* @returns A FromAIArgument object.
*/
parseArguments(argsString: string): FromAIArgument {
// Split arguments by commas not inside quotes
const args: string[] = [];
let currentArg = '';
let inQuotes = false;
let quoteChar = '';
let escapeNext = false;
for (let i = 0; i < argsString.length; i++) {
const char = argsString[i];
if (escapeNext) {
currentArg += char;
escapeNext = false;
continue;
}
if (char === '\\') {
escapeNext = true;
continue;
}
if (['"', "'", '`'].includes(char)) {
if (!inQuotes) {
inQuotes = true;
quoteChar = char;
currentArg += char;
} else if (char === quoteChar) {
inQuotes = false;
quoteChar = '';
currentArg += char;
} else {
currentArg += char;
}
continue;
}
if (char === ',' && !inQuotes) {
args.push(currentArg.trim());
currentArg = '';
continue;
}
currentArg += char;
}
if (currentArg) {
args.push(currentArg.trim());
}
// Remove surrounding quotes if present
const cleanArgs = args.map((arg) => {
const trimmed = arg.trim();
if (
(trimmed.startsWith("'") && trimmed.endsWith("'")) ||
(trimmed.startsWith('`') && trimmed.endsWith('`')) ||
(trimmed.startsWith('"') && trimmed.endsWith('"'))
) {
return trimmed
.slice(1, -1)
.replace(/\\'/g, "'")
.replace(/\\`/g, '`')
.replace(/\\"/g, '"')
.replace(/\\\\/g, '\\');
}
return trimmed;
});
const type = cleanArgs?.[2] || 'string';
if (!['string', 'number', 'boolean', 'json'].includes(type.toLowerCase())) {
throw new NodeOperationError(this.ctx.getNode(), `Invalid type: ${type}`);
}
return {
key: cleanArgs[0] || '',
description: cleanArgs[1],
type: (cleanArgs?.[2] ?? 'string') as AllowedTypes,
defaultValue: this.parseDefaultValue(cleanArgs[3]),
};
}
/**
* Parses the default value, preserving its original type.
* @param value The default value as a string.
* @returns The parsed default value in its appropriate type.
*/
parseDefaultValue(
value: string | undefined,
): string | number | boolean | Record<string, unknown> | undefined {
if (value === undefined || value === '') return undefined;
const lowerValue = value.toLowerCase();
if (lowerValue === 'true') return true;
if (lowerValue === 'false') return false;
if (!isNaN(Number(value))) return Number(value);
try {
return jsonParse(value);
} catch {
return value;
}
}
}
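This file is deleted; as the next hunk shows, callers switch to equivalents exported from n8n-workflow. A rough sketch of the replacement call pattern (the sample node parameters are assumed; in the service, they come from this.context.getNode().parameters):

import { generateZodSchema, traverseNodeParameters } from 'n8n-workflow';
import type { FromAIArgument } from 'n8n-workflow';
import { z } from 'zod';

// Assumed example parameter containing a $fromAI call.
const nodeParameters: Record<string, unknown> = {
  query: "={{ $fromAI('query', 'Search term', 'string') }}",
};
const collected: FromAIArgument[] = [];
traverseNodeParameters(nodeParameters, collected); // was parser.traverseNodeParameters(...)
const schema = z.object(
  Object.fromEntries(collected.map((arg) => [arg.key, generateZodSchema(arg)])),
);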

View file

@ -8,6 +8,7 @@ import { getCurrentWorkflowInputData } from 'n8n-nodes-base/dist/utils/workflowI
import type {
ExecuteWorkflowData,
ExecutionError,
FromAIArgument,
IDataObject,
IExecuteWorkflowInfo,
INodeExecutionData,
@ -18,12 +19,16 @@ import type {
IWorkflowDataProxyData,
ResourceMapperValue,
} from 'n8n-workflow';
import { jsonParse, NodeConnectionType, NodeOperationError } from 'n8n-workflow';
import {
generateZodSchema,
jsonParse,
NodeConnectionType,
NodeOperationError,
parseErrorMetadata,
traverseNodeParameters,
} from 'n8n-workflow';
import { z } from 'zod';
import type { FromAIArgument } from './FromAIParser';
import { AIParametersParser } from './FromAIParser';
/**
Main class for creating the Workflow tool
Processes the node parameters and creates AI Agent tool capable of executing n8n workflows
@ -88,8 +93,13 @@ export class WorkflowToolService {
} catch (error) {
const executionError = error as ExecutionError;
const errorResponse = `There was an error: "${executionError.message}"`;
void this.context.addOutputData(NodeConnectionType.AiTool, index, executionError);
const metadata = parseErrorMetadata(error);
void this.context.addOutputData(NodeConnectionType.AiTool, index, executionError, metadata);
return errorResponse;
} finally {
// @ts-expect-error this accesses a private member on the actual implementation to fix https://linear.app/n8n/issue/ADO-3186/bug-workflowtool-v2-always-uses-first-row-of-input-data
this.context.runIndex++;
}
};
@ -275,8 +285,7 @@ export class WorkflowToolService {
description: string,
func: (query: string | IDataObject, runManager?: CallbackManagerForToolRun) => Promise<string>,
): Promise<DynamicStructuredTool | DynamicTool> {
const fromAIParser = new AIParametersParser(this.context);
const collectedArguments = await this.extractFromAIParameters(fromAIParser);
const collectedArguments = await this.extractFromAIParameters();
// If there are no `fromAI` arguments, fallback to creating a simple tool
if (collectedArguments.length === 0) {
@ -284,15 +293,13 @@ export class WorkflowToolService {
}
// Otherwise, prepare Zod schema and create a structured tool
const schema = this.createZodSchema(collectedArguments, fromAIParser);
const schema = this.createZodSchema(collectedArguments);
return new DynamicStructuredTool({ schema, name, description, func });
}
private async extractFromAIParameters(
fromAIParser: AIParametersParser,
): Promise<FromAIArgument[]> {
private async extractFromAIParameters(): Promise<FromAIArgument[]> {
const collectedArguments: FromAIArgument[] = [];
fromAIParser.traverseNodeParameters(this.context.getNode().parameters, collectedArguments);
traverseNodeParameters(this.context.getNode().parameters, collectedArguments);
const uniqueArgsMap = new Map<string, FromAIArgument>();
for (const arg of collectedArguments) {
@ -302,9 +309,9 @@ export class WorkflowToolService {
return Array.from(uniqueArgsMap.values());
}
private createZodSchema(args: FromAIArgument[], parser: AIParametersParser): z.ZodObject<any> {
private createZodSchema(args: FromAIArgument[]): z.ZodObject<any> {
const schemaObj = args.reduce((acc: Record<string, z.ZodTypeAny>, placeholder) => {
acc[placeholder.key] = parser.generateZodSchema(placeholder);
acc[placeholder.key] = generateZodSchema(placeholder);
return acc;
}, {});

View file

@ -22,7 +22,7 @@ import { promptTypeOptions } from '@utils/descriptions';
import { getConnectedTools } from '@utils/helpers';
import { getTracingConfig } from '@utils/tracing';
import { formatToOpenAIAssistantTool } from '../../helpers/utils';
import { formatToOpenAIAssistantTool, getChatMessages } from '../../helpers/utils';
import { assistantRLC } from '../descriptions';
const properties: INodeProperties[] = [
@ -252,7 +252,7 @@ export async function execute(this: IExecuteFunctions, i: number): Promise<INode
};
let thread: OpenAIClient.Beta.Threads.Thread;
if (memory) {
const chatMessages = await memory.chatHistory.getMessages();
const chatMessages = await getChatMessages(memory);
// Construct a new thread from the chat history to map the memory
if (chatMessages.length) {

View file

@ -1,5 +1,7 @@
import type { BaseMessage } from '@langchain/core/messages';
import type { StructuredTool } from '@langchain/core/tools';
import type { OpenAIClient } from '@langchain/openai';
import type { BufferWindowMemory } from 'langchain/memory';
import { zodToJsonSchema } from 'zod-to-json-schema';
// Copied from langchain(`langchain/src/tools/convert_to_openai.ts`)
@ -43,3 +45,7 @@ export function formatToOpenAIAssistantTool(tool: StructuredTool): OpenAIClient.
},
};
}
export async function getChatMessages(memory: BufferWindowMemory): Promise<BaseMessage[]> {
return (await memory.loadMemoryVariables({}))[memory.memoryKey] as BaseMessage[];
}

View file

@ -84,7 +84,8 @@ export async function modelSearch(
isCustomAPI ||
model.id.startsWith('gpt-') ||
model.id.startsWith('ft:') ||
model.id.startsWith('o1'),
model.id.startsWith('o1') ||
model.id.startsWith('o3'),
)(this, filter);
}

View file

@ -0,0 +1,46 @@
import { AIMessage, HumanMessage } from '@langchain/core/messages';
import { BufferWindowMemory } from 'langchain/memory';
import { getChatMessages } from '../helpers/utils';
describe('OpenAI message history', () => {
it('should only get a limited number of messages', async () => {
const memory = new BufferWindowMemory({
returnMessages: true,
k: 2,
});
expect(await getChatMessages(memory)).toEqual([]);
await memory.saveContext(
[new HumanMessage({ content: 'human 1' })],
[new AIMessage({ content: 'ai 1' })],
);
// `k` counts conversation turns, while `getChatMessages` returns individual messages, so one turn yields a Human and an AI message.
expect((await getChatMessages(memory)).length).toEqual(2);
await memory.saveContext(
[new HumanMessage({ content: 'human 2' })],
[new AIMessage({ content: 'ai 2' })],
);
expect((await getChatMessages(memory)).length).toEqual(4);
expect((await getChatMessages(memory)).map((msg) => msg.content)).toEqual([
'human 1',
'ai 1',
'human 2',
'ai 2',
]);
// We expect this to be trimmed...
await memory.saveContext(
[new HumanMessage({ content: 'human 3' })],
[new AIMessage({ content: 'ai 3' })],
);
expect((await getChatMessages(memory)).length).toEqual(4);
expect((await getChatMessages(memory)).map((msg) => msg.content)).toEqual([
'human 2',
'ai 2',
'human 3',
'ai 3',
]);
});
});

View file

@ -1,6 +1,6 @@
{
"name": "@n8n/n8n-nodes-langchain",
"version": "1.77.0",
"version": "1.79.0",
"description": "",
"main": "index.js",
"scripts": {

View file

@ -1,4 +1,4 @@
import type { INodeProperties } from 'n8n-workflow';
import type { DisplayCondition, INodeProperties, NodeParameterValue } from 'n8n-workflow';
export const schemaTypeField: INodeProperties = {
displayName: 'Schema Type',
@ -21,7 +21,9 @@ export const schemaTypeField: INodeProperties = {
description: 'How to specify the schema for the function',
};
export const jsonSchemaExampleField: INodeProperties = {
export const buildJsonSchemaExampleField = (props?: {
showExtraProps?: Record<string, Array<NodeParameterValue | DisplayCondition> | undefined>;
}): INodeProperties => ({
displayName: 'JSON Example',
name: 'jsonSchemaExample',
type: 'json',
@ -34,13 +36,18 @@ export const jsonSchemaExampleField: INodeProperties = {
},
displayOptions: {
show: {
...props?.showExtraProps,
schemaType: ['fromJson'],
},
},
description: 'Example JSON object to use to generate the schema',
};
});
export const inputSchemaField: INodeProperties = {
export const jsonSchemaExampleField = buildJsonSchemaExampleField();
export const buildInputSchemaField = (props?: {
showExtraProps?: Record<string, Array<NodeParameterValue | DisplayCondition> | undefined>;
}): INodeProperties => ({
displayName: 'Input Schema',
name: 'inputSchema',
type: 'json',
@ -59,11 +66,14 @@ export const inputSchemaField: INodeProperties = {
},
displayOptions: {
show: {
...props?.showExtraProps,
schemaType: ['manual'],
},
},
description: 'Schema to use for the function',
};
});
export const inputSchemaField = buildInputSchemaField();
export const promptTypeOptions: INodeProperties = {
displayName: 'Source for Prompt (User Message)',
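A hedged example of consuming the new builders, mirroring the ToolCode hunk earlier in this diff (the extra show condition is assumed):

const jsonExample = buildJsonSchemaExampleField({ showExtraProps: { specifyInputSchema: [true] } });
// jsonExample.displayOptions.show -> { specifyInputSchema: [true], schemaType: ['fromJson'] }
const manualSchema = buildInputSchemaField({ showExtraProps: { specifyInputSchema: [true] } });
// manualSchema.displayOptions.show -> { specifyInputSchema: [true], schemaType: ['manual'] }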

View file

@ -16,7 +16,7 @@ import type {
ISupplyDataFunctions,
ITaskMetadata,
} from 'n8n-workflow';
import { NodeOperationError, NodeConnectionType } from 'n8n-workflow';
import { NodeOperationError, NodeConnectionType, parseErrorMetadata } from 'n8n-workflow';
import { logAiEvent, isToolsInstance, isBaseChatMemory, isBaseChatMessageHistory } from './helpers';
import { N8nBinaryLoader } from './N8nBinaryLoader';
@ -41,10 +41,12 @@ export async function callMethodAsync<T>(
functionality: 'configuration-node',
});
const metadata = parseErrorMetadata(error);
parameters.executeFunctions.addOutputData(
parameters.connectionType,
parameters.currentNodeRunIndex,
error,
metadata,
);
if (error.message) {

View file

@ -1,6 +1,6 @@
{
"name": "@n8n/task-runner",
"version": "1.15.0",
"version": "1.17.0",
"scripts": {
"clean": "rimraf dist .turbo",
"start": "node dist/start.js",

View file

@ -850,7 +850,7 @@ describe('JsTaskRunner', () => {
});
});
test.each([['items'], ['$input.all()'], ["$('Trigger').all()"]])(
test.each([['items'], ['$input.all()'], ["$('Trigger').all()"], ['$items()']])(
'should have all input items in the context as %s',
async (expression) => {
const outcome = await executeForAllItems({

Some files were not shown because too many files have changed in this diff.