diff --git a/package.json b/package.json
index 300b2dff71..6233ff77a6 100644
--- a/package.json
+++ b/package.json
@@ -89,7 +89,8 @@
"typescript": "^5.3.0",
"xml2js": "^0.5.0",
"cpy@8>globby": "^11.1.0",
- "qqjs>globby": "^11.1.0"
+ "qqjs>globby": "^11.1.0",
+ "@langchain/core": "^0.1.8"
},
"patchedDependencies": {
"typedi@0.10.0": "patches/typedi@0.10.0.patch",
diff --git a/packages/@n8n/nodes-langchain/credentials/MistralCloudApi.credentials.ts b/packages/@n8n/nodes-langchain/credentials/MistralCloudApi.credentials.ts
new file mode 100644
index 0000000000..fa7c1ff215
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/credentials/MistralCloudApi.credentials.ts
@@ -0,0 +1,42 @@
+import type {
+ IAuthenticateGeneric,
+ ICredentialTestRequest,
+ ICredentialType,
+ INodeProperties,
+} from 'n8n-workflow';
+
+export class MistralCloudApi implements ICredentialType {
+ name = 'mistralCloudApi';
+
+ displayName = 'Mistral Cloud API';
+
+ documentationUrl = 'mistralCloud';
+
+ properties: INodeProperties[] = [
+ {
+ displayName: 'API Key',
+ name: 'apiKey',
+ type: 'string',
+ typeOptions: { password: true },
+ required: true,
+ default: '',
+ },
+ ];
+
+ authenticate: IAuthenticateGeneric = {
+ type: 'generic',
+ properties: {
+ headers: {
+ Authorization: '=Bearer {{$credentials.apiKey}}',
+ },
+ },
+ };
+
+ test: ICredentialTestRequest = {
+ request: {
+ baseURL: 'https://api.mistral.ai/v1',
+ url: '/models',
+ method: 'GET',
+ },
+ };
+}
diff --git a/packages/@n8n/nodes-langchain/nodes/embeddings/EmbeddingsMistralCloud/EmbeddingsMistralCloud.node.ts b/packages/@n8n/nodes-langchain/nodes/embeddings/EmbeddingsMistralCloud/EmbeddingsMistralCloud.node.ts
new file mode 100644
index 0000000000..d8223a2ffe
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/embeddings/EmbeddingsMistralCloud/EmbeddingsMistralCloud.node.ts
@@ -0,0 +1,156 @@
+/* eslint-disable n8n-nodes-base/node-dirname-against-convention */
+import {
+ NodeConnectionType,
+ type IExecuteFunctions,
+ type INodeType,
+ type INodeTypeDescription,
+ type SupplyData,
+} from 'n8n-workflow';
+import type { MistralAIEmbeddingsParams } from '@langchain/mistralai';
+import { MistralAIEmbeddings } from '@langchain/mistralai';
+import { logWrapper } from '../../../utils/logWrapper';
+import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
+
+export class EmbeddingsMistralCloud implements INodeType {
+ description: INodeTypeDescription = {
+ displayName: 'Embeddings Mistral Cloud',
+ name: 'embeddingsMistralCloud',
+ icon: 'file:mistral.svg',
+ credentials: [
+ {
+ name: 'mistralCloudApi',
+ required: true,
+ },
+ ],
+ group: ['transform'],
+ version: 1,
+ description: 'Use Embeddings Mistral Cloud',
+ defaults: {
+ name: 'Embeddings Mistral Cloud',
+ },
+
+ codex: {
+ categories: ['AI'],
+ subcategories: {
+ AI: ['Embeddings'],
+ },
+ resources: {
+ primaryDocumentation: [
+ {
+ url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.embeddingsmistralcloud/',
+ },
+ ],
+ },
+ },
+ // eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node
+ inputs: [],
+ // eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong
+ outputs: [NodeConnectionType.AiEmbedding],
+ outputNames: ['Embeddings'],
+ requestDefaults: {
+ ignoreHttpStatusErrors: true,
+ baseURL: 'https://api.mistral.ai/v1',
+ },
+ properties: [
+ getConnectionHintNoticeField([NodeConnectionType.AiVectorStore]),
+ {
+ displayName: 'Model',
+ name: 'model',
+ type: 'options',
+ description:
+				'The model which will compute the embeddings. <a href="https://docs.mistral.ai/platform/endpoints/">Learn more</a>.',
+ typeOptions: {
+ loadOptions: {
+ routing: {
+ request: {
+ method: 'GET',
+ url: '/models',
+ },
+ output: {
+ postReceive: [
+ {
+ type: 'rootProperty',
+ properties: {
+ property: 'data',
+ },
+ },
+ {
+ type: 'filter',
+ properties: {
+ pass: "={{ $responseItem.id.includes('embed') }}",
+ },
+ },
+ {
+ type: 'setKeyValue',
+ properties: {
+ name: '={{ $responseItem.id }}',
+ value: '={{ $responseItem.id }}',
+ },
+ },
+ {
+ type: 'sort',
+ properties: {
+ key: 'name',
+ },
+ },
+ ],
+ },
+ },
+ },
+ },
+ routing: {
+ send: {
+ type: 'body',
+ property: 'model',
+ },
+ },
+ default: 'mistral-embed',
+ },
+ {
+ displayName: 'Options',
+ name: 'options',
+ placeholder: 'Add Option',
+ description: 'Additional options to add',
+ type: 'collection',
+ default: {},
+ options: [
+ {
+ displayName: 'Batch Size',
+ name: 'batchSize',
+ default: 512,
+ typeOptions: { maxValue: 2048 },
+ description: 'Maximum number of documents to send in each request',
+ type: 'number',
+ },
+ {
+ displayName: 'Strip New Lines',
+ name: 'stripNewLines',
+ default: true,
+ description: 'Whether to strip new lines from the input text',
+ type: 'boolean',
+ },
+ ],
+ },
+ ],
+ };
+
+	async supplyData(this: IExecuteFunctions, itemIndex: number): Promise<SupplyData> {
+ const credentials = await this.getCredentials('mistralCloudApi');
+ const modelName = this.getNodeParameter('model', itemIndex) as string;
+ const options = this.getNodeParameter(
+ 'options',
+ itemIndex,
+ {},
+		) as Partial<MistralAIEmbeddingsParams>;
+
+ const embeddings = new MistralAIEmbeddings({
+ apiKey: credentials.apiKey as string,
+ modelName,
+ ...options,
+ });
+
+ return {
+ response: logWrapper(embeddings, this),
+ };
+ }
+}
diff --git a/packages/@n8n/nodes-langchain/nodes/embeddings/EmbeddingsMistralCloud/mistral.svg b/packages/@n8n/nodes-langchain/nodes/embeddings/EmbeddingsMistralCloud/mistral.svg
new file mode 100644
index 0000000000..928faf4518
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/embeddings/EmbeddingsMistralCloud/mistral.svg
@@ -0,0 +1,262 @@
+
+
diff --git a/packages/@n8n/nodes-langchain/nodes/llms/LmChatMistralCloud/LmChatMistralCloud.node.ts b/packages/@n8n/nodes-langchain/nodes/llms/LmChatMistralCloud/LmChatMistralCloud.node.ts
new file mode 100644
index 0000000000..a23ab4c0c1
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/llms/LmChatMistralCloud/LmChatMistralCloud.node.ts
@@ -0,0 +1,197 @@
+/* eslint-disable n8n-nodes-base/node-dirname-against-convention */
+import {
+ NodeConnectionType,
+ type IExecuteFunctions,
+ type INodeType,
+ type INodeTypeDescription,
+ type SupplyData,
+} from 'n8n-workflow';
+
+import type { ChatMistralAIInput } from '@langchain/mistralai';
+import { ChatMistralAI } from '@langchain/mistralai';
+import { logWrapper } from '../../../utils/logWrapper';
+import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
+
+export class LmChatMistralCloud implements INodeType {
+ description: INodeTypeDescription = {
+ displayName: 'Mistral Cloud Chat Model',
+ // eslint-disable-next-line n8n-nodes-base/node-class-description-name-miscased
+ name: 'lmChatMistralCloud',
+ icon: 'file:mistral.svg',
+ group: ['transform'],
+ version: 1,
+ description: 'For advanced usage with an AI chain',
+ defaults: {
+ name: 'Mistral Cloud Chat Model',
+ },
+ codex: {
+ categories: ['AI'],
+ subcategories: {
+ AI: ['Language Models'],
+ },
+ resources: {
+ primaryDocumentation: [
+ {
+ url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatmistralcloud/',
+ },
+ ],
+ },
+ },
+ // eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node
+ inputs: [],
+ // eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong
+ outputs: [NodeConnectionType.AiLanguageModel],
+ outputNames: ['Model'],
+ credentials: [
+ {
+ name: 'mistralCloudApi',
+ required: true,
+ },
+ ],
+ requestDefaults: {
+ ignoreHttpStatusErrors: true,
+ baseURL: 'https://api.mistral.ai/v1',
+ },
+ properties: [
+ getConnectionHintNoticeField([NodeConnectionType.AiChain, NodeConnectionType.AiAgent]),
+ {
+ displayName: 'Model',
+ name: 'model',
+ type: 'options',
+ description:
+				'The model which will generate the completion. <a href="https://docs.mistral.ai/platform/endpoints/">Learn more</a>.',
+ typeOptions: {
+ loadOptions: {
+ routing: {
+ request: {
+ method: 'GET',
+ url: '/models',
+ },
+ output: {
+ postReceive: [
+ {
+ type: 'rootProperty',
+ properties: {
+ property: 'data',
+ },
+ },
+ {
+ type: 'filter',
+ properties: {
+ pass: "={{ !$responseItem.id.includes('embed') }}",
+ },
+ },
+ {
+ type: 'setKeyValue',
+ properties: {
+ name: '={{ $responseItem.id }}',
+ value: '={{ $responseItem.id }}',
+ },
+ },
+ {
+ type: 'sort',
+ properties: {
+ key: 'name',
+ },
+ },
+ ],
+ },
+ },
+ },
+ },
+ routing: {
+ send: {
+ type: 'body',
+ property: 'model',
+ },
+ },
+ default: 'mistral-small',
+ },
+ {
+ displayName: 'Options',
+ name: 'options',
+ placeholder: 'Add Option',
+ description: 'Additional options to add',
+ type: 'collection',
+ default: {},
+ options: [
+ {
+ displayName: 'Maximum Number of Tokens',
+ name: 'maxTokens',
+ default: -1,
+ description:
+ 'The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).',
+ type: 'number',
+ typeOptions: {
+ maxValue: 32768,
+ },
+ },
+ {
+ displayName: 'Sampling Temperature',
+ name: 'temperature',
+ default: 0.7,
+ typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
+ description:
+ 'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
+ type: 'number',
+ },
+ {
+ displayName: 'Max Retries',
+ name: 'maxRetries',
+ default: 2,
+ description: 'Maximum number of retries to attempt',
+ type: 'number',
+ },
+ {
+ displayName: 'Top P',
+ name: 'topP',
+ default: 1,
+ typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
+ description:
+ 'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.',
+ type: 'number',
+ },
+ {
+ displayName: 'Enable Safe Mode',
+ name: 'safeMode',
+ default: false,
+ type: 'boolean',
+ description: 'Whether to inject a safety prompt before all conversations',
+ },
+ {
+ displayName: 'Random Seed',
+ name: 'randomSeed',
+ default: undefined,
+ type: 'number',
+ description:
+ 'The seed to use for random sampling. If set, different calls will generate deterministic results.',
+ },
+ ],
+ },
+ ],
+ };
+
+	async supplyData(this: IExecuteFunctions, itemIndex: number): Promise<SupplyData> {
+ const credentials = await this.getCredentials('mistralCloudApi');
+
+ const modelName = this.getNodeParameter('model', itemIndex) as string;
+ const options = this.getNodeParameter('options', itemIndex, {
+ maxRetries: 2,
+ topP: 1,
+ temperature: 0.7,
+ maxTokens: -1,
+ safeMode: false,
+ randomSeed: undefined,
+		}) as Partial<ChatMistralAIInput>;
+
+ const model = new ChatMistralAI({
+ apiKey: credentials.apiKey as string,
+ modelName,
+ ...options,
+ });
+
+ return {
+ response: logWrapper(model, this),
+ };
+ }
+}
diff --git a/packages/@n8n/nodes-langchain/nodes/llms/LmChatMistralCloud/mistral.svg b/packages/@n8n/nodes-langchain/nodes/llms/LmChatMistralCloud/mistral.svg
new file mode 100644
index 0000000000..928faf4518
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/llms/LmChatMistralCloud/mistral.svg
@@ -0,0 +1,262 @@
+
+
diff --git a/packages/@n8n/nodes-langchain/package.json b/packages/@n8n/nodes-langchain/package.json
index 2f64bceda9..d0c5e6c843 100644
--- a/packages/@n8n/nodes-langchain/package.json
+++ b/packages/@n8n/nodes-langchain/package.json
@@ -31,6 +31,7 @@
"dist/credentials/GooglePalmApi.credentials.js",
"dist/credentials/HuggingFaceApi.credentials.js",
"dist/credentials/MotorheadApi.credentials.js",
+ "dist/credentials/MistralCloudApi.credentials.js",
"dist/credentials/OllamaApi.credentials.js",
"dist/credentials/PineconeApi.credentials.js",
"dist/credentials/QdrantApi.credentials.js",
@@ -54,11 +55,13 @@
"dist/nodes/embeddings/EmbeddingsAwsBedrock/EmbeddingsAwsBedrock.node.js",
"dist/nodes/embeddings/EmbeddingsGooglePalm/EmbeddingsGooglePalm.node.js",
"dist/nodes/embeddings/EmbeddingsHuggingFaceInference/EmbeddingsHuggingFaceInference.node.js",
+ "dist/nodes/embeddings/EmbeddingsMistralCloud/EmbeddingsMistralCloud.node.js",
"dist/nodes/embeddings/EmbeddingsOpenAI/EmbeddingsOpenAi.node.js",
"dist/nodes/llms/LMChatAnthropic/LmChatAnthropic.node.js",
"dist/nodes/llms/LmGooglePalm/LmGooglePalm.node.js",
"dist/nodes/llms/LmChatAwsBedrock/LmChatAwsBedrock.node.js",
"dist/nodes/llms/LmChatGooglePalm/LmChatGooglePalm.node.js",
+ "dist/nodes/llms/LmChatMistralCloud/LmChatMistralCloud.node.js",
"dist/nodes/llms/LMChatOllama/LmChatOllama.node.js",
"dist/nodes/llms/LMChatOpenAi/LmChatOpenAi.node.js",
"dist/nodes/llms/LMOpenAi/LmOpenAi.node.js",
@@ -118,6 +121,8 @@
"@getzep/zep-js": "0.9.0",
"@google-ai/generativelanguage": "0.2.1",
"@huggingface/inference": "2.6.4",
+ "@langchain/core": "0.1.8",
+ "@langchain/mistralai": "0.0.6",
"@n8n/vm2": "3.9.20",
"@pinecone-database/pinecone": "1.1.2",
"@qdrant/js-client-rest": "1.7.0",
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 31b2262924..28e79e4047 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -21,6 +21,7 @@ overrides:
xml2js: ^0.5.0
cpy@8>globby: ^11.1.0
qqjs>globby: ^11.1.0
+ '@langchain/core': ^0.1.8
patchedDependencies:
'@sentry/cli@2.17.0':
@@ -186,6 +187,12 @@ importers:
'@huggingface/inference':
specifier: 2.6.4
version: 2.6.4
+ '@langchain/core':
+ specifier: ^0.1.8
+ version: 0.1.8
+ '@langchain/mistralai':
+ specifier: 0.0.6
+ version: 0.0.6
'@n8n/vm2':
specifier: 3.9.20
version: 3.9.20
@@ -218,7 +225,7 @@ importers:
version: 1.2.0
langchain:
specifier: 0.0.198
- version: 0.0.198(@aws-sdk/client-bedrock-runtime@3.454.0)(@getzep/zep-js@0.9.0)(@google-ai/generativelanguage@0.2.1)(@huggingface/inference@2.6.4)(@pinecone-database/pinecone@1.1.2)(@qdrant/js-client-rest@1.7.0)(@supabase/supabase-js@2.38.5)(@xata.io/client@0.25.3)(cohere-ai@6.2.2)(d3-dsv@2.0.0)(epub2@3.0.1)(html-to-text@9.0.5)(lodash@4.17.21)(mammoth@1.6.0)(pdf-parse@1.1.1)(pg@8.11.3)(redis@4.6.11)(typeorm@0.3.17)
+ version: 0.0.198(@aws-sdk/client-bedrock-runtime@3.454.0)(@getzep/zep-js@0.9.0)(@google-ai/generativelanguage@0.2.1)(@huggingface/inference@2.6.4)(@pinecone-database/pinecone@1.1.2)(@qdrant/js-client-rest@1.7.0)(@supabase/supabase-js@2.38.5)(@xata.io/client@0.25.3)(axios@1.6.2)(cohere-ai@6.2.2)(d3-dsv@2.0.0)(epub2@3.0.1)(html-to-text@9.0.5)(lodash@4.17.21)(mammoth@1.6.0)(pdf-parse@1.1.1)(pg@8.11.3)(redis@4.6.11)(typeorm@0.3.17)
lodash:
specifier: 4.17.21
version: 4.17.21
@@ -5878,8 +5885,8 @@ packages:
resolution: {integrity: sha512-GaHYm+c0O9MjZRu0ongGBRbinu8gVAMd2UZjji6jVmqKtZluZnptXGWhz1E8j8D2HJ3f/yMxKAUC0b+57wncIw==}
dev: false
- /@langchain/core@0.0.2:
- resolution: {integrity: sha512-Q3koIjjI295wUKrSMLTXoc3GUyGOS8L4NiLNWD05lozv9mXCNYG6/kmykzQLbiSWXVRKG4zQ82Kr7EHb39tQQw==}
+ /@langchain/core@0.1.8:
+ resolution: {integrity: sha512-ZTQ/NFjBbOKktVL+BlT/Fal5Ys0GAhygWeWdGNoFZg0qJfSt54fQzFhljNSpnQQ4Wavj8NkkCLiSFMnxDAuHjg==}
engines: {node: '>=18'}
dependencies:
ansi-styles: 5.2.0
@@ -5887,12 +5894,23 @@ packages:
decamelize: 1.2.0
js-tiktoken: 1.0.8
langsmith: 0.0.48
+ ml-distance: 4.0.1
p-queue: 6.6.2
p-retry: 4.6.2
uuid: 9.0.0
zod: 3.22.4
dev: false
+ /@langchain/mistralai@0.0.6:
+ resolution: {integrity: sha512-zA/xxKNF+rDM9IF1uvVx+LI/eWPZSO85tJBX60ENeQrcM35np92Sm3ca0D4ixcdBAkG0vnn+9ELcYHGdknCbHQ==}
+ engines: {node: '>=18'}
+ dependencies:
+ '@langchain/core': 0.1.8
+ '@mistralai/mistralai': 0.0.7
+ transitivePeerDependencies:
+ - debug
+ dev: false
+
/@lezer/common@1.1.0:
resolution: {integrity: sha512-XPIN3cYDXsoJI/oDWoR2tD++juVrhgIago9xyKhZ7IhGlzdDM9QgC8D8saKNCz5pindGcznFr2HBSsEQSWnSjw==}
dev: false
@@ -6020,6 +6038,15 @@ packages:
resolution: {integrity: sha512-M/BexG/p05C5lFfMunxo/QcgIJnMT2vDVCd00wNqK2ImZONIlEETZwWJu1QtLxtmYlSHlCFl3JNzp0tLe7OJ5g==}
dev: true
+ /@mistralai/mistralai@0.0.7:
+ resolution: {integrity: sha512-47FiV/GBnt6gug99ZfDBcBofYuYvqT5AyhUDdtktUbCN+gq52tmiAbtwc88k7hlyUWHzJ28VpHRDfNTRfaWKxA==}
+ dependencies:
+ axios: 1.6.2(debug@3.2.7)
+ axios-retry: 4.0.0(axios@1.6.2)
+ transitivePeerDependencies:
+ - debug
+ dev: false
+
/@mongodb-js/saslprep@1.1.0:
resolution: {integrity: sha512-Xfijy7HvfzzqiOAhAepF4SGN5e9leLkMvg/OPOF97XemjfVCYN/oWa75wnkc6mltMSTwY+XlbhWgUOJmkFspSw==}
dependencies:
@@ -12059,6 +12086,15 @@ packages:
is-retry-allowed: 2.2.0
dev: false
+ /axios-retry@4.0.0(axios@1.6.2):
+ resolution: {integrity: sha512-F6P4HVGITD/v4z9Lw2mIA24IabTajvpDZmKa6zq/gGwn57wN5j1P3uWrAV0+diqnW6kTM2fTqmWNfgYWGmMuiA==}
+ peerDependencies:
+ axios: 0.x || 1.x
+ dependencies:
+ axios: 1.6.2(debug@3.2.7)
+ is-retry-allowed: 2.2.0
+ dev: false
+
/axios@0.21.4:
resolution: {integrity: sha512-ut5vewkiu8jjGBdqpM44XxjuCjq9LAKeHVmoVfHVzy8eHgxxq8SbAVQNovDA8mVi05kP0Ea/n/UzcSHcTJQfNg==}
dependencies:
@@ -18590,7 +18626,7 @@ packages:
resolution: {integrity: sha512-Xq9nH7KlWZmXAtodXDDRE7vs6DU1gTU8zYDHDiWLSip45Egwq3plLHzPn27NgvzL2r1LMPC1vdqh98sQxtqj4A==}
dev: false
- /langchain@0.0.198(@aws-sdk/client-bedrock-runtime@3.454.0)(@getzep/zep-js@0.9.0)(@google-ai/generativelanguage@0.2.1)(@huggingface/inference@2.6.4)(@pinecone-database/pinecone@1.1.2)(@qdrant/js-client-rest@1.7.0)(@supabase/supabase-js@2.38.5)(@xata.io/client@0.25.3)(cohere-ai@6.2.2)(d3-dsv@2.0.0)(epub2@3.0.1)(html-to-text@9.0.5)(lodash@4.17.21)(mammoth@1.6.0)(pdf-parse@1.1.1)(pg@8.11.3)(redis@4.6.11)(typeorm@0.3.17):
+ /langchain@0.0.198(@aws-sdk/client-bedrock-runtime@3.454.0)(@getzep/zep-js@0.9.0)(@google-ai/generativelanguage@0.2.1)(@huggingface/inference@2.6.4)(@pinecone-database/pinecone@1.1.2)(@qdrant/js-client-rest@1.7.0)(@supabase/supabase-js@2.38.5)(@xata.io/client@0.25.3)(axios@1.6.2)(cohere-ai@6.2.2)(d3-dsv@2.0.0)(epub2@3.0.1)(html-to-text@9.0.5)(lodash@4.17.21)(mammoth@1.6.0)(pdf-parse@1.1.1)(pg@8.11.3)(redis@4.6.11)(typeorm@0.3.17):
resolution: {integrity: sha512-YC0O1g8r61InCWyF5NmiQjdghdq6LKcgMrDZtqLbgDxAe4RoSldonm+5oNXS3yjCISG0j3s5Cty+yB7klqvUpg==}
engines: {node: '>=18'}
peerDependencies:
@@ -18901,11 +18937,12 @@ packages:
'@getzep/zep-js': 0.9.0
'@google-ai/generativelanguage': 0.2.1
'@huggingface/inference': 2.6.4
- '@langchain/core': 0.0.2
+ '@langchain/core': 0.1.8
'@pinecone-database/pinecone': 1.1.2
'@qdrant/js-client-rest': 1.7.0(typescript@5.3.2)
'@supabase/supabase-js': 2.38.5
'@xata.io/client': 0.25.3(typescript@5.3.2)
+ axios: 1.6.2(debug@3.2.7)
binary-extensions: 2.2.0
cohere-ai: 6.2.2
d3-dsv: 2.0.0