diff --git a/packages/@n8n/nodes-langchain/credentials/GooglePalmApi.credentials.ts b/packages/@n8n/nodes-langchain/credentials/GooglePalmApi.credentials.ts
index 853157946f..3c474e3198 100644
--- a/packages/@n8n/nodes-langchain/credentials/GooglePalmApi.credentials.ts
+++ b/packages/@n8n/nodes-langchain/credentials/GooglePalmApi.credentials.ts
@@ -8,7 +8,7 @@ import type {
export class GooglePalmApi implements ICredentialType {
name = 'googlePalmApi';
- displayName = 'GooglePaLMApi';
+ displayName = 'Google Gemini(PaLM) Api';
documentationUrl = 'google';
@@ -41,7 +41,7 @@ export class GooglePalmApi implements ICredentialType {
test: ICredentialTestRequest = {
request: {
- baseURL: '={{$credentials.host}}/v1beta3/models',
+ baseURL: '={{$credentials.host}}/v1beta/models',
},
};
}
diff --git a/packages/@n8n/nodes-langchain/nodes/agents/Agent/Agent.node.ts b/packages/@n8n/nodes-langchain/nodes/agents/Agent/Agent.node.ts
index 2112b401d9..ddc3d8372d 100644
--- a/packages/@n8n/nodes-langchain/nodes/agents/Agent/Agent.node.ts
+++ b/packages/@n8n/nodes-langchain/nodes/agents/Agent/Agent.node.ts
@@ -75,6 +75,7 @@ function getInputs(
'@n8n/n8n-nodes-langchain.lmChatOllama',
'@n8n/n8n-nodes-langchain.lmChatOpenAi',
'@n8n/n8n-nodes-langchain.lmChatGooglePalm',
+ '@n8n/n8n-nodes-langchain.lmChatGoogleGemini',
'@n8n/n8n-nodes-langchain.lmChatMistralCloud',
'@n8n/n8n-nodes-langchain.lmChatAzureOpenAi',
],
diff --git a/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/ChainLlm.node.ts b/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/ChainLlm.node.ts
index 5f8dcde626..578dc791c1 100644
--- a/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/ChainLlm.node.ts
+++ b/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/ChainLlm.node.ts
@@ -21,6 +21,7 @@ import { CombiningOutputParser } from 'langchain/output_parsers';
import { LLMChain } from 'langchain/chains';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { HumanMessage } from '@langchain/core/messages';
+import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
import { getTemplateNoticeField } from '../../../utils/sharedFields';
import {
getOptionalOutputParsers,
@@ -74,14 +75,19 @@ async function getImageMessage(
}
const bufferData = await context.helpers.getBinaryDataBuffer(itemIndex, binaryDataKey);
+ const model = (await context.getInputConnectionData(
+ NodeConnectionType.AiLanguageModel,
+ 0,
+ )) as BaseLanguageModel;
+ const dataURI = `data:image/jpeg;base64,${bufferData.toString('base64')}`;
+
+ const imageUrl = model instanceof ChatGoogleGenerativeAI ? dataURI : { url: dataURI, detail };
+
return new HumanMessage({
content: [
{
type: 'image_url',
- image_url: {
- url: `data:image/jpeg;base64,${bufferData.toString('base64')}`,
- detail,
- },
+ image_url: imageUrl,
},
],
});
diff --git a/packages/@n8n/nodes-langchain/nodes/embeddings/EmbeddingsGoogleGemini/EmbeddingsGoogleGemini.node.ts b/packages/@n8n/nodes-langchain/nodes/embeddings/EmbeddingsGoogleGemini/EmbeddingsGoogleGemini.node.ts
new file mode 100644
index 0000000000..7249b639f6
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/embeddings/EmbeddingsGoogleGemini/EmbeddingsGoogleGemini.node.ts
@@ -0,0 +1,136 @@
+/* eslint-disable n8n-nodes-base/node-dirname-against-convention */
+import {
+ NodeConnectionType,
+ type IExecuteFunctions,
+ type INodeType,
+ type INodeTypeDescription,
+ type SupplyData,
+} from 'n8n-workflow';
+import { GoogleGenerativeAIEmbeddings } from '@langchain/google-genai';
+
+import { logWrapper } from '../../../utils/logWrapper';
+import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
+
+export class EmbeddingsGoogleGemini implements INodeType {
+ description: INodeTypeDescription = {
+ displayName: 'Embeddings Google Gemini',
+ name: 'embeddingsGoogleGemini',
+ icon: 'file:google.svg',
+ group: ['transform'],
+ version: 1,
+ description: 'Use Google Gemini Embeddings',
+ defaults: {
+ name: 'Embeddings Google Gemini',
+ },
+ requestDefaults: {
+ ignoreHttpStatusErrors: true,
+ baseURL: '={{ $credentials.host }}',
+ },
+ credentials: [
+ {
+ name: 'googlePalmApi',
+ required: true,
+ },
+ ],
+ codex: {
+ categories: ['AI'],
+ subcategories: {
+ AI: ['Embeddings'],
+ },
+ resources: {
+ primaryDocumentation: [
+ {
+ url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.embeddingsgooglegemini/',
+ },
+ ],
+ },
+ },
+ // eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node
+ inputs: [],
+ // eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong
+ outputs: [NodeConnectionType.AiEmbedding],
+ outputNames: ['Embeddings'],
+ properties: [
+ getConnectionHintNoticeField([NodeConnectionType.AiVectorStore]),
+ {
+ displayName:
+ 'Each model is using different dimensional density for embeddings. Please make sure to use the same dimensionality for your vector store. The default model is using 768-dimensional embeddings.',
+ name: 'notice',
+ type: 'notice',
+ default: '',
+ },
+ {
+ displayName: 'Model',
+ name: 'modelName',
+ type: 'options',
+ description:
+ 'The model which will generate the embeddings. <a href="https://developers.generativeai.google/api/rest/generativelanguage/models/list">Learn more</a>.',
+ typeOptions: {
+ loadOptions: {
+ routing: {
+ request: {
+ method: 'GET',
+ url: '/v1beta/models',
+ },
+ output: {
+ postReceive: [
+ {
+ type: 'rootProperty',
+ properties: {
+ property: 'models',
+ },
+ },
+ {
+ type: 'filter',
+ properties: {
+ pass: "={{ $responseItem.name.includes('embedding') }}",
+ },
+ },
+ {
+ type: 'setKeyValue',
+ properties: {
+ name: '={{$responseItem.name}}',
+ value: '={{$responseItem.name}}',
+ description: '={{$responseItem.description}}',
+ },
+ },
+ {
+ type: 'sort',
+ properties: {
+ key: 'name',
+ },
+ },
+ ],
+ },
+ },
+ },
+ },
+ routing: {
+ send: {
+ type: 'body',
+ property: 'model',
+ },
+ },
+ default: 'textembedding-gecko-multilingual@latest',
+ },
+ ],
+ };
+
+ async supplyData(this: IExecuteFunctions, itemIndex: number): Promise<SupplyData> {
+ this.logger.verbose('Supply data for embeddings Google Gemini');
+ const modelName = this.getNodeParameter(
+ 'modelName',
+ itemIndex,
+ 'textembedding-gecko-multilingual@latest',
+ ) as string;
+ const credentials = await this.getCredentials('googlePalmApi');
+ const embeddings = new GoogleGenerativeAIEmbeddings({
+ apiKey: credentials.apiKey as string,
+ modelName,
+ });
+
+ return {
+ response: logWrapper(embeddings, this),
+ };
+ }
+}
diff --git a/packages/@n8n/nodes-langchain/nodes/embeddings/EmbeddingsGoogleGemini/google.svg b/packages/@n8n/nodes-langchain/nodes/embeddings/EmbeddingsGoogleGemini/google.svg
new file mode 100644
index 0000000000..38f3c22592
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/embeddings/EmbeddingsGoogleGemini/google.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/packages/@n8n/nodes-langchain/nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.ts b/packages/@n8n/nodes-langchain/nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.ts
new file mode 100644
index 0000000000..425ef2294c
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.ts
@@ -0,0 +1,233 @@
+/* eslint-disable n8n-nodes-base/node-dirname-against-convention */
+import {
+ NodeConnectionType,
+ type IExecuteFunctions,
+ type INodeType,
+ type INodeTypeDescription,
+ type SupplyData,
+} from 'n8n-workflow';
+import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
+import type { HarmBlockThreshold, HarmCategory, SafetySetting } from '@google/generative-ai';
+import { logWrapper } from '../../../utils/logWrapper';
+import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
+import { harmCategories, harmThresholds } from './options';
+
+export class LmChatGoogleGemini implements INodeType {
+ description: INodeTypeDescription = {
+ displayName: 'Google Gemini Chat Model',
+ // eslint-disable-next-line n8n-nodes-base/node-class-description-name-miscased
+ name: 'lmChatGoogleGemini',
+ icon: 'file:google.svg',
+ group: ['transform'],
+ version: 1,
+ description: 'Chat Model Google Gemini',
+ defaults: {
+ name: 'Google Gemini Chat Model',
+ },
+ codex: {
+ categories: ['AI'],
+ subcategories: {
+ AI: ['Language Models'],
+ },
+ resources: {
+ primaryDocumentation: [
+ {
+ url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgooglegemini/',
+ },
+ ],
+ },
+ },
+ // eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node
+ inputs: [],
+ // eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong
+ outputs: [NodeConnectionType.AiLanguageModel],
+ outputNames: ['Model'],
+ credentials: [
+ {
+ name: 'googlePalmApi',
+ required: true,
+ },
+ ],
+ requestDefaults: {
+ ignoreHttpStatusErrors: true,
+ baseURL: '={{ $credentials.host }}',
+ },
+ properties: [
+ getConnectionHintNoticeField([NodeConnectionType.AiChain, NodeConnectionType.AiAgent]),
+ {
+ displayName: 'Model',
+ name: 'modelName',
+ type: 'options',
+ description:
+ 'The model which will generate the completion. <a href="https://developers.generativeai.google/api/rest/generativelanguage/models/list">Learn more</a>.',
+ typeOptions: {
+ loadOptions: {
+ routing: {
+ request: {
+ method: 'GET',
+ url: '/v1beta/models',
+ },
+ output: {
+ postReceive: [
+ {
+ type: 'rootProperty',
+ properties: {
+ property: 'models',
+ },
+ },
+ {
+ type: 'filter',
+ properties: {
+ pass: "={{ !$responseItem.name.includes('embedding') }}",
+ },
+ },
+ {
+ type: 'setKeyValue',
+ properties: {
+ name: '={{$responseItem.name}}',
+ value: '={{$responseItem.name}}',
+ description: '={{$responseItem.description}}',
+ },
+ },
+ {
+ type: 'sort',
+ properties: {
+ key: 'name',
+ },
+ },
+ ],
+ },
+ },
+ },
+ },
+ routing: {
+ send: {
+ type: 'body',
+ property: 'model',
+ },
+ },
+ default: 'models/gemini-1.0-pro',
+ },
+ {
+ displayName: 'Options',
+ name: 'options',
+ placeholder: 'Add Option',
+ description: 'Additional options to add',
+ type: 'collection',
+ default: {},
+ options: [
+ {
+ displayName: 'Maximum Number of Tokens',
+ name: 'maxOutputTokens',
+ default: 2048,
+ description: 'The maximum number of tokens to generate in the completion',
+ type: 'number',
+ },
+ {
+ displayName: 'Sampling Temperature',
+ name: 'temperature',
+ default: 0.4,
+ typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
+ description:
+ 'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
+ type: 'number',
+ },
+ {
+ displayName: 'Top K',
+ name: 'topK',
+ default: 32,
+ typeOptions: { maxValue: 40, minValue: -1, numberPrecision: 1 },
+ description:
+ 'Used to remove "long tail" low probability responses. Defaults to -1, which disables it.',
+ type: 'number',
+ },
+ {
+ displayName: 'Top P',
+ name: 'topP',
+ default: 1,
+ typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
+ description:
+ 'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.',
+ type: 'number',
+ },
+
+ // Safety Settings
+ {
+ displayName: 'Safety Settings',
+ name: 'safetySettings',
+ type: 'fixedCollection',
+ typeOptions: { multipleValues: true },
+ default: {
+ values: {
+ category: harmCategories[0].name as HarmCategory,
+ threshold: harmThresholds[0].name as HarmBlockThreshold,
+ },
+ },
+ placeholder: 'Add Option',
+ options: [
+ {
+ name: 'values',
+ displayName: 'Values',
+ values: [
+ {
+ displayName: 'Safety Category',
+ name: 'category',
+ type: 'options',
+ description: 'The category of harmful content to block',
+ default: 'HARM_CATEGORY_UNSPECIFIED',
+ options: harmCategories,
+ },
+ {
+ displayName: 'Safety Threshold',
+ name: 'threshold',
+ type: 'options',
+ description: 'The threshold of harmful content to block',
+ default: 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
+ options: harmThresholds,
+ },
+ ],
+ },
+ ],
+ },
+ ],
+ },
+ ],
+ };
+
+ async supplyData(this: IExecuteFunctions, itemIndex: number): Promise<SupplyData> {
+ const credentials = await this.getCredentials('googlePalmApi');
+
+ const modelName = this.getNodeParameter('modelName', itemIndex) as string;
+ const options = this.getNodeParameter('options', itemIndex, {
+ maxOutputTokens: 1024,
+ temperature: 0.7,
+ topK: 40,
+ topP: 0.9,
+ }) as {
+ maxOutputTokens: number;
+ temperature: number;
+ topK: number;
+ topP: number;
+ };
+
+ const safetySettings = this.getNodeParameter(
+ 'options.safetySettings.values',
+ itemIndex,
+ null,
+ ) as SafetySetting[];
+
+ const model = new ChatGoogleGenerativeAI({
+ apiKey: credentials.apiKey as string,
+ modelName,
+ topK: options.topK,
+ topP: options.topP,
+ temperature: options.temperature,
+ maxOutputTokens: options.maxOutputTokens,
+ safetySettings,
+ });
+
+ return {
+ response: logWrapper(model, this),
+ };
+ }
+}
diff --git a/packages/@n8n/nodes-langchain/nodes/llms/LmChatGoogleGemini/google.svg b/packages/@n8n/nodes-langchain/nodes/llms/LmChatGoogleGemini/google.svg
new file mode 100644
index 0000000000..38f3c22592
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/llms/LmChatGoogleGemini/google.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/packages/@n8n/nodes-langchain/nodes/llms/LmChatGoogleGemini/options.ts b/packages/@n8n/nodes-langchain/nodes/llms/LmChatGoogleGemini/options.ts
new file mode 100644
index 0000000000..08506cb080
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/llms/LmChatGoogleGemini/options.ts
@@ -0,0 +1,52 @@
+import type { INodePropertyOptions } from 'n8n-workflow';
+
+export const harmCategories: INodePropertyOptions[] = [
+ {
+ value: 'HARM_CATEGORY_HARASSMENT',
+ name: 'HARM_CATEGORY_HARASSMENT',
+ description: 'Harassment content',
+ },
+ {
+ value: 'HARM_CATEGORY_HATE_SPEECH',
+ name: 'HARM_CATEGORY_HATE_SPEECH',
+ description: 'Hate speech and content',
+ },
+ {
+ value: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
+ name: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
+ description: 'Sexually explicit content',
+ },
+ {
+ value: 'HARM_CATEGORY_DANGEROUS_CONTENT',
+ name: 'HARM_CATEGORY_DANGEROUS_CONTENT',
+ description: 'Dangerous content',
+ },
+];
+
+export const harmThresholds: INodePropertyOptions[] = [
+ {
+ value: 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
+ name: 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
+ description: 'Threshold is unspecified',
+ },
+ {
+ value: 'BLOCK_LOW_AND_ABOVE',
+ name: 'BLOCK_LOW_AND_ABOVE',
+ description: 'Content with NEGLIGIBLE will be allowed',
+ },
+ {
+ value: 'BLOCK_MEDIUM_AND_ABOVE',
+ name: 'BLOCK_MEDIUM_AND_ABOVE',
+ description: 'Content with NEGLIGIBLE and LOW will be allowed',
+ },
+ {
+ value: 'BLOCK_ONLY_HIGH',
+ name: 'BLOCK_ONLY_HIGH',
+ description: 'Content with NEGLIGIBLE, LOW, and MEDIUM will be allowed',
+ },
+ {
+ value: 'BLOCK_NONE',
+ name: 'BLOCK_NONE',
+ description: 'All content will be allowed',
+ },
+];
diff --git a/packages/@n8n/nodes-langchain/package.json b/packages/@n8n/nodes-langchain/package.json
index ae9624ab0f..598c92b5e7 100644
--- a/packages/@n8n/nodes-langchain/package.json
+++ b/packages/@n8n/nodes-langchain/package.json
@@ -59,6 +59,7 @@
"dist/nodes/embeddings/EmbeddingsAwsBedrock/EmbeddingsAwsBedrock.node.js",
"dist/nodes/embeddings/EmbeddingsAzureOpenAi/EmbeddingsAzureOpenAi.node.js",
"dist/nodes/embeddings/EmbeddingsGooglePalm/EmbeddingsGooglePalm.node.js",
+ "dist/nodes/embeddings/EmbeddingsGoogleGemini/EmbeddingsGoogleGemini.node.js",
"dist/nodes/embeddings/EmbeddingsHuggingFaceInference/EmbeddingsHuggingFaceInference.node.js",
"dist/nodes/embeddings/EmbeddingsMistralCloud/EmbeddingsMistralCloud.node.js",
"dist/nodes/embeddings/EmbeddingsOpenAI/EmbeddingsOpenAi.node.js",
@@ -68,6 +69,7 @@
"dist/nodes/llms/LmGooglePalm/LmGooglePalm.node.js",
"dist/nodes/llms/LmChatAwsBedrock/LmChatAwsBedrock.node.js",
"dist/nodes/llms/LmChatGooglePalm/LmChatGooglePalm.node.js",
+ "dist/nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.js",
"dist/nodes/llms/LmChatMistralCloud/LmChatMistralCloud.node.js",
"dist/nodes/llms/LMChatOllama/LmChatOllama.node.js",
"dist/nodes/llms/LMChatOpenAi/LmChatOpenAi.node.js",
@@ -131,11 +133,13 @@
"@aws-sdk/credential-provider-node": "3.549.0",
"@getzep/zep-js": "0.9.0",
"@google-ai/generativelanguage": "0.2.1",
+ "@google/generative-ai": "^0.5.0",
"@huggingface/inference": "2.6.4",
"@langchain/anthropic": "^0.1.3",
"@langchain/cohere": "^0.0.5",
"@langchain/community": "0.0.44",
"@langchain/core": "0.1.41",
+ "@langchain/google-genai": "^0.0.11",
"@langchain/mistralai": "0.0.13",
"@langchain/openai": "^0.0.16",
"@langchain/pinecone": "^0.0.3",
@@ -161,10 +165,10 @@
"openai": "4.26.1",
"pdf-parse": "1.1.1",
"pg": "8.11.3",
- "tmp-promise": "3.0.3",
"redis": "4.6.12",
"sqlite3": "5.1.7",
"temp": "0.9.4",
+ "tmp-promise": "3.0.3",
"zod": "3.22.4",
"zod-to-json-schema": "3.22.4"
}
diff --git a/packages/editor-ui/src/components/RunDataAi/useAiContentParsers.ts b/packages/editor-ui/src/components/RunDataAi/useAiContentParsers.ts
index 50d5ccf558..dc7d7bc1a6 100644
--- a/packages/editor-ui/src/components/RunDataAi/useAiContentParsers.ts
+++ b/packages/editor-ui/src/components/RunDataAi/useAiContentParsers.ts
@@ -84,7 +84,9 @@ const outputTypeParsers: {
[NodeConnectionType.AiAgent]: fallbackParser,
[NodeConnectionType.AiMemory](execData: IDataObject) {
const chatHistory =
- execData.chatHistory ?? execData.messages ?? execData?.response?.chat_history;
+ execData.chatHistory ??
+ execData.messages ??
+ (execData?.response as IDataObject)?.chat_history;
if (Array.isArray(chatHistory)) {
const responseText = chatHistory
.map((content: MemoryMessage) => {
@@ -96,16 +98,25 @@ const outputTypeParsers: {
interface MessageContent {
type: string;
text?: string;
- image_url?: {
- url: string;
- };
+ image_url?:
+ | {
+ url: string;
+ }
+ | string;
}
let message = content.kwargs.content;
if (Array.isArray(message)) {
message = (message as MessageContent[])
.map((item) => {
- if (item?.type === 'image_url') {
- return `![Input image](${item.image_url?.url})`;
+ const { type, image_url } = item;
+ if (
+ type === 'image_url' &&
+ typeof image_url === 'object' &&
+ typeof image_url.url === 'string'
+ ) {
+ return `![Input image](${image_url.url})`;
+ } else if (typeof image_url === 'string') {
+ return `![Input image](${image_url})`;
}
return item.text;
})
@@ -115,7 +126,7 @@ const outputTypeParsers: {
message += ` (${JSON.stringify(content.kwargs.additional_kwargs)})`;
}
if (content.id.includes('HumanMessage')) {
- message = `**Human:** ${message.trim()}`;
+ message = `**Human:** ${String(message).trim()}`;
} else if (content.id.includes('AIMessage')) {
message = `**AI:** ${message}`;
} else if (content.id.includes('SystemMessage')) {
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index c329d7ffb6..e4e502a9d7 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -229,6 +229,9 @@ importers:
'@google-ai/generativelanguage':
specifier: 0.2.1
version: 0.2.1
+ '@google/generative-ai':
+ specifier: ^0.5.0
+ version: 0.5.0
'@huggingface/inference':
specifier: 2.6.4
version: 2.6.4
@@ -244,6 +247,9 @@ importers:
'@langchain/core':
specifier: 0.1.41
version: 0.1.41
+ '@langchain/google-genai':
+ specifier: ^0.0.11
+ version: 0.0.11
'@langchain/mistralai':
specifier: 0.0.13
version: 0.0.13
@@ -5203,6 +5209,16 @@ packages:
- supports-color
dev: false
+ /@google/generative-ai@0.1.3:
+ resolution: {integrity: sha512-Cm4uJX1sKarpm1mje/MiOIinM7zdUUrQp/5/qGPAgznbdd/B9zup5ehT6c1qGqycFcSopTA1J1HpqHS5kJR8hQ==}
+ engines: {node: '>=18.0.0'}
+ dev: false
+
+ /@google/generative-ai@0.5.0:
+ resolution: {integrity: sha512-uxfN3mROgVxb+9KrlVHIiglcWgNE86pVTS9TRkGS6hMvQoZ5TrB1TIMcW5ZYWxlRhMwtmEdJnNMUxSqM3Qp/Ag==}
+ engines: {node: '>=18.0.0'}
+ dev: false
+
/@grpc/grpc-js@1.8.21:
resolution: {integrity: sha512-KeyQeZpxeEBSqFVTi3q2K7PiPXmgBfECc4updA1ejCLjYmoAlvvM3ZMp5ztTDUCUQmoY3CpDxvchjO1+rFkoHg==}
engines: {node: ^8.13.0 || >=10.10.0}
@@ -6350,6 +6366,14 @@ packages:
zod-to-json-schema: 3.22.4(zod@3.22.4)
dev: false
+ /@langchain/google-genai@0.0.11:
+ resolution: {integrity: sha512-o4+r+ETmcPqcrRTJeJQQ0c796IAx1dvVkZvFsUqLhTIteIQuAc2KenY/UDGQxZVghw6fZf4irN/PvkNHJjfgWw==}
+ engines: {node: '>=18'}
+ dependencies:
+ '@google/generative-ai': 0.1.3
+ '@langchain/core': 0.1.41
+ dev: false
+
/@langchain/mistralai@0.0.13:
resolution: {integrity: sha512-0oNTICsukEnZLJ1HwtlCADZi5jqircK8B+svLrRbp+1HVue5hXPsU36b54mr0WEwhmY0QIXJ9CwEaGRSfEEZcg==}
engines: {node: '>=18'}