feat(Google Gemini Chat Model Node): Add support for new Google Gemini models (#9130)

Signed-off-by: Oleg Ivaniv <me@olegivaniv.com>
Co-authored-by: Michael Kret <michael.k@radency.com>
This commit is contained in:
oleg 2024-04-15 13:56:44 +02:00 committed by GitHub
parent fa93fb81b0
commit f1215cdb6b
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
11 changed files with 483 additions and 14 deletions

View file

@ -8,7 +8,7 @@ import type {
export class GooglePalmApi implements ICredentialType {
name = 'googlePalmApi';
displayName = 'GooglePaLMApi';
displayName = 'Google Gemini(PaLM) Api';
documentationUrl = 'google';
@ -41,7 +41,7 @@ export class GooglePalmApi implements ICredentialType {
test: ICredentialTestRequest = {
request: {
baseURL: '={{$credentials.host}}/v1beta3/models',
baseURL: '={{$credentials.host}}/v1beta/models',
},
};
}

View file

@ -75,6 +75,7 @@ function getInputs(
'@n8n/n8n-nodes-langchain.lmChatOllama',
'@n8n/n8n-nodes-langchain.lmChatOpenAi',
'@n8n/n8n-nodes-langchain.lmChatGooglePalm',
'@n8n/n8n-nodes-langchain.lmChatGoogleGemini',
'@n8n/n8n-nodes-langchain.lmChatMistralCloud',
'@n8n/n8n-nodes-langchain.lmChatAzureOpenAi',
],

View file

@ -21,6 +21,7 @@ import { CombiningOutputParser } from 'langchain/output_parsers';
import { LLMChain } from 'langchain/chains';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { HumanMessage } from '@langchain/core/messages';
import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
import { getTemplateNoticeField } from '../../../utils/sharedFields';
import {
getOptionalOutputParsers,
@ -74,14 +75,19 @@ async function getImageMessage(
}
const bufferData = await context.helpers.getBinaryDataBuffer(itemIndex, binaryDataKey);
const model = (await context.getInputConnectionData(
NodeConnectionType.AiLanguageModel,
0,
)) as BaseLanguageModel;
const dataURI = `data:image/jpeg;base64,${bufferData.toString('base64')}`;
const imageUrl = model instanceof ChatGoogleGenerativeAI ? dataURI : { url: dataURI, detail };
return new HumanMessage({
content: [
{
type: 'image_url',
image_url: {
url: `data:image/jpeg;base64,${bufferData.toString('base64')}`,
detail,
},
image_url: imageUrl,
},
],
});

View file

@ -0,0 +1,136 @@
/* eslint-disable n8n-nodes-base/node-dirname-against-convention */
import {
NodeConnectionType,
type IExecuteFunctions,
type INodeType,
type INodeTypeDescription,
type SupplyData,
} from 'n8n-workflow';
import { GoogleGenerativeAIEmbeddings } from '@langchain/google-genai';
import { logWrapper } from '../../../utils/logWrapper';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
/**
 * Sub-node that exposes Google Gemini text embeddings (via LangChain's
 * `GoogleGenerativeAIEmbeddings`) as an AiEmbedding connection for
 * vector-store nodes.
 */
export class EmbeddingsGoogleGemini implements INodeType {
	description: INodeTypeDescription = {
		displayName: 'Embeddings Google Gemini',
		name: 'embeddingsGoogleGemini',
		icon: 'file:google.svg',
		group: ['transform'],
		version: 1,
		description: 'Use Google Gemini Embeddings',
		defaults: {
			name: 'Embeddings Google Gemini',
		},
		requestDefaults: {
			ignoreHttpStatusErrors: true,
			// Base URL comes from the credential's host field (used by loadOptions below).
			baseURL: '={{ $credentials.host }}',
		},
		// Reuses the existing Google PaLM credential (API key + host) for Gemini.
		credentials: [
			{
				name: 'googlePalmApi',
				required: true,
			},
		],
		codex: {
			categories: ['AI'],
			subcategories: {
				AI: ['Embeddings'],
			},
			resources: {
				primaryDocumentation: [
					{
						url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.embeddingsgooglegemini/',
					},
				],
			},
		},
		// Sub-node: no main inputs; it only supplies an embeddings connection.
		// eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node
		inputs: [],
		// eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong
		outputs: [NodeConnectionType.AiEmbedding],
		outputNames: ['Embeddings'],
		properties: [
			getConnectionHintNoticeField([NodeConnectionType.AiVectorStore]),
			{
				displayName:
					'Each model is using different dimensional density for embeddings. Please make sure to use the same dimensionality for your vector store. The default model is using 768-dimensional embeddings.',
				name: 'notice',
				type: 'notice',
				default: '',
			},
			{
				displayName: 'Model',
				name: 'modelName',
				type: 'options',
				description:
					'The model which will generate the embeddings. <a href="https://developers.generativeai.google/api/rest/generativelanguage/models/list">Learn more</a>.',
				typeOptions: {
					loadOptions: {
						// Dynamically lists models from GET {host}/v1beta/models.
						routing: {
							request: {
								method: 'GET',
								url: '/v1beta/models',
							},
							output: {
								postReceive: [
									{
										type: 'rootProperty',
										properties: {
											property: 'models',
										},
									},
									// Keep only embedding-capable models (name contains 'embedding').
									{
										type: 'filter',
										properties: {
											pass: "={{ $responseItem.name.includes('embedding') }}",
										},
									},
									{
										type: 'setKeyValue',
										properties: {
											name: '={{$responseItem.name}}',
											value: '={{$responseItem.name}}',
											description: '={{$responseItem.description}}',
										},
									},
									{
										type: 'sort',
										properties: {
											key: 'name',
										},
									},
								],
							},
						},
					},
				},
				routing: {
					send: {
						type: 'body',
						property: 'model',
					},
				},
				// NOTE(review): this default looks like a Vertex AI model id, while the
				// filter above lists names like 'models/embedding-001' — confirm the
				// default is actually selectable/valid for the Gemini API.
				default: 'textembedding-gecko-multilingual@latest',
			},
		],
	};

	/**
	 * Supplies a log-wrapped `GoogleGenerativeAIEmbeddings` instance configured
	 * from the node's `modelName` parameter and the `googlePalmApi` credential.
	 *
	 * @param itemIndex - Index of the item whose parameters are read.
	 * @returns The embeddings client wrapped for n8n execution logging.
	 */
	async supplyData(this: IExecuteFunctions, itemIndex: number): Promise<SupplyData> {
		this.logger.verbose('Supply data for embeddings Google Gemini');
		// Fallback default matches the property default declared above.
		const modelName = this.getNodeParameter(
			'modelName',
			itemIndex,
			'textembedding-gecko-multilingual@latest',
		) as string;
		const credentials = await this.getCredentials('googlePalmApi');
		const embeddings = new GoogleGenerativeAIEmbeddings({
			apiKey: credentials.apiKey as string,
			modelName,
		});
		return {
			response: logWrapper(embeddings, this),
		};
	}
}

View file

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 48 48"><defs><path id="a" d="M44.5 20H24v8.5h11.8C34.7 33.9 30.1 37 24 37c-7.2 0-13-5.8-13-13s5.8-13 13-13c3.1 0 5.9 1.1 8.1 2.9l6.4-6.4C34.6 4.1 29.6 2 24 2 11.8 2 2 11.8 2 24s9.8 22 22 22c11 0 21-8 21-22 0-1.3-.2-2.7-.5-4"/></defs><clipPath id="b"><use xlink:href="#a" overflow="visible"/></clipPath><path fill="#FBBC05" d="M0 37V11l17 13z" clip-path="url(#b)"/><path fill="#EA4335" d="m0 11 17 13 7-6.1L48 14V0H0z" clip-path="url(#b)"/><path fill="#34A853" d="m0 37 30-23 7.9 1L48 0v48H0z" clip-path="url(#b)"/><path fill="#4285F4" d="M48 48 17 24l-4-3 35-10z" clip-path="url(#b)"/></svg>

After

Width:  |  Height:  |  Size: 687 B

View file

@ -0,0 +1,233 @@
/* eslint-disable n8n-nodes-base/node-dirname-against-convention */
import {
NodeConnectionType,
type IExecuteFunctions,
type INodeType,
type INodeTypeDescription,
type SupplyData,
} from 'n8n-workflow';
import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
import type { HarmBlockThreshold, HarmCategory, SafetySetting } from '@google/generative-ai';
import { logWrapper } from '../../../utils/logWrapper';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';
import { harmCategories, harmThresholds } from './options';
/**
 * Sub-node that exposes a Google Gemini chat model (via LangChain's
 * `ChatGoogleGenerativeAI`) as an AiLanguageModel connection for chains
 * and agents.
 */
export class LmChatGoogleGemini implements INodeType {
	description: INodeTypeDescription = {
		displayName: 'Google Gemini Chat Model',
		// eslint-disable-next-line n8n-nodes-base/node-class-description-name-miscased
		name: 'lmChatGoogleGemini',
		icon: 'file:google.svg',
		group: ['transform'],
		version: 1,
		description: 'Chat Model Google Gemini',
		defaults: {
			name: 'Google Gemini Chat Model',
		},
		codex: {
			categories: ['AI'],
			subcategories: {
				AI: ['Language Models'],
			},
			resources: {
				primaryDocumentation: [
					{
						url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgooglegemini/',
					},
				],
			},
		},
		// Sub-node: no main inputs; it only supplies a language-model connection.
		// eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node
		inputs: [],
		// eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong
		outputs: [NodeConnectionType.AiLanguageModel],
		outputNames: ['Model'],
		// Reuses the existing Google PaLM credential (API key + host) for Gemini.
		credentials: [
			{
				name: 'googlePalmApi',
				required: true,
			},
		],
		requestDefaults: {
			ignoreHttpStatusErrors: true,
			// Base URL comes from the credential's host field (used by loadOptions below).
			baseURL: '={{ $credentials.host }}',
		},
		properties: [
			getConnectionHintNoticeField([NodeConnectionType.AiChain, NodeConnectionType.AiAgent]),
			{
				displayName: 'Model',
				name: 'modelName',
				type: 'options',
				description:
					'The model which will generate the completion. <a href="https://developers.generativeai.google/api/rest/generativelanguage/models/list">Learn more</a>.',
				typeOptions: {
					loadOptions: {
						// Dynamically lists models from GET {host}/v1beta/models.
						routing: {
							request: {
								method: 'GET',
								url: '/v1beta/models',
							},
							output: {
								postReceive: [
									{
										type: 'rootProperty',
										properties: {
											property: 'models',
										},
									},
									// Exclude embedding models; keep chat/completion models only.
									{
										type: 'filter',
										properties: {
											pass: "={{ !$responseItem.name.includes('embedding') }}",
										},
									},
									{
										type: 'setKeyValue',
										properties: {
											name: '={{$responseItem.name}}',
											value: '={{$responseItem.name}}',
											description: '={{$responseItem.description}}',
										},
									},
									{
										type: 'sort',
										properties: {
											key: 'name',
										},
									},
								],
							},
						},
					},
				},
				routing: {
					send: {
						type: 'body',
						property: 'model',
					},
				},
				default: 'models/gemini-1.0-pro',
			},
			{
				displayName: 'Options',
				name: 'options',
				placeholder: 'Add Option',
				description: 'Additional options to add',
				type: 'collection',
				default: {},
				options: [
					{
						displayName: 'Maximum Number of Tokens',
						name: 'maxOutputTokens',
						default: 2048,
						description: 'The maximum number of tokens to generate in the completion',
						type: 'number',
					},
					{
						displayName: 'Sampling Temperature',
						name: 'temperature',
						default: 0.4,
						typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
						description:
							'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
						type: 'number',
					},
					{
						displayName: 'Top K',
						name: 'topK',
						default: 32,
						typeOptions: { maxValue: 40, minValue: -1, numberPrecision: 1 },
						description:
							'Used to remove "long tail" low probability responses. Defaults to -1, which disables it.',
						type: 'number',
					},
					{
						displayName: 'Top P',
						name: 'topP',
						default: 1,
						typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
						description:
							'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.',
						type: 'number',
					},
					// Safety Settings
					{
						displayName: 'Safety Settings',
						name: 'safetySettings',
						type: 'fixedCollection',
						typeOptions: { multipleValues: true },
						// Pre-populated with the first category/threshold pair from ./options.
						default: {
							values: {
								category: harmCategories[0].name as HarmCategory,
								threshold: harmThresholds[0].name as HarmBlockThreshold,
							},
						},
						placeholder: 'Add Option',
						options: [
							{
								name: 'values',
								displayName: 'Values',
								values: [
									{
										displayName: 'Safety Category',
										name: 'category',
										type: 'options',
										description: 'The category of harmful content to block',
										default: 'HARM_CATEGORY_UNSPECIFIED',
										options: harmCategories,
									},
									{
										displayName: 'Safety Threshold',
										name: 'threshold',
										type: 'options',
										description: 'The threshold of harmful content to block',
										default: 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
										options: harmThresholds,
									},
								],
							},
						],
					},
				],
			},
		],
	};

	/**
	 * Supplies a log-wrapped `ChatGoogleGenerativeAI` instance configured from
	 * the node's `modelName`, `options`, and safety-setting parameters.
	 *
	 * @param itemIndex - Index of the item whose parameters are read.
	 * @returns The chat model wrapped for n8n execution logging.
	 */
	async supplyData(this: IExecuteFunctions, itemIndex: number): Promise<SupplyData> {
		const credentials = await this.getCredentials('googlePalmApi');
		const modelName = this.getNodeParameter('modelName', itemIndex) as string;
		// NOTE(review): these fallback values (1024 / 0.7 / 40 / 0.9) differ from
		// the UI defaults declared above (2048 / 0.4 / 32 / 1). They only apply if
		// the 'options' parameter is missing entirely — confirm this is intended.
		const options = this.getNodeParameter('options', itemIndex, {
			maxOutputTokens: 1024,
			temperature: 0.7,
			topK: 40,
			topP: 0.9,
		}) as {
			maxOutputTokens: number;
			temperature: number;
			topK: number;
			topP: number;
		};
		// NOTE(review): may be null when no safety settings were added; the cast to
		// SafetySetting[] hides that — verify ChatGoogleGenerativeAI accepts null here.
		const safetySettings = this.getNodeParameter(
			'options.safetySettings.values',
			itemIndex,
			null,
		) as SafetySetting[];
		const model = new ChatGoogleGenerativeAI({
			apiKey: credentials.apiKey as string,
			modelName,
			topK: options.topK,
			topP: options.topP,
			temperature: options.temperature,
			maxOutputTokens: options.maxOutputTokens,
			safetySettings,
		});
		return {
			response: logWrapper(model, this),
		};
	}
}

View file

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 48 48"><defs><path id="a" d="M44.5 20H24v8.5h11.8C34.7 33.9 30.1 37 24 37c-7.2 0-13-5.8-13-13s5.8-13 13-13c3.1 0 5.9 1.1 8.1 2.9l6.4-6.4C34.6 4.1 29.6 2 24 2 11.8 2 2 11.8 2 24s9.8 22 22 22c11 0 21-8 21-22 0-1.3-.2-2.7-.5-4"/></defs><clipPath id="b"><use xlink:href="#a" overflow="visible"/></clipPath><path fill="#FBBC05" d="M0 37V11l17 13z" clip-path="url(#b)"/><path fill="#EA4335" d="m0 11 17 13 7-6.1L48 14V0H0z" clip-path="url(#b)"/><path fill="#34A853" d="m0 37 30-23 7.9 1L48 0v48H0z" clip-path="url(#b)"/><path fill="#4285F4" d="M48 48 17 24l-4-3 35-10z" clip-path="url(#b)"/></svg>

After

Width:  |  Height:  |  Size: 687 B

View file

@ -0,0 +1,52 @@
import type { INodePropertyOptions } from 'n8n-workflow';
// Gemini safety categories selectable in the node UI.
// Each entry is [API enum name, human-readable description]; the API name is
// used for both the option's `name` and `value`.
const HARM_CATEGORY_DEFS: ReadonlyArray<readonly [string, string]> = [
	['HARM_CATEGORY_HARASSMENT', 'Harassment content'],
	['HARM_CATEGORY_HATE_SPEECH', 'Hate speech and content'],
	['HARM_CATEGORY_SEXUALLY_EXPLICIT', 'Sexually explicit content'],
	['HARM_CATEGORY_DANGEROUS_CONTENT', 'Dangerous content'],
];

/** Safety-category options offered by the Google Gemini Chat Model node. */
export const harmCategories: INodePropertyOptions[] = HARM_CATEGORY_DEFS.map(
	([id, description]) => ({ value: id, name: id, description }),
);
// Blocking thresholds for Gemini safety settings, ordered from unspecified to
// most permissive. Each entry is [API enum name, human-readable description].
const HARM_THRESHOLD_DEFS: ReadonlyArray<readonly [string, string]> = [
	['HARM_BLOCK_THRESHOLD_UNSPECIFIED', 'Threshold is unspecified'],
	['BLOCK_LOW_AND_ABOVE', 'Content with NEGLIGIBLE will be allowed'],
	['BLOCK_MEDIUM_AND_ABOVE', 'Content with NEGLIGIBLE and LOW will be allowed'],
	['BLOCK_ONLY_HIGH', 'Content with NEGLIGIBLE, LOW, and MEDIUM will be allowed'],
	['BLOCK_NONE', 'All content will be allowed'],
];

/** Safety-threshold options offered by the Google Gemini Chat Model node. */
export const harmThresholds: INodePropertyOptions[] = HARM_THRESHOLD_DEFS.map(
	([id, description]) => ({ value: id, name: id, description }),
);

View file

@ -59,6 +59,7 @@
"dist/nodes/embeddings/EmbeddingsAwsBedrock/EmbeddingsAwsBedrock.node.js",
"dist/nodes/embeddings/EmbeddingsAzureOpenAi/EmbeddingsAzureOpenAi.node.js",
"dist/nodes/embeddings/EmbeddingsGooglePalm/EmbeddingsGooglePalm.node.js",
"dist/nodes/embeddings/EmbeddingsGoogleGemini/EmbeddingsGoogleGemini.node.js",
"dist/nodes/embeddings/EmbeddingsHuggingFaceInference/EmbeddingsHuggingFaceInference.node.js",
"dist/nodes/embeddings/EmbeddingsMistralCloud/EmbeddingsMistralCloud.node.js",
"dist/nodes/embeddings/EmbeddingsOpenAI/EmbeddingsOpenAi.node.js",
@ -68,6 +69,7 @@
"dist/nodes/llms/LmGooglePalm/LmGooglePalm.node.js",
"dist/nodes/llms/LmChatAwsBedrock/LmChatAwsBedrock.node.js",
"dist/nodes/llms/LmChatGooglePalm/LmChatGooglePalm.node.js",
"dist/nodes/llms/LmChatGoogleGemini/LmChatGoogleGemini.node.js",
"dist/nodes/llms/LmChatMistralCloud/LmChatMistralCloud.node.js",
"dist/nodes/llms/LMChatOllama/LmChatOllama.node.js",
"dist/nodes/llms/LMChatOpenAi/LmChatOpenAi.node.js",
@ -131,11 +133,13 @@
"@aws-sdk/credential-provider-node": "3.549.0",
"@getzep/zep-js": "0.9.0",
"@google-ai/generativelanguage": "0.2.1",
"@google/generative-ai": "^0.5.0",
"@huggingface/inference": "2.6.4",
"@langchain/anthropic": "^0.1.3",
"@langchain/cohere": "^0.0.5",
"@langchain/community": "0.0.44",
"@langchain/core": "0.1.41",
"@langchain/google-genai": "^0.0.11",
"@langchain/mistralai": "0.0.13",
"@langchain/openai": "^0.0.16",
"@langchain/pinecone": "^0.0.3",
@ -161,10 +165,10 @@
"openai": "4.26.1",
"pdf-parse": "1.1.1",
"pg": "8.11.3",
"tmp-promise": "3.0.3",
"redis": "4.6.12",
"sqlite3": "5.1.7",
"temp": "0.9.4",
"tmp-promise": "3.0.3",
"zod": "3.22.4",
"zod-to-json-schema": "3.22.4"
}

View file

@ -84,7 +84,9 @@ const outputTypeParsers: {
[NodeConnectionType.AiAgent]: fallbackParser,
[NodeConnectionType.AiMemory](execData: IDataObject) {
const chatHistory =
execData.chatHistory ?? execData.messages ?? execData?.response?.chat_history;
execData.chatHistory ??
execData.messages ??
(execData?.response as IDataObject)?.chat_history;
if (Array.isArray(chatHistory)) {
const responseText = chatHistory
.map((content: MemoryMessage) => {
@ -96,16 +98,25 @@ const outputTypeParsers: {
interface MessageContent {
type: string;
text?: string;
image_url?: {
url: string;
};
image_url?:
| {
url: string;
}
| string;
}
let message = content.kwargs.content;
if (Array.isArray(message)) {
message = (message as MessageContent[])
.map((item) => {
if (item?.type === 'image_url') {
return `![Input image](${item.image_url?.url})`;
const { type, image_url } = item;
if (
type === 'image_url' &&
typeof image_url === 'object' &&
typeof image_url.url === 'string'
) {
return `![Input image](${image_url.url})`;
} else if (typeof image_url === 'string') {
return `![Input image](${image_url})`;
}
return item.text;
})
@ -115,7 +126,7 @@ const outputTypeParsers: {
message += ` (${JSON.stringify(content.kwargs.additional_kwargs)})`;
}
if (content.id.includes('HumanMessage')) {
message = `**Human:** ${message.trim()}`;
message = `**Human:** ${String(message).trim()}`;
} else if (content.id.includes('AIMessage')) {
message = `**AI:** ${message}`;
} else if (content.id.includes('SystemMessage')) {

View file

@ -229,6 +229,9 @@ importers:
'@google-ai/generativelanguage':
specifier: 0.2.1
version: 0.2.1
'@google/generative-ai':
specifier: ^0.5.0
version: 0.5.0
'@huggingface/inference':
specifier: 2.6.4
version: 2.6.4
@ -244,6 +247,9 @@ importers:
'@langchain/core':
specifier: 0.1.41
version: 0.1.41
'@langchain/google-genai':
specifier: ^0.0.11
version: 0.0.11
'@langchain/mistralai':
specifier: 0.0.13
version: 0.0.13
@ -5203,6 +5209,16 @@ packages:
- supports-color
dev: false
/@google/generative-ai@0.1.3:
resolution: {integrity: sha512-Cm4uJX1sKarpm1mje/MiOIinM7zdUUrQp/5/qGPAgznbdd/B9zup5ehT6c1qGqycFcSopTA1J1HpqHS5kJR8hQ==}
engines: {node: '>=18.0.0'}
dev: false
/@google/generative-ai@0.5.0:
resolution: {integrity: sha512-uxfN3mROgVxb+9KrlVHIiglcWgNE86pVTS9TRkGS6hMvQoZ5TrB1TIMcW5ZYWxlRhMwtmEdJnNMUxSqM3Qp/Ag==}
engines: {node: '>=18.0.0'}
dev: false
/@grpc/grpc-js@1.8.21:
resolution: {integrity: sha512-KeyQeZpxeEBSqFVTi3q2K7PiPXmgBfECc4updA1ejCLjYmoAlvvM3ZMp5ztTDUCUQmoY3CpDxvchjO1+rFkoHg==}
engines: {node: ^8.13.0 || >=10.10.0}
@ -6350,6 +6366,14 @@ packages:
zod-to-json-schema: 3.22.4(zod@3.22.4)
dev: false
/@langchain/google-genai@0.0.11:
resolution: {integrity: sha512-o4+r+ETmcPqcrRTJeJQQ0c796IAx1dvVkZvFsUqLhTIteIQuAc2KenY/UDGQxZVghw6fZf4irN/PvkNHJjfgWw==}
engines: {node: '>=18'}
dependencies:
'@google/generative-ai': 0.1.3
'@langchain/core': 0.1.41
dev: false
/@langchain/mistralai@0.0.13:
resolution: {integrity: sha512-0oNTICsukEnZLJ1HwtlCADZi5jqircK8B+svLrRbp+1HVue5hXPsU36b54mr0WEwhmY0QIXJ9CwEaGRSfEEZcg==}
engines: {node: '>=18'}