/* eslint-disable n8n-nodes-base/node-dirname-against-convention */
import {
NodeConnectionType ,
type IExecuteFunctions ,
type INodeType ,
type INodeTypeDescription ,
type SupplyData ,
} from 'n8n-workflow' ;
import { ChatGoogleGenerativeAI } from '@langchain/google-genai' ;
import type { HarmBlockThreshold , HarmCategory , SafetySetting } from '@google/generative-ai' ;
import { getConnectionHintNoticeField } from '../../../utils/sharedFields' ;
import { N8nLlmTracing } from '../N8nLlmTracing';
import { harmCategories, harmThresholds } from './options';
export class LmChatGoogleGemini implements INodeType {
description : INodeTypeDescription = {
displayName : 'Google Gemini Chat Model' ,
// eslint-disable-next-line n8n-nodes-base/node-class-description-name-miscased
name : 'lmChatGoogleGemini' ,
icon : 'file:google.svg' ,
group : [ 'transform' ] ,
version : 1 ,
description : 'Chat Model Google Gemini' ,
defaults : {
name : 'Google Gemini Chat Model' ,
} ,
codex : {
categories : [ 'AI' ] ,
subcategories : {
AI : [ 'Language Models' ] ,
} ,
resources : {
primaryDocumentation : [
{
url : 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatgooglegemini/' ,
} ,
] ,
} ,
} ,
// eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node
inputs : [ ] ,
// eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong
outputs : [ NodeConnectionType . AiLanguageModel ] ,
outputNames : [ 'Model' ] ,
credentials : [
{
name : 'googlePalmApi' ,
required : true ,
} ,
] ,
requestDefaults : {
ignoreHttpStatusErrors : true ,
baseURL : '={{ $credentials.host }}' ,
} ,
properties : [
getConnectionHintNoticeField ( [ NodeConnectionType . AiChain , NodeConnectionType . AiAgent ] ) ,
{
displayName : 'Model' ,
name : 'modelName' ,
type : 'options' ,
description :
'The model which will generate the completion. <a href="https://developers.generativeai.google/api/rest/generativelanguage/models/list">Learn more</a>.' ,
typeOptions : {
loadOptions : {
routing : {
request : {
method : 'GET' ,
url : '/v1beta/models' ,
} ,
output : {
postReceive : [
{
type : 'rootProperty' ,
properties : {
property : 'models' ,
} ,
} ,
{
type : 'filter' ,
properties : {
pass : "={{ !$responseItem.name.includes('embedding') }}" ,
} ,
} ,
{
type : 'setKeyValue' ,
properties : {
name : '={{$responseItem.name}}' ,
value : '={{$responseItem.name}}' ,
description : '={{$responseItem.description}}' ,
} ,
} ,
{
type : 'sort' ,
properties : {
key : 'name' ,
} ,
} ,
] ,
} ,
} ,
} ,
} ,
routing : {
send : {
type : 'body' ,
property : 'model' ,
} ,
} ,
default : 'models/gemini-1.0-pro' ,
} ,
{
displayName : 'Options' ,
name : 'options' ,
placeholder : 'Add Option' ,
description : 'Additional options to add' ,
type : 'collection' ,
default : { } ,
options : [
{
displayName : 'Maximum Number of Tokens' ,
name : 'maxOutputTokens' ,
default : 2048 ,
description : 'The maximum number of tokens to generate in the completion' ,
type : 'number' ,
} ,
{
displayName : 'Sampling Temperature' ,
name : 'temperature' ,
default : 0.4 ,
typeOptions : { maxValue : 1 , minValue : 0 , numberPrecision : 1 } ,
description :
'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.' ,
type : 'number' ,
} ,
{
displayName : 'Top K' ,
name : 'topK' ,
default : 32 ,
typeOptions : { maxValue : 40 , minValue : - 1 , numberPrecision : 1 } ,
description :
'Used to remove "long tail" low probability responses. Defaults to -1, which disables it.' ,
type : 'number' ,
} ,
{
displayName : 'Top P' ,
name : 'topP' ,
default : 1 ,
typeOptions : { maxValue : 1 , minValue : 0 , numberPrecision : 1 } ,
description :
'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.' ,
type : 'number' ,
} ,
// Safety Settings
{
displayName : 'Safety Settings' ,
name : 'safetySettings' ,
type : 'fixedCollection' ,
typeOptions : { multipleValues : true } ,
default : {
values : {
category : harmCategories [ 0 ] . name as HarmCategory ,
threshold : harmThresholds [ 0 ] . name as HarmBlockThreshold ,
} ,
} ,
placeholder : 'Add Option' ,
options : [
{
name : 'values' ,
displayName : 'Values' ,
values : [
{
displayName : 'Safety Category' ,
name : 'category' ,
type : 'options' ,
description : 'The category of harmful content to block' ,
default : 'HARM_CATEGORY_UNSPECIFIED' ,
options : harmCategories ,
} ,
{
displayName : 'Safety Threshold' ,
name : 'threshold' ,
type : 'options' ,
description : 'The threshold of harmful content to block' ,
default : 'HARM_BLOCK_THRESHOLD_UNSPECIFIED' ,
options : harmThresholds ,
} ,
] ,
} ,
] ,
} ,
] ,
} ,
] ,
} ;
async supplyData ( this : IExecuteFunctions , itemIndex : number ) : Promise < SupplyData > {
const credentials = await this . getCredentials ( 'googlePalmApi' ) ;
const modelName = this . getNodeParameter ( 'modelName' , itemIndex ) as string ;
const options = this . getNodeParameter ( 'options' , itemIndex , {
maxOutputTokens : 1024 ,
temperature : 0.7 ,
topK : 40 ,
topP : 0.9 ,
} ) as {
maxOutputTokens : number ;
temperature : number ;
topK : number ;
topP : number ;
} ;
const safetySettings = this . getNodeParameter (
'options.safetySettings.values' ,
itemIndex ,
null ,
) as SafetySetting [ ] ;
const model = new ChatGoogleGenerativeAI ( {
apiKey : credentials.apiKey as string ,
modelName ,
topK : options.topK ,
topP : options.topP ,
temperature : options.temperature ,
maxOutputTokens : options.maxOutputTokens ,
safetySettings ,
2024-05-12 12:12:07 -07:00
callbacks : [ new N8nLlmTracing ( this ) ] ,
2024-04-15 04:56:44 -07:00
} ) ;
return {
2024-05-12 12:12:07 -07:00
response : model ,
2024-04-15 04:56:44 -07:00
} ;
}
}