Ria Scholz 2025-03-05 13:50:58 +01:00 committed by GitHub
commit 2dc556306d
6 changed files with 306 additions and 0 deletions

View file

@@ -0,0 +1,47 @@
import type {
IAuthenticateGeneric,
ICredentialTestRequest,
ICredentialType,
INodeProperties,
} from 'n8n-workflow';
export class XAiApi implements ICredentialType {
name = 'xAiApi';
displayName = 'xAI';
documentationUrl = 'xAi';
properties: INodeProperties[] = [
{
displayName: 'API Key',
name: 'apiKey',
type: 'string',
typeOptions: { password: true },
required: true,
default: '',
},
{
displayName: 'Base URL',
name: 'url',
type: 'hidden',
default: 'https://api.x.ai/v1',
},
];
authenticate: IAuthenticateGeneric = {
type: 'generic',
properties: {
headers: {
Authorization: '=Bearer {{$credentials.apiKey}}',
},
},
};
test: ICredentialTestRequest = {
request: {
baseURL: '={{ $credentials.url }}',
url: '/models',
},
};
}
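
For orientation: the `authenticate` block injects the bearer header into every request made with this credential, and the `test` block issues a GET against `/models` on the hidden base URL. A minimal standalone sketch of the equivalent check, assuming only the two fields defined above (the `fetch`-based helper is illustrative, not part of this commit):

// Hypothetical helper mirroring the credential test: GET {url}/models with the
// Authorization header that IAuthenticateGeneric adds. Any non-error status
// means the API key is accepted.
async function testXAiCredential(apiKey: string, url = 'https://api.x.ai/v1'): Promise<boolean> {
	const response = await fetch(`${url}/models`, {
		headers: { Authorization: `Bearer ${apiKey}` },
	});
	return response.ok;
}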

View file

@@ -99,6 +99,7 @@ function getInputs(
'@n8n/n8n-nodes-langchain.lmChatAzureOpenAi',
'@n8n/n8n-nodes-langchain.lmChatDeepSeek',
'@n8n/n8n-nodes-langchain.lmChatOpenRouter',
'@n8n/n8n-nodes-langchain.lmChatXAiGrok',
],
},
},
@@ -129,6 +130,7 @@ function getInputs(
'@n8n/n8n-nodes-langchain.lmChatGoogleGemini',
'@n8n/n8n-nodes-langchain.lmChatDeepSeek',
'@n8n/n8n-nodes-langchain.lmChatOpenRouter',
'@n8n/n8n-nodes-langchain.lmChatXAiGrok',
],
},
},
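
For context, both hunks extend the `filter.nodes` allow-list that controls which chat-model sub-nodes may be connected to the agent's language-model input; without the new entry the Grok node could not be selected here. A simplified sketch of that input shape, with illustrative values (the surrounding `getInputs` code is not shown in this diff):

// Illustrative shape of one input definition produced by getInputs: the filter
// whitelists the node types accepted on the AiLanguageModel connection, and
// this commit appends the new Grok node type to that list.
const languageModelInput = {
	displayName: 'Model',
	type: 'ai_languageModel',
	required: true,
	filter: {
		nodes: [
			'@n8n/n8n-nodes-langchain.lmChatOpenRouter',
			'@n8n/n8n-nodes-langchain.lmChatXAiGrok', // added by this commit
		],
	},
};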

View file

@@ -0,0 +1,253 @@
/* eslint-disable n8n-nodes-base/node-dirname-against-convention */
import { ChatOpenAI, type ClientOptions } from '@langchain/openai';
import {
NodeConnectionType,
type INodeType,
type INodeTypeDescription,
type ISupplyDataFunctions,
type SupplyData,
} from 'n8n-workflow';
import { getConnectionHintNoticeField } from '@utils/sharedFields';
import { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';
import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
import { N8nLlmTracing } from '../N8nLlmTracing';
export class LmChatXAiGrok implements INodeType {
description: INodeTypeDescription = {
displayName: 'xAI Grok Chat Model',
// eslint-disable-next-line n8n-nodes-base/node-class-description-name-miscased
name: 'lmChatXAiGrok',
icon: { light: 'file:logo.dark.svg', dark: 'file:logo.svg' },
group: ['transform'],
version: [1],
description: 'For advanced usage with an AI chain',
defaults: {
name: 'xAI Grok Chat Model',
},
codex: {
categories: ['AI'],
subcategories: {
AI: ['Language Models', 'Root Nodes'],
'Language Models': ['Chat Models (Recommended)'],
},
resources: {
primaryDocumentation: [
{
url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.xxxxxxxx/',
},
],
},
},
// eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node
inputs: [],
// eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong
outputs: [NodeConnectionType.AiLanguageModel],
outputNames: ['Model'],
credentials: [
{
name: 'xAiApi',
required: true,
},
],
requestDefaults: {
ignoreHttpStatusErrors: true,
baseURL: '={{ $credentials?.url }}',
},
properties: [
getConnectionHintNoticeField([NodeConnectionType.AiChain, NodeConnectionType.AiAgent]),
{
displayName:
'If using JSON response format, you must include the word "json" in the prompt of your chain or agent. Also, make sure to select one of the latest models released after November 2023.',
name: 'notice',
type: 'notice',
default: '',
displayOptions: {
show: {
'/options.responseFormat': ['json_object'],
},
},
},
{
displayName: 'Model',
name: 'model',
type: 'options',
description:
'The model which will generate the completion. <a href="https://docs.x.ai/docs/models">Learn more</a>.',
typeOptions: {
loadOptions: {
routing: {
request: {
method: 'GET',
url: '/models',
},
output: {
postReceive: [
{
type: 'rootProperty',
properties: {
property: 'data',
},
},
{
type: 'setKeyValue',
properties: {
name: '={{$responseItem.id}}',
value: '={{$responseItem.id}}',
},
},
{
type: 'sort',
properties: {
key: 'name',
},
},
],
},
},
},
},
routing: {
send: {
type: 'body',
property: 'model',
},
},
default: 'grok-2-vision-1212',
},
{
displayName: 'Options',
name: 'options',
placeholder: 'Add Option',
description: 'Additional options to add',
type: 'collection',
default: {},
options: [
{
displayName: 'Frequency Penalty',
name: 'frequencyPenalty',
default: 0,
typeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },
description:
"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim",
type: 'number',
},
{
displayName: 'Maximum Number of Tokens',
name: 'maxTokens',
default: -1,
description:
'The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).',
type: 'number',
typeOptions: {
maxValue: 32768,
},
},
{
displayName: 'Response Format',
name: 'responseFormat',
default: 'text',
type: 'options',
options: [
{
name: 'Text',
value: 'text',
description: 'Regular text response',
},
{
name: 'JSON',
value: 'json_object',
description:
'Enables JSON mode, which should guarantee the message the model generates is valid JSON',
},
],
},
{
displayName: 'Presence Penalty',
name: 'presencePenalty',
default: 0,
typeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },
description:
"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics",
type: 'number',
},
{
displayName: 'Sampling Temperature',
name: 'temperature',
default: 0.7,
typeOptions: { maxValue: 2, minValue: 0, numberPrecision: 1 },
description:
'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
type: 'number',
},
{
displayName: 'Timeout',
name: 'timeout',
default: 360000,
description: 'Maximum amount of time a request is allowed to take in milliseconds',
type: 'number',
},
{
displayName: 'Max Retries',
name: 'maxRetries',
default: 2,
description: 'Maximum number of retries to attempt',
type: 'number',
},
{
displayName: 'Top P',
name: 'topP',
default: 1,
typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
description:
'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.',
type: 'number',
},
],
},
],
};
async supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {
const credentials = await this.getCredentials<{ apiKey: string; url: string }>('xAiApi');
const modelName = this.getNodeParameter('model', itemIndex) as string;
const options = this.getNodeParameter('options', itemIndex, {}) as {
frequencyPenalty?: number;
maxTokens?: number;
maxRetries: number;
timeout: number;
presencePenalty?: number;
temperature?: number;
topP?: number;
responseFormat?: 'text' | 'json_object';
};
const configuration: ClientOptions = {
baseURL: credentials.url,
};
const model = new ChatOpenAI({
openAIApiKey: credentials.apiKey,
modelName,
...options,
timeout: options.timeout ?? 60000,
maxRetries: options.maxRetries ?? 2,
configuration,
callbacks: [new N8nLlmTracing(this)],
modelKwargs: options.responseFormat
? {
response_format: { type: options.responseFormat },
}
: undefined,
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this, openAiFailedAttemptHandler),
});
return {
response: model,
};
}
}
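
Since xAI's API is OpenAI-compatible, `supplyData` above only swaps the credential's base URL and key into LangChain's `ChatOpenAI` client and hands the instance back to the consuming chain or agent. A minimal standalone sketch of the same wiring, without the n8n tracing callback and retry handler (environment variable name and prompt are placeholders):

import { ChatOpenAI } from '@langchain/openai';

// Roughly what the node builds in supplyData: an OpenAI client pointed at
// the xAI endpoint, using the key and URL from the XAiApi credential.
const grok = new ChatOpenAI({
	openAIApiKey: process.env.XAI_API_KEY ?? '',
	modelName: 'grok-2-vision-1212',
	temperature: 0.7,
	configuration: { baseURL: 'https://api.x.ai/v1' },
});

// In n8n, the chain or agent connected to the Model output makes this call.
const reply = await grok.invoke('Say hello from Grok.');
console.log(reply.content);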

View file

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" fill="currentColor" viewBox="0 0 24 24" aria-hidden="true" class="" focusable="false" style="fill: currentcolor; height: 28px; width: 28px;"><path d="m3.005 8.858 8.783 12.544h3.904L6.908 8.858zM6.905 15.825 3 21.402h3.907l1.951-2.788zM16.585 2l-6.75 9.64 1.953 2.79L20.492 2zM17.292 7.965v13.437h3.2V3.395z"></path></svg>


View file

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 1000 1000" width="32" height="32"><g><polygon fill="#fff" points="226.83 411.15 501.31 803.15 623.31 803.15 348.82 411.15 226.83 411.15"></polygon><polygon fill="#fff" points="348.72 628.87 226.69 803.15 348.77 803.15 409.76 716.05 348.72 628.87"></polygon><polygon fill="#fff" points="651.23 196.85 440.28 498.12 501.32 585.29 773.31 196.85 651.23 196.85"></polygon><polygon fill="#fff" points="673.31 383.25 673.31 803.15 773.31 803.15 773.31 240.44 673.31 383.25"></polygon></g></svg>


View file

@@ -37,6 +37,7 @@
"dist/credentials/QdrantApi.credentials.js",
"dist/credentials/SerpApi.credentials.js",
"dist/credentials/WolframAlphaApi.credentials.js",
"dist/credentials/XAiApi.credentials.js",
"dist/credentials/XataApi.credentials.js",
"dist/credentials/ZepApi.credentials.js"
],
@@ -73,6 +74,7 @@
"dist/nodes/llms/LmChatMistralCloud/LmChatMistralCloud.node.js",
"dist/nodes/llms/LMChatOllama/LmChatOllama.node.js",
"dist/nodes/llms/LmChatOpenRouter/LmChatOpenRouter.node.js",
"dist/nodes/llms/LmChatXAiGrok/LmChatXAiGrok.node.js",
"dist/nodes/llms/LMChatOpenAi/LmChatOpenAi.node.js",
"dist/nodes/llms/LMOpenAi/LmOpenAi.node.js",
"dist/nodes/llms/LMCohere/LmCohere.node.js",