feat: Azure Open AI chat model & embeddings (#8522)
Signed-off-by: Oleg Ivaniv <me@olegivaniv.com>

This commit is contained in:
parent 254700a059
commit 934d0d35b1
@@ -106,14 +106,19 @@ export function addSupplementalNodeToParent(
 	nodeName: string,
 	endpointType: EndpointType,
 	parentNodeName: string,
+	exactMatch = false,
 ) {
 	getAddInputEndpointByType(parentNodeName, endpointType).click({ force: true });
-	getNodeCreatorItems().contains(nodeName).click();
+	if (exactMatch) {
+		getNodeCreatorItems().contains(new RegExp("^" + nodeName + "$", "g")).click();
+	} else {
+		getNodeCreatorItems().contains(nodeName).click();
+	}
 	getConnectionBySourceAndTarget(parentNodeName, nodeName).should('exist');
 }

-export function addLanguageModelNodeToParent(nodeName: string, parentNodeName: string) {
-	addSupplementalNodeToParent(nodeName, 'ai_languageModel', parentNodeName);
+export function addLanguageModelNodeToParent(nodeName: string, parentNodeName: string, exactMatch = false) {
+	addSupplementalNodeToParent(nodeName, 'ai_languageModel', parentNodeName, exactMatch);
 }

 export function addMemoryNodeToParent(nodeName: string, parentNodeName: string) {
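One note on the exact-match branch added above: Cypress .contains() matches substrings, and with this commit 'OpenAI Chat Model' is also a substring of 'Azure OpenAI Chat Model', so the plain lookup can become ambiguous; anchoring the name in a RegExp forces a whole-name match. If a node name ever contained regex metacharacters it would need escaping first. A small sketch follows, where escapeRegExp is a hypothetical helper that is not part of this commit:

	// Hypothetical helper, not in this commit: escape regex metacharacters in a node name
	// before building the anchored pattern used by the exactMatch branch.
	function escapeRegExp(name: string): string {
		return name.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
	}

	// Anchored, whole-name lookup equivalent to the exactMatch branch above.
	getNodeCreatorItems()
		.contains(new RegExp('^' + escapeRegExp(nodeName) + '$'))
		.click();
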
@@ -83,6 +83,7 @@ describe('Langchain Integration', () => {
 		addLanguageModelNodeToParent(
 			AI_LANGUAGE_MODEL_OPENAI_CHAT_MODEL_NODE_NAME,
 			BASIC_LLM_CHAIN_NODE_NAME,
+			true
 		);

 		clickCreateNewCredential();

@@ -121,7 +122,7 @@ describe('Langchain Integration', () => {
 		addNodeToCanvas(MANUAL_CHAT_TRIGGER_NODE_NAME, true);
 		addNodeToCanvas(AGENT_NODE_NAME, true);

-		addLanguageModelNodeToParent(AI_LANGUAGE_MODEL_OPENAI_CHAT_MODEL_NODE_NAME, AGENT_NODE_NAME);
+		addLanguageModelNodeToParent(AI_LANGUAGE_MODEL_OPENAI_CHAT_MODEL_NODE_NAME, AGENT_NODE_NAME, true);

 		clickCreateNewCredential();
 		setCredentialValues({

@@ -159,7 +160,7 @@ describe('Langchain Integration', () => {
 		addNodeToCanvas(MANUAL_CHAT_TRIGGER_NODE_NAME, true);
 		addNodeToCanvas(AGENT_NODE_NAME, true);

-		addLanguageModelNodeToParent(AI_LANGUAGE_MODEL_OPENAI_CHAT_MODEL_NODE_NAME, AGENT_NODE_NAME);
+		addLanguageModelNodeToParent(AI_LANGUAGE_MODEL_OPENAI_CHAT_MODEL_NODE_NAME, AGENT_NODE_NAME, true);

 		clickCreateNewCredential();
 		setCredentialValues({
@@ -0,0 +1,43 @@ (new file)
import type { IAuthenticateGeneric, ICredentialType, INodeProperties } from 'n8n-workflow';

export class AzureOpenAiApi implements ICredentialType {
	name = 'azureOpenAiApi';

	displayName = 'Azure Open AI';

	documentationUrl = 'azureopenai';

	properties: INodeProperties[] = [
		{
			displayName: 'API Key',
			name: 'apiKey',
			type: 'string',
			typeOptions: { password: true },
			required: true,
			default: '',
		},
		{
			displayName: 'Resource Name',
			name: 'resourceName',
			type: 'string',
			required: true,
			default: '',
		},
		{
			displayName: 'API Version',
			name: 'apiVersion',
			type: 'string',
			required: true,
			default: '2023-05-15',
		},
	];

	authenticate: IAuthenticateGeneric = {
		type: 'generic',
		properties: {
			headers: {
				'api-key': '={{$credentials.apiKey}}',
			},
		},
	};
}
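For orientation, the three credential fields above map onto the usual Azure OpenAI request shape: the resource name and API version become part of the endpoint URL, and the API key is sent in the 'api-key' header, as the authenticate block shows. The URL itself is assembled later by the LangChain client inside the new nodes, so the snippet below is only an illustrative sketch; the deployment name is a placeholder that in practice comes from the node's Model (Deployment) Name parameter.

	// Illustrative sketch only, not part of this commit: how resourceName, a deployment
	// name, and apiVersion typically combine into an Azure OpenAI chat-completions URL.
	const buildAzureOpenAiUrl = (resourceName: string, deployment: string, apiVersion: string): string =>
		`https://${resourceName}.openai.azure.com/openai/deployments/${deployment}/chat/completions?api-version=${apiVersion}`;

	// Example: buildAzureOpenAiUrl('my-resource', 'gpt-35-turbo', '2023-05-15')
	// -> https://my-resource.openai.azure.com/openai/deployments/gpt-35-turbo/chat/completions?api-version=2023-05-15
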
@@ -0,0 +1,126 @@ (new file)
/* eslint-disable n8n-nodes-base/node-dirname-against-convention */
import {
	NodeConnectionType,
	type IExecuteFunctions,
	type INodeType,
	type INodeTypeDescription,
	type SupplyData,
} from 'n8n-workflow';

import { OpenAIEmbeddings } from 'langchain/embeddings/openai';
import { logWrapper } from '../../../utils/logWrapper';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';

export class EmbeddingsAzureOpenAi implements INodeType {
	description: INodeTypeDescription = {
		displayName: 'Embeddings Azure OpenAI',
		name: 'embeddingsAzureOpenAi',
		icon: 'file:azure.svg',
		credentials: [
			{
				name: 'azureOpenAiApi',
				required: true,
			},
		],
		group: ['transform'],
		version: 1,
		description: 'Use Embeddings Azure OpenAI',
		defaults: {
			name: 'Embeddings Azure OpenAI',
		},

		codex: {
			categories: ['AI'],
			subcategories: {
				AI: ['Embeddings'],
			},
			resources: {
				primaryDocumentation: [
					{
						url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.embeddingsazureopenai/',
					},
				],
			},
		},
		// eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node
		inputs: [],
		// eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong
		outputs: [NodeConnectionType.AiEmbedding],
		outputNames: ['Embeddings'],
		properties: [
			getConnectionHintNoticeField([NodeConnectionType.AiVectorStore]),
			{
				displayName: 'Model (Deployment) Name',
				name: 'model',
				type: 'string',
				description: 'The name of the model (deployment) to use',
				default: '',
			},
			{
				displayName: 'Options',
				name: 'options',
				placeholder: 'Add Option',
				description: 'Additional options to add',
				type: 'collection',
				default: {},
				options: [
					{
						displayName: 'Batch Size',
						name: 'batchSize',
						default: 512,
						typeOptions: { maxValue: 2048 },
						description: 'Maximum number of documents to send in each request',
						type: 'number',
					},
					{
						displayName: 'Strip New Lines',
						name: 'stripNewLines',
						default: true,
						description: 'Whether to strip new lines from the input text',
						type: 'boolean',
					},
					{
						displayName: 'Timeout',
						name: 'timeout',
						default: -1,
						description:
							'Maximum amount of time a request is allowed to take in seconds. Set to -1 for no timeout.',
						type: 'number',
					},
				],
			},
		],
	};

	async supplyData(this: IExecuteFunctions, itemIndex: number): Promise<SupplyData> {
		this.logger.verbose('Supply data for embeddings');
		const credentials = (await this.getCredentials('azureOpenAiApi')) as {
			apiKey: string;
			resourceName: string;
			apiVersion: string;
		};
		const modelName = this.getNodeParameter('model', itemIndex) as string;

		const options = this.getNodeParameter('options', itemIndex, {}) as {
			batchSize?: number;
			stripNewLines?: boolean;
			timeout?: number;
		};

		if (options.timeout === -1) {
			options.timeout = undefined;
		}

		const embeddings = new OpenAIEmbeddings({
			azureOpenAIApiDeploymentName: modelName,
			azureOpenAIApiInstanceName: credentials.resourceName,
			azureOpenAIApiKey: credentials.apiKey,
			azureOpenAIApiVersion: credentials.apiVersion,
			...options,
		});

		return {
			response: logWrapper(embeddings, this),
		};
	}
}
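The object returned from supplyData above is handed, via logWrapper, to whatever vector-store node is connected downstream. Outside of n8n, the same LangChain embeddings instance would be used roughly as follows; a minimal sketch with placeholder deployment and credential values, not code from this commit:

	import { OpenAIEmbeddings } from 'langchain/embeddings/openai';

	// Placeholder values; inside the node these come from the azureOpenAiApi credential
	// and the Model (Deployment) Name parameter.
	const embeddings = new OpenAIEmbeddings({
		azureOpenAIApiDeploymentName: 'text-embedding-ada-002',
		azureOpenAIApiInstanceName: 'my-resource',
		azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
		azureOpenAIApiVersion: '2023-05-15',
	});

	// embedQuery returns one vector; embedDocuments returns one vector per input string.
	const vector = await embeddings.embedQuery('Hello from n8n');
	console.log(vector.length);
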
@@ -0,0 +1 @@ (new file: azure.svg node icon, 1.5 KiB)
<svg xmlns="http://www.w3.org/2000/svg" width="256" height="242" preserveAspectRatio="xMidYMid"><defs><linearGradient id="a" x1="58.972%" x2="37.191%" y1="7.411%" y2="103.762%"><stop offset="0%" stop-color="#114A8B"/><stop offset="100%" stop-color="#0669BC"/></linearGradient><linearGradient id="b" x1="59.719%" x2="52.691%" y1="52.313%" y2="54.864%"><stop offset="0%" stop-opacity=".3"/><stop offset="7.1%" stop-opacity=".2"/><stop offset="32.1%" stop-opacity=".1"/><stop offset="62.3%" stop-opacity=".05"/><stop offset="100%" stop-opacity="0"/></linearGradient><linearGradient id="c" x1="37.279%" x2="62.473%" y1="4.6%" y2="99.979%"><stop offset="0%" stop-color="#3CCBF4"/><stop offset="100%" stop-color="#2892DF"/></linearGradient></defs><path fill="url(#a)" d="M85.343.003h75.753L82.457 233a12.08 12.08 0 0 1-11.442 8.216H12.06A12.06 12.06 0 0 1 .633 225.303L73.898 8.219A12.08 12.08 0 0 1 85.343 0z"/><path fill="#0078D4" d="M195.423 156.282H75.297a5.56 5.56 0 0 0-3.796 9.627l77.19 72.047a12.14 12.14 0 0 0 8.28 3.26h68.02z"/><path fill="url(#b)" d="M85.343.003a11.98 11.98 0 0 0-11.471 8.376L.723 225.105a12.045 12.045 0 0 0 11.37 16.112h60.475a12.93 12.93 0 0 0 9.921-8.437l14.588-42.991 52.105 48.6a12.33 12.33 0 0 0 7.757 2.828h67.766l-29.721-84.935-86.643.02L161.37.003z"/><path fill="url(#c)" d="M182.098 8.207A12.06 12.06 0 0 0 170.67.003H86.245c5.175 0 9.773 3.301 11.428 8.204L170.94 225.3a12.062 12.062 0 0 1-11.428 15.92h84.429a12.062 12.062 0 0 0 11.425-15.92z"/></svg>
@@ -0,0 +1,169 @@ (new file)
/* eslint-disable n8n-nodes-base/node-dirname-against-convention */
import {
	NodeConnectionType,
	type IExecuteFunctions,
	type INodeType,
	type INodeTypeDescription,
	type SupplyData,
} from 'n8n-workflow';

import type { ClientOptions } from 'openai';
import { ChatOpenAI } from 'langchain/chat_models/openai';
import { logWrapper } from '../../../utils/logWrapper';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';

export class LmChatAzureOpenAi implements INodeType {
	description: INodeTypeDescription = {
		displayName: 'Azure OpenAI Chat Model',
		// eslint-disable-next-line n8n-nodes-base/node-class-description-name-miscased
		name: 'lmChatAzureOpenAi',
		icon: 'file:azure.svg',
		group: ['transform'],
		version: 1,
		description: 'For advanced usage with an AI chain',
		defaults: {
			name: 'Azure OpenAI Chat Model',
		},
		codex: {
			categories: ['AI'],
			subcategories: {
				AI: ['Language Models'],
			},
			resources: {
				primaryDocumentation: [
					{
						url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatazureopenai/',
					},
				],
			},
		},
		// eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node
		inputs: [],
		// eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong
		outputs: [NodeConnectionType.AiLanguageModel],
		outputNames: ['Model'],
		credentials: [
			{
				name: 'azureOpenAiApi',
				required: true,
			},
		],
		properties: [
			getConnectionHintNoticeField([NodeConnectionType.AiChain, NodeConnectionType.AiAgent]),
			{
				displayName: 'Model (Deployment) Name',
				name: 'model',
				type: 'string',
				description: 'The name of the model (deployment) to use',
				default: '',
			},
			{
				displayName: 'Options',
				name: 'options',
				placeholder: 'Add Option',
				description: 'Additional options to add',
				type: 'collection',
				default: {},
				options: [
					{
						displayName: 'Frequency Penalty',
						name: 'frequencyPenalty',
						default: 0,
						typeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },
						description:
							"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim",
						type: 'number',
					},
					{
						displayName: 'Maximum Number of Tokens',
						name: 'maxTokens',
						default: -1,
						description:
							'The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (except for the newest models, which support 32,768).',
						type: 'number',
						typeOptions: {
							maxValue: 32768,
						},
					},
					{
						displayName: 'Presence Penalty',
						name: 'presencePenalty',
						default: 0,
						typeOptions: { maxValue: 2, minValue: -2, numberPrecision: 1 },
						description:
							"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics",
						type: 'number',
					},
					{
						displayName: 'Sampling Temperature',
						name: 'temperature',
						default: 0.7,
						typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
						description:
							'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
						type: 'number',
					},
					{
						displayName: 'Timeout',
						name: 'timeout',
						default: 60000,
						description: 'Maximum amount of time a request is allowed to take in milliseconds',
						type: 'number',
					},
					{
						displayName: 'Max Retries',
						name: 'maxRetries',
						default: 2,
						description: 'Maximum number of retries to attempt',
						type: 'number',
					},
					{
						displayName: 'Top P',
						name: 'topP',
						default: 1,
						typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
						description:
							'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.',
						type: 'number',
					},
				],
			},
		],
	};

	async supplyData(this: IExecuteFunctions, itemIndex: number): Promise<SupplyData> {
		const credentials = (await this.getCredentials('azureOpenAiApi')) as {
			apiKey: string;
			resourceName: string;
			apiVersion: string;
		};

		const modelName = this.getNodeParameter('model', itemIndex) as string;
		const options = this.getNodeParameter('options', itemIndex, {}) as {
			frequencyPenalty?: number;
			maxTokens?: number;
			maxRetries: number;
			timeout: number;
			presencePenalty?: number;
			temperature?: number;
			topP?: number;
		};

		const configuration: ClientOptions = {};

		const model = new ChatOpenAI({
			azureOpenAIApiDeploymentName: modelName,
			azureOpenAIApiInstanceName: credentials.resourceName,
			azureOpenAIApiKey: credentials.apiKey,
			azureOpenAIApiVersion: credentials.apiVersion,
			...options,
			timeout: options.timeout ?? 60000,
			maxRetries: options.maxRetries ?? 2,
			configuration,
		});

		return {
			response: logWrapper(model, this),
		};
	}
}
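Likewise, the ChatOpenAI instance built in supplyData is what a connected chain or agent ultimately calls. A standalone sketch of the equivalent call path follows, using placeholder credentials and the LangChain message API as I understand it for this langchain version; it is not code from this commit, and the import paths may differ in other LangChain releases:

	import { ChatOpenAI } from 'langchain/chat_models/openai';
	import { HumanMessage } from 'langchain/schema';

	// Placeholder values; inside the node these come from the azureOpenAiApi credential
	// and the Model (Deployment) Name parameter.
	const model = new ChatOpenAI({
		azureOpenAIApiDeploymentName: 'gpt-35-turbo',
		azureOpenAIApiInstanceName: 'my-resource',
		azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
		azureOpenAIApiVersion: '2023-05-15',
		temperature: 0.7,
	});

	// Send a single chat message and print the model's reply.
	const response = await model.invoke([new HumanMessage('Say hello from n8n')]);
	console.log(response.content);
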
@@ -0,0 +1 @@ (new file: azure.svg node icon, 1.5 KiB)
<svg xmlns="http://www.w3.org/2000/svg" width="256" height="242" preserveAspectRatio="xMidYMid"><defs><linearGradient id="a" x1="58.972%" x2="37.191%" y1="7.411%" y2="103.762%"><stop offset="0%" stop-color="#114A8B"/><stop offset="100%" stop-color="#0669BC"/></linearGradient><linearGradient id="b" x1="59.719%" x2="52.691%" y1="52.313%" y2="54.864%"><stop offset="0%" stop-opacity=".3"/><stop offset="7.1%" stop-opacity=".2"/><stop offset="32.1%" stop-opacity=".1"/><stop offset="62.3%" stop-opacity=".05"/><stop offset="100%" stop-opacity="0"/></linearGradient><linearGradient id="c" x1="37.279%" x2="62.473%" y1="4.6%" y2="99.979%"><stop offset="0%" stop-color="#3CCBF4"/><stop offset="100%" stop-color="#2892DF"/></linearGradient></defs><path fill="url(#a)" d="M85.343.003h75.753L82.457 233a12.08 12.08 0 0 1-11.442 8.216H12.06A12.06 12.06 0 0 1 .633 225.303L73.898 8.219A12.08 12.08 0 0 1 85.343 0z"/><path fill="#0078D4" d="M195.423 156.282H75.297a5.56 5.56 0 0 0-3.796 9.627l77.19 72.047a12.14 12.14 0 0 0 8.28 3.26h68.02z"/><path fill="url(#b)" d="M85.343.003a11.98 11.98 0 0 0-11.471 8.376L.723 225.105a12.045 12.045 0 0 0 11.37 16.112h60.475a12.93 12.93 0 0 0 9.921-8.437l14.588-42.991 52.105 48.6a12.33 12.33 0 0 0 7.757 2.828h67.766l-29.721-84.935-86.643.02L161.37.003z"/><path fill="url(#c)" d="M182.098 8.207A12.06 12.06 0 0 0 170.67.003H86.245c5.175 0 9.773 3.301 11.428 8.204L170.94 225.3a12.062 12.062 0 0 1-11.428 15.92h84.429a12.062 12.062 0 0 0 11.425-15.92z"/></svg>
@@ -27,6 +27,7 @@
 	"n8nNodesApiVersion": 1,
 	"credentials": [
 		"dist/credentials/AnthropicApi.credentials.js",
+		"dist/credentials/AzureOpenAiApi.credentials.js",
 		"dist/credentials/CohereApi.credentials.js",
 		"dist/credentials/GooglePalmApi.credentials.js",
 		"dist/credentials/HuggingFaceApi.credentials.js",

@@ -53,11 +54,13 @@
 		"dist/nodes/document_loaders/DocumentJSONInputLoader/DocumentJsonInputLoader.node.js",
 		"dist/nodes/embeddings/EmbeddingsCohere/EmbeddingsCohere.node.js",
 		"dist/nodes/embeddings/EmbeddingsAwsBedrock/EmbeddingsAwsBedrock.node.js",
+		"dist/nodes/embeddings/EmbeddingsAzureOpenAi/EmbeddingsAzureOpenAi.node.js",
 		"dist/nodes/embeddings/EmbeddingsGooglePalm/EmbeddingsGooglePalm.node.js",
 		"dist/nodes/embeddings/EmbeddingsHuggingFaceInference/EmbeddingsHuggingFaceInference.node.js",
 		"dist/nodes/embeddings/EmbeddingsMistralCloud/EmbeddingsMistralCloud.node.js",
 		"dist/nodes/embeddings/EmbeddingsOpenAI/EmbeddingsOpenAi.node.js",
 		"dist/nodes/llms/LMChatAnthropic/LmChatAnthropic.node.js",
+		"dist/nodes/llms/LmChatAzureOpenAi/LmChatAzureOpenAi.node.js",
 		"dist/nodes/llms/LmGooglePalm/LmGooglePalm.node.js",
 		"dist/nodes/llms/LmChatAwsBedrock/LmChatAwsBedrock.node.js",
 		"dist/nodes/llms/LmChatGooglePalm/LmChatGooglePalm.node.js",