/* eslint-disable n8n-nodes-base/node-dirname-against-convention */
import {
	NodeConnectionType,
	type IExecuteFunctions,
	type INodeType,
	type INodeTypeDescription,
	type SupplyData,
} from 'n8n-workflow';

import { ChatOllama } from 'langchain/chat_models/ollama';
import { logWrapper } from '../../../utils/logWrapper';
import { getConnectionHintNoticeField } from '../../../utils/sharedFields';

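/**
 * Sub-node that exposes an Ollama chat model as an AI language model
 * connection: it does not execute on its own, but supplies a configured
 * LangChain ChatOllama instance to downstream chain and agent nodes.
 */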
export class LmChatOllama implements INodeType {
	description: INodeTypeDescription = {
		displayName: 'Ollama Chat Model',
		// eslint-disable-next-line n8n-nodes-base/node-class-description-name-miscased
		name: 'lmChatOllama',
		icon: 'file:ollama.svg',
		group: ['transform'],
		version: 1,
		description: 'Language Model Ollama',
		defaults: {
			name: 'Ollama Chat Model',
		},
		codex: {
			categories: ['AI'],
			subcategories: {
				AI: ['Language Models'],
			},
			resources: {
				primaryDocumentation: [
					{
						url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmchatollama/',
					},
				],
			},
		},
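		// As a sub-node it takes no main input; the model is exposed on a
		// dedicated AI language model output instead.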
		// eslint-disable-next-line n8n-nodes-base/node-class-description-inputs-wrong-regular-node
		inputs: [],
		// eslint-disable-next-line n8n-nodes-base/node-class-description-outputs-wrong
		outputs: [NodeConnectionType.AiLanguageModel],
		outputNames: ['Model'],
		credentials: [
			{
				name: 'ollamaApi',
				required: true,
			},
		],
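		// Strip a trailing slash from the credential's base URL so that
		// relative request paths such as '/api/tags' do not produce a double
		// slash.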
		requestDefaults: {
			ignoreHttpStatusErrors: true,
			baseURL: '={{ $credentials.baseUrl.replace(new RegExp("/$"), "") }}',
		},
		properties: [
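			// Show a UI hint telling the user which nodes (chains, agents)
			// this model can be connected to.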
			getConnectionHintNoticeField([NodeConnectionType.AiChain, NodeConnectionType.AiAgent]),
			{
				displayName: 'Model',
				name: 'model',
				type: 'options',
				default: 'llama2',
				description:
					'The model which will generate the completion. To download models, visit <a href="https://ollama.ai/library">Ollama Models Library</a>.',
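				// Populate the model dropdown dynamically: list the locally
				// installed models via GET /api/tags, unwrap the 'models' root
				// property, map each entry to a name/value option, and sort
				// the options by name.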
				typeOptions: {
					loadOptions: {
						routing: {
							request: {
								method: 'GET',
								url: '/api/tags',
							},
							output: {
								postReceive: [
									{
										type: 'rootProperty',
										properties: {
											property: 'models',
										},
									},
									{
										type: 'setKeyValue',
										properties: {
											name: '={{$responseItem.name}}',
											value: '={{$responseItem.name}}',
										},
									},
									{
										type: 'sort',
										properties: {
											key: 'name',
										},
									},
								],
							},
						},
					},
				},
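				// Send the selected model name in the request body.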
				routing: {
					send: {
						type: 'body',
						property: 'model',
					},
				},
				required: true,
			},
			{
				displayName: 'Options',
				name: 'options',
				placeholder: 'Add Option',
				description: 'Additional options to add',
				type: 'collection',
				default: {},
				options: [
					{
						displayName: 'Sampling Temperature',
						name: 'temperature',
						default: 0.7,
						typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
						description:
							'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
						type: 'number',
					},
					{
						displayName: 'Top K',
						name: 'topK',
						default: -1,
						// maxValue raised from 1 to 100: a cap of 1 (copy-pasted
						// from Top P) would make every value except -1 unusable.
						typeOptions: { maxValue: 100, minValue: -1, numberPrecision: 1 },
						description:
							'Used to remove "long tail" low probability responses. Defaults to -1, which disables it.',
						type: 'number',
					},
					{
						displayName: 'Top P',
						name: 'topP',
						default: 1,
						typeOptions: { maxValue: 1, minValue: 0, numberPrecision: 1 },
						description:
							'Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered. We generally recommend altering this or temperature but not both.',
						type: 'number',
					},
				],
			},
		],
	};
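	/**
	 * Called when a connected node needs the model: builds a ChatOllama
	 * client from the stored credentials and node parameters, and returns it
	 * wrapped in logWrapper so its calls are surfaced in the execution log.
	 */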
	async supplyData(this: IExecuteFunctions, itemIndex: number): Promise<SupplyData> {
		const credentials = await this.getCredentials('ollamaApi');

		const modelName = this.getNodeParameter('model', itemIndex) as string;
		const options = this.getNodeParameter('options', itemIndex, {}) as object;
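		// The collected options (temperature, topK, topP) map onto ChatOllama
		// constructor fields of the same names, so they are spread straight
		// into the client.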
		const model = new ChatOllama({
			baseUrl: credentials.baseUrl as string,
			model: modelName,
			...options,
		});

		return {
			response: logWrapper(model, this),
		};
	}
}