Add hardcoded model list search; add min/max validation for number options

This commit is contained in:
Stamsy 2025-03-03 22:34:17 +02:00
parent 2c2f5e063f
commit d99bd2fedc
3 changed files with 76 additions and 23 deletions

View file

@ -34,7 +34,7 @@ export const chatCompletionsOperations: INodeProperties[] = [
export const chatCompletionsFields: INodeProperties[] = [ export const chatCompletionsFields: INodeProperties[] = [
{ {
displayName: 'Model', //TODO Fix resource locator to load the models displayName: 'Model',
name: 'model', name: 'model',
type: 'resourceLocator', type: 'resourceLocator',
default: { mode: 'list', value: '' }, default: { mode: 'list', value: '' },
@ -45,22 +45,25 @@ export const chatCompletionsFields: INodeProperties[] = [
name: 'list', name: 'list',
type: 'list', type: 'list',
typeOptions: { typeOptions: {
options: [ searchListMethod: 'getModels',
{ name: 'Sonar Deep Research', value: 'sonar-deep-research' },
{ name: 'Sonar Reasoning Pro', value: 'sonar-reasoning-pro' },
{ name: 'Sonar Reasoning', value: 'sonar-reasoning' },
{ name: 'Sonar Pro', value: 'sonar-pro' },
{ name: 'Sonar', value: 'sonar' },
{ name: 'R1-1776', value: 'r1-1776' },
],
searchable: true, searchable: true,
} as any, },
}, },
{ {
displayName: 'By ID', displayName: 'By ID',
name: 'id', name: 'id',
type: 'string', type: 'string',
placeholder: 'e.g. sonar-deep-research', placeholder: 'e.g. sonar-deep-research',
validation: [
{
type: 'regex',
properties: {
regex: '^[a-zA-Z0-9-]+$',
errorMessage:
'Not a valid Perplexity model ID. Model IDs must contain only alphanumeric characters and hyphens.',
},
},
],
}, },
], ],
description: 'The model which will generate the completion', description: 'The model which will generate the completion',
@ -150,6 +153,9 @@ export const chatCompletionsFields: INodeProperties[] = [
name: 'frequency_penalty', name: 'frequency_penalty',
type: 'number', type: 'number',
default: 1, default: 1,
typeOptions: {
minValue: 1,
},
description: description:
"Values greater than 1.0 penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim", "Values greater than 1.0 penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim",
routing: { routing: {
@ -181,9 +187,8 @@ export const chatCompletionsFields: INodeProperties[] = [
description: description:
"A value between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. A value between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.", "A value between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. A value between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
typeOptions: { typeOptions: {
min: -2.0, minValue: -2.0,
max: 2.0, maxValue: 2.0,
numberPrecision: 1,
}, },
routing: { routing: {
send: { send: {
@ -219,9 +224,8 @@ export const chatCompletionsFields: INodeProperties[] = [
description: description:
'The amount of randomness in the response, valued between 0 inclusive and 2 exclusive. Higher values are more random, and lower values are more deterministic.', 'The amount of randomness in the response, valued between 0 inclusive and 2 exclusive. Higher values are more random, and lower values are more deterministic.',
typeOptions: { typeOptions: {
min: 0, minValue: 0,
max: 2, maxValue: 2,
numberPrecision: 1,
}, },
routing: { routing: {
send: { send: {
@ -238,9 +242,8 @@ export const chatCompletionsFields: INodeProperties[] = [
description: description:
'The number of tokens to keep for highest top-k filtering, specified as an integer between 0 and 2048 inclusive. If set to 0, top-k filtering is disabled. We recommend either altering Top K or Top P, but not both.', 'The number of tokens to keep for highest top-k filtering, specified as an integer between 0 and 2048 inclusive. If set to 0, top-k filtering is disabled. We recommend either altering Top K or Top P, but not both.',
typeOptions: { typeOptions: {
min: 0, minValue: 0,
max: 2048, maxValue: 2048,
numberPrecision: 1,
}, },
routing: { routing: {
send: { send: {
@ -257,9 +260,8 @@ export const chatCompletionsFields: INodeProperties[] = [
description: description:
'The nucleus sampling threshold, valued between 0 and 1 inclusive. For each subsequent token, the model considers the results of the tokens with top_p probability mass. We recommend either altering top_k or top_p, but not both.', 'The nucleus sampling threshold, valued between 0 and 1 inclusive. For each subsequent token, the model considers the results of the tokens with top_p probability mass. We recommend either altering top_k or top_p, but not both.',
typeOptions: { typeOptions: {
min: 0, minValue: 0,
max: 1, maxValue: 1,
numberPrecision: 1,
}, },
routing: { routing: {
send: { send: {
@ -302,11 +304,13 @@ export const chatCompletionsFields: INodeProperties[] = [
type: 'string', type: 'string',
default: '', default: '',
description: description:
'Limit the citations used by the online model to URLs from the specified domains. For blacklisting add a - to the beginning of the domain string. Requires Perplexity API usage Tier-3.', 'Limit the citations used by the online model to URLs from the specified domains. For blacklisting, add a - to the beginning of the domain string (e.g., -domain1). Currently limited to 3 domains. Requires Perplexity API usage Tier-3.',
placeholder: 'e.g. domain1,domain2,-domain3',
routing: { routing: {
send: { send: {
type: 'body', type: 'body',
property: 'search_domain_filter', property: 'search_domain_filter',
value: '={{ $value.split(",").map(domain => domain.trim()) }}',
}, },
}, },
}, },

View file

@ -1,7 +1,10 @@
import type { import type {
IExecuteSingleFunctions, IExecuteSingleFunctions,
ILoadOptionsFunctions,
IN8nHttpFullResponse, IN8nHttpFullResponse,
INodeExecutionData, INodeExecutionData,
INodeListSearchResult,
INodePropertyOptions,
JsonObject, JsonObject,
} from 'n8n-workflow'; } from 'n8n-workflow';
import { NodeApiError } from 'n8n-workflow'; import { NodeApiError } from 'n8n-workflow';
@ -16,3 +19,42 @@ export async function sendErrorPostReceive(
} }
return data; return data;
} }
/**
 * List-search method for the model resourceLocator.
 *
 * Returns the hardcoded set of Perplexity models, optionally narrowed by a
 * case-insensitive filter. The filter is matched against BOTH the display
 * name and the model ID (`value`), so typing an exact ID such as
 * "sonar-pro" (hyphenated, as sent to the API) also finds the model —
 * matching only `name` would miss it because display names use spaces.
 *
 * @param filter - Optional search string typed by the user in the locator.
 * @returns The (possibly filtered) model options wrapped for n8n list search.
 */
export async function getModels(
	this: ILoadOptionsFunctions,
	filter?: string,
): Promise<INodeListSearchResult> {
	const models: INodePropertyOptions[] = [
		{ name: 'Sonar Deep Research', value: 'sonar-deep-research' },
		{ name: 'Sonar Reasoning Pro', value: 'sonar-reasoning-pro' },
		{ name: 'Sonar Reasoning', value: 'sonar-reasoning' },
		{ name: 'Sonar Pro', value: 'sonar-pro' },
		{ name: 'Sonar', value: 'sonar' },
		{ name: 'R1-1776', value: 'r1-1776' },
	];

	if (!filter) {
		return { results: models };
	}

	const needle = filter.toLowerCase();
	return {
		results: models.filter(
			(model) =>
				model.name.toLowerCase().includes(needle) ||
				String(model.value).toLowerCase().includes(needle),
		),
	};
}

View file

@ -2,6 +2,7 @@ import type { INodeType, INodeTypeDescription } from 'n8n-workflow';
import { NodeConnectionType } from 'n8n-workflow'; import { NodeConnectionType } from 'n8n-workflow';
import { chatCompletionsFields, chatCompletionsOperations } from './ChatCompletionsDescription'; import { chatCompletionsFields, chatCompletionsOperations } from './ChatCompletionsDescription';
import { getModels } from './GenericFunctions';
export class Perplexity implements INodeType { export class Perplexity implements INodeType {
description: INodeTypeDescription = { description: INodeTypeDescription = {
@ -45,4 +46,10 @@ export class Perplexity implements INodeType {
...chatCompletionsFields, ...chatCompletionsFields,
], ],
}; };
methods = {
listSearch: {
getModels,
},
};
} }