fix(OpenAI Chat Model Node, Ollama Chat Model Node): Change default model to a more up-to-date option (#11293)

This commit is contained in:
Eugene 2024-10-18 09:35:33 +02:00 committed by GitHub
parent a13e142ee2
commit 0be04c6348
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 4 additions and 4 deletions

View file

@@ -79,7 +79,7 @@ export class EmbeddingsOpenAi implements INodeType {
},
],
group: ['transform'],
version: 1,
version: [1, 1.1],
description: 'Use Embeddings OpenAI',
defaults: {
name: 'Embeddings OpenAI',

View file

@@ -128,7 +128,7 @@ export class LmChatOpenAi implements INodeType {
property: 'model',
},
},
default: 'gpt-3.5-turbo',
default: 'gpt-4o-mini',
},
{
displayName:

View file

@@ -17,7 +17,7 @@ export const ollamaModel: INodeProperties = {
displayName: 'Model',
name: 'model',
type: 'options',
default: 'llama2',
default: 'llama3.2',
description:
'The model which will generate the completion. To download models, visit <a href="https://ollama.ai/library">Ollama Models Library</a>.',
typeOptions: {

View file

@@ -26,7 +26,7 @@ type RunDetail = {
options: SerializedSecret | SerializedNotImplemented | SerializedFields;
};
const TIKTOKEN_ESTIMATE_MODEL = 'gpt-3.5-turbo';
const TIKTOKEN_ESTIMATE_MODEL = 'gpt-4o';
export class N8nLlmTracing extends BaseCallbackHandler {
name = 'N8nLlmTracing';