Mirror of https://github.com/n8n-io/n8n.git, synced 2025-03-05 20:50:17 -08:00
fix(OpenAI Node): Limit chat history to context window when using memory (#13137)
parent df8f059504
commit f057cfb46a
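Before this change, the assistant operation read conversation history straight from the memory's backing store via `memory.chatHistory.getMessages()`, which returns every stored message and ignores a `BufferWindowMemory`'s window size `k`. The new `getChatMessages` helper goes through `loadMemoryVariables` instead, which is where the window is applied, so only the last `k` turns reach the OpenAI thread. A minimal sketch of the difference, assuming langchain's `BufferWindowMemory` with `returnMessages: true`; the `demo` wrapper and message contents are illustrative, not part of the commit:

// sketch.ts: why reading chatHistory directly leaks past the context window.
// Illustrative only; mirrors the saveContext usage from the commit's test.
import type { BaseMessage } from '@langchain/core/messages';
import { AIMessage, HumanMessage } from '@langchain/core/messages';
import { BufferWindowMemory } from 'langchain/memory';

async function demo(): Promise<void> {
  const memory = new BufferWindowMemory({ returnMessages: true, k: 1 });

  for (const n of [1, 2, 3]) {
    await memory.saveContext(
      [new HumanMessage({ content: `human ${n}` })],
      [new AIMessage({ content: `ai ${n}` })],
    );
  }

  // Old code path: the raw store, all 6 messages, regardless of k.
  const unbounded = await memory.chatHistory.getMessages();

  // New code path (what getChatMessages does): loadMemoryVariables applies
  // the window, returning only the last k turns -> 2 messages here.
  const windowed = (await memory.loadMemoryVariables({}))[memory.memoryKey] as BaseMessage[];

  console.log(unbounded.length, windowed.length); // 6 2
}

void demo();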
packages/@n8n/nodes-langchain/nodes/vendors/OpenAi/actions/assistant/message.operation.ts

@@ -22,7 +22,7 @@ import { promptTypeOptions } from '@utils/descriptions';
 import { getConnectedTools } from '@utils/helpers';
 import { getTracingConfig } from '@utils/tracing';
 
-import { formatToOpenAIAssistantTool } from '../../helpers/utils';
+import { formatToOpenAIAssistantTool, getChatMessages } from '../../helpers/utils';
 import { assistantRLC } from '../descriptions';
 
 const properties: INodeProperties[] = [
@@ -252,7 +252,7 @@ export async function execute(this: IExecuteFunctions, i: number): Promise<INode
  };
  let thread: OpenAIClient.Beta.Threads.Thread;
  if (memory) {
-   const chatMessages = await memory.chatHistory.getMessages();
+   const chatMessages = await getChatMessages(memory);
 
    // Construct a new thread from the chat history to map the memory
    if (chatMessages.length) {
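Aside, not part of the diff: the "Construct a new thread" branch that follows the changed line seeds an OpenAI Assistants thread from these messages. A hedged sketch of what such a mapping can look like with the openai SDK; the `threadFromMemory` name, the `client` parameter, and the role mapping are assumptions for illustration, not the node's actual code:

// Hedged sketch: create an Assistants thread pre-populated with the windowed history.
import OpenAI from 'openai';
import type { BaseMessage } from '@langchain/core/messages';

async function threadFromMemory(client: OpenAI, chatMessages: BaseMessage[]) {
  // LangChain tags messages as 'human' / 'ai'; the Assistants API wants 'user' / 'assistant'.
  return await client.beta.threads.create({
    messages: chatMessages.map((msg) => ({
      role: msg._getType() === 'human' ? ('user' as const) : ('assistant' as const),
      content: msg.content as string,
    })),
  });
}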
packages/@n8n/nodes-langchain/nodes/vendors/OpenAi/helpers/utils.ts

@@ -1,5 +1,7 @@
+import type { BaseMessage } from '@langchain/core/messages';
 import type { StructuredTool } from '@langchain/core/tools';
 import type { OpenAIClient } from '@langchain/openai';
+import type { BufferWindowMemory } from 'langchain/memory';
 import { zodToJsonSchema } from 'zod-to-json-schema';
 
 // Copied from langchain(`langchain/src/tools/convert_to_openai.ts`)
@@ -43,3 +45,7 @@ export function formatToOpenAIAssistantTool(tool: StructuredTool): OpenAIClient.
    },
  };
 }
+
+export async function getChatMessages(memory: BufferWindowMemory): Promise<BaseMessage[]> {
+  return (await memory.loadMemoryVariables({}))[memory.memoryKey] as BaseMessage[];
+}
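The helper is deliberately thin: trimming is delegated to the memory's own `loadMemoryVariables`, and `getChatMessages` only unwraps the slot named by `memoryKey`. One caveat worth noting, as an observation about langchain's API rather than something the diff enforces: the `as BaseMessage[]` cast only holds when the memory was built with `returnMessages: true`; otherwise `loadMemoryVariables` puts a single formatted string under the same key.

// Caveat sketch, illustrative only: the shape under memoryKey depends on configuration.
import { BufferWindowMemory } from 'langchain/memory';

const asMessages = new BufferWindowMemory({ returnMessages: true, k: 2 });
// loadMemoryVariables({}) -> { history: BaseMessage[] } (what getChatMessages expects)

const asString = new BufferWindowMemory({ k: 2 });
// loadMemoryVariables({}) -> { history: string } (the cast would lie here)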
packages/@n8n/nodes-langchain/nodes/vendors/OpenAi/test/utils.test.ts (new file, 46 lines)

@@ -0,0 +1,46 @@
+import { AIMessage, HumanMessage } from '@langchain/core/messages';
+import { BufferWindowMemory } from 'langchain/memory';
+
+import { getChatMessages } from '../helpers/utils';
+
+describe('OpenAI message history', () => {
+  it('should only get a limited number of messages', async () => {
+    const memory = new BufferWindowMemory({
+      returnMessages: true,
+      k: 2,
+    });
+    expect(await getChatMessages(memory)).toEqual([]);
+
+    await memory.saveContext(
+      [new HumanMessage({ content: 'human 1' })],
+      [new AIMessage({ content: 'ai 1' })],
+    );
+    // `k` counts turns, but `getChatMessages` returns messages: one human and one AI message per turn.
+    expect((await getChatMessages(memory)).length).toEqual(2);
+
+    await memory.saveContext(
+      [new HumanMessage({ content: 'human 2' })],
+      [new AIMessage({ content: 'ai 2' })],
+    );
+    expect((await getChatMessages(memory)).length).toEqual(4);
+    expect((await getChatMessages(memory)).map((msg) => msg.content)).toEqual([
+      'human 1',
+      'ai 1',
+      'human 2',
+      'ai 2',
+    ]);
+
+    // The oldest turn should now be trimmed: k = 2 keeps only the last two turns.
+    await memory.saveContext(
+      [new HumanMessage({ content: 'human 3' })],
+      [new AIMessage({ content: 'ai 3' })],
+    );
+    expect((await getChatMessages(memory)).length).toEqual(4);
+    expect((await getChatMessages(memory)).map((msg) => msg.content)).toEqual([
+      'human 2',
+      'ai 2',
+      'human 3',
+      'ai 3',
+    ]);
+  });
+});