fix(editor): Handle large payloads in the AI Assistant requests better (#12747)

Milorad FIlipović 2025-01-22 14:50:28 +01:00 committed by GitHub
parent 60187cab9b
commit eb4dea1ca8
12 changed files with 751 additions and 24 deletions

View file

@@ -1,6 +1,9 @@
import { useAIAssistantHelpers } from '@/composables/useAIAssistantHelpers';
import { AI_ASSISTANT_MAX_CONTENT_LENGTH } from '@/constants';
import type { ICredentialsResponse, IRestApiContext } from '@/Interface';
import type { AskAiRequest, ChatRequest, ReplaceCodeRequest } from '@/types/assistant.types';
import { makeRestApiRequest, streamRequest } from '@/utils/apiUtils';
import { getObjectSizeInKB } from '@/utils/objectUtils';
import type { IDataObject } from 'n8n-workflow';
export function chatWithAssistant(
@@ -10,6 +13,15 @@ export function chatWithAssistant(
onDone: () => void,
onError: (e: Error) => void,
): void {
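// Before streaming, measure the serialized payload (in KB) and trim it if it
// exceeds AI_ASSISTANT_MAX_CONTENT_LENGTH; a trimming failure is surfaced
// through the error callback instead of being thrown into the stream.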
try {
const payloadSize = getObjectSizeInKB(payload.payload);
if (payloadSize > AI_ASSISTANT_MAX_CONTENT_LENGTH) {
useAIAssistantHelpers().trimPayloadSize(payload);
}
} catch (e) {
onError(e);
return;
}
void streamRequest<ChatRequest.ResponsePayload>(
ctx,
'/ai/chat',

View file

@@ -0,0 +1,430 @@
import { VIEWS } from '@/constants';
import type { ChatRequest } from '@/types/assistant.types';
import { NodeConnectionType } from 'n8n-workflow';
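// Target payload sizes (in KB) used by the trimming tests: 4 KB lets a single
// trimming pass succeed, while 2 KB forces the second pass that drops the whole context.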
export const PAYLOAD_SIZE_FOR_1_PASS = 4;
export const PAYLOAD_SIZE_FOR_2_PASSES = 2;
export const ERROR_HELPER_TEST_PAYLOAD: ChatRequest.RequestPayload = {
payload: {
role: 'user',
type: 'init-error-helper',
user: {
firstName: 'Milorad',
},
error: {
name: 'NodeOperationError',
message: "Referenced node doesn't exist",
description:
"The node <strong>'Hey'</strong> doesn't exist, but it's used in an expression here.",
},
node: {
position: [0, 0],
parameters: {
mode: 'manual',
duplicateItem: false,
assignments: {
assignments: {
'0': {
id: '0957fbdb-a021-413b-9d42-fc847666f999',
name: 'text',
value: 'Lorem ipsum dolor sit amet',
type: 'string',
},
'1': {
id: '8efecfa7-8df7-492e-83e7-3d517ad03e60',
name: 'foo',
value: {
value: "={{ $('Hey').json.name }}",
resolvedExpressionValue: 'Error in expression: "Referenced node doesn\'t exist"',
},
type: 'string',
},
},
},
includeOtherFields: false,
options: {},
},
type: 'n8n-nodes-base.set',
typeVersion: 3.4,
id: '6dc70bf3-ba54-4481-b9f5-ce255bdd5fb8',
name: 'This is fine',
},
executionSchema: [],
},
};
export const SUPPORT_CHAT_TEST_PAYLOAD: ChatRequest.RequestPayload = {
payload: {
role: 'user',
type: 'init-support-chat',
user: {
firstName: 'Milorad',
},
context: {
currentView: {
name: VIEWS.WORKFLOW,
description:
'The user is currently looking at the current workflow in n8n editor, without any specific node selected.',
},
activeNodeInfo: {
node: {
position: [0, 0],
parameters: {
mode: 'manual',
duplicateItem: false,
assignments: {
assignments: {
'0': {
id: '969e86d0-76de-44f6-b07d-44a8a953f564',
name: 'name',
value: {
value: "={{ $('Edit Fields 2').name }}",
resolvedExpressionValue:
'Error in expression: "Referenced node doesn\'t exist"',
},
type: 'number',
},
},
},
includeOtherFields: false,
options: {},
},
type: 'n8n-nodes-base.set',
typeVersion: 3.4,
id: '8eac1591-ddc6-4d93-bec7-998cbfe27cc7',
name: 'Edit Fields1',
},
executionStatus: {
status: 'error',
error: {
name: 'NodeOperationError',
message: "Referenced node doesn't exist",
stack:
"NodeOperationError: Referenced node doesn't exist\n at ExecuteContext.execute (/Users/miloradfilipovic/workspace/n8n/packages/nodes-base/nodes/Set/v2/manual.mode.ts:256:9)\n at ExecuteContext.execute (/Users/miloradfilipovic/workspace/n8n/packages/nodes-base/nodes/Set/v2/SetV2.node.ts:351:48)\n at WorkflowExecute.runNode (/Users/miloradfilipovic/workspace/n8n/packages/core/src/execution-engine/workflow-execute.ts:1097:31)\n at /Users/miloradfilipovic/workspace/n8n/packages/core/src/execution-engine/workflow-execute.ts:1505:38\n at /Users/miloradfilipovic/workspace/n8n/packages/core/src/execution-engine/workflow-execute.ts:2066:11",
},
},
referencedNodes: [],
},
currentWorkflow: {
name: '🧪 Assistant context test',
active: false,
connections: {
'When clicking Test workflow': {
main: [
[
{
node: 'Edit Fields',
type: NodeConnectionType.Main,
index: 0,
},
],
],
},
'Edit Fields': {
main: [
[
{
node: 'Bad request no chat found',
type: NodeConnectionType.Main,
index: 0,
},
{
node: 'Slack',
type: NodeConnectionType.Main,
index: 0,
},
{
node: 'Edit Fields1',
type: NodeConnectionType.Main,
index: 0,
},
{
node: 'Edit Fields2',
type: NodeConnectionType.Main,
index: 0,
},
],
],
},
},
nodes: [
{
parameters: {
notice: '',
},
id: 'c457ff96-3b0c-4dbc-b47f-dc88396a46ae',
name: 'When clicking Test workflow',
type: 'n8n-nodes-base.manualTrigger',
position: [-60, 200],
typeVersion: 1,
},
{
parameters: {
resource: 'chat',
operation: 'get',
chatId: '13',
},
id: '60ddc045-d4e3-4b62-9832-12ecf78937a6',
name: 'Bad request no chat found',
type: 'n8n-nodes-base.telegram',
typeVersion: 1.1,
position: [540, 0],
issues: {},
disabled: true,
},
{
parameters: {
mode: 'manual',
duplicateItem: false,
assignments: {
assignments: [
{
id: '70448b12-9b2b-4bfb-abee-6432c4c58de1',
name: 'name',
value: 'Joe',
type: 'string',
},
],
},
includeOtherFields: false,
options: {},
},
type: 'n8n-nodes-base.set',
typeVersion: 3.4,
position: [200, 200],
id: '0a831739-13cd-4541-b20b-7db73abbcaf0',
name: 'Edit Fields',
},
{
parameters: {
authentication: 'oAuth2',
resource: 'channel',
operation: 'archive',
channelId: {
__rl: true,
mode: 'list',
value: '',
},
},
type: 'n8n-nodes-base.slack',
typeVersion: 2.2,
position: [540, 200],
id: 'aff7471e-b2bc-4274-abe1-97897a17eaa6',
name: 'Slack',
webhookId: '7f8b574c-7729-4220-bbe9-bf5aa382406a',
credentials: {
slackOAuth2Api: {
id: 'mZRj4wi3gavIzu9b',
name: 'Slack account',
},
},
disabled: true,
},
{
parameters: {
mode: 'manual',
duplicateItem: false,
assignments: {
assignments: [
{
id: '969e86d0-76de-44f6-b07d-44a8a953f564',
name: 'name',
value: "={{ $('Edit Fields 2').name }}",
type: 'number',
},
],
},
includeOtherFields: false,
options: {},
},
type: 'n8n-nodes-base.set',
typeVersion: 3.4,
position: [540, 400],
id: '8eac1591-ddc6-4d93-bec7-998cbfe27cc7',
name: 'Edit Fields1',
issues: {
execution: true,
},
},
{
parameters: {
mode: 'manual',
duplicateItem: false,
assignments: {
assignments: [
{
id: '9bdfc283-64f7-41c5-9a55-b8d8ccbe3e9d',
name: 'age',
value: '={{ $json.name }}',
type: 'number',
},
],
},
includeOtherFields: false,
options: {},
},
type: 'n8n-nodes-base.set',
typeVersion: 3.4,
position: [440, 560],
id: '34e56e14-d1a9-4a73-9208-15d39771a9ba',
name: 'Edit Fields2',
},
],
},
executionData: {
runData: {
'When clicking Test workflow': [
{
hints: [],
startTime: 1737540693122,
executionTime: 1,
source: [],
executionStatus: 'success',
},
],
'Edit Fields': [
{
hints: [],
startTime: 1737540693124,
executionTime: 2,
source: [
{
previousNode: 'When clicking Test workflow',
},
],
executionStatus: 'success',
},
],
'Bad request no chat found': [
{
hints: [],
startTime: 1737540693126,
executionTime: 0,
source: [
{
previousNode: 'Edit Fields',
},
],
executionStatus: 'success',
},
],
Slack: [
{
hints: [],
startTime: 1737540693127,
executionTime: 0,
source: [
{
previousNode: 'Edit Fields',
},
],
executionStatus: 'success',
},
],
'Edit Fields1': [
{
hints: [],
startTime: 1737540693127,
executionTime: 28,
source: [
{
previousNode: 'Edit Fields',
},
],
executionStatus: 'error',
// @ts-expect-error Incomplete mock objects are expected
error: {
level: 'warning',
tags: {
packageName: 'workflow',
},
context: {
itemIndex: 0,
nodeCause: 'Edit Fields 2',
descriptionKey: 'nodeNotFound',
parameter: 'assignments',
},
functionality: 'regular',
name: 'NodeOperationError',
timestamp: 1737540693141,
node: {
parameters: {
mode: 'manual',
duplicateItem: false,
assignments: {
assignments: [
{
id: '969e86d0-76de-44f6-b07d-44a8a953f564',
name: 'name',
value: "={{ $('Edit Fields 2').name }}",
type: 'number',
},
],
},
includeOtherFields: false,
options: {},
},
type: 'n8n-nodes-base.set',
typeVersion: 3.4,
position: [540, 400],
id: '8eac1591-ddc6-4d93-bec7-998cbfe27cc7',
name: 'Edit Fields1',
},
messages: [],
message: "Referenced node doesn't exist",
stack:
"NodeOperationError: Referenced node doesn't exist\n at ExecuteContext.execute (/Users/miloradfilipovic/workspace/n8n/packages/nodes-base/nodes/Set/v2/manual.mode.ts:256:9)\n at ExecuteContext.execute (/Users/miloradfilipovic/workspace/n8n/packages/nodes-base/nodes/Set/v2/SetV2.node.ts:351:48)\n at WorkflowExecute.runNode (/Users/miloradfilipovic/workspace/n8n/packages/core/src/execution-engine/workflow-execute.ts:1097:31)\n at /Users/miloradfilipovic/workspace/n8n/packages/core/src/execution-engine/workflow-execute.ts:1505:38\n at /Users/miloradfilipovic/workspace/n8n/packages/core/src/execution-engine/workflow-execute.ts:2066:11",
},
},
],
},
// @ts-expect-error Incomplete mock objects are expected
error: {
level: 'warning',
tags: {
packageName: 'workflow',
},
context: {
itemIndex: 0,
nodeCause: 'Edit Fields 2',
descriptionKey: 'nodeNotFound',
parameter: 'assignments',
},
functionality: 'regular',
name: 'NodeOperationError',
timestamp: 1737540693141,
node: {
parameters: {
mode: 'manual',
duplicateItem: false,
assignments: {
assignments: [
{
id: '969e86d0-76de-44f6-b07d-44a8a953f564',
name: 'name',
value: "={{ $('Edit Fields 2').name }}",
type: 'number',
},
],
},
includeOtherFields: false,
options: {},
},
type: 'n8n-nodes-base.set',
typeVersion: 3.4,
position: [540, 400],
id: '8eac1591-ddc6-4d93-bec7-998cbfe27cc7',
name: 'Edit Fields1',
},
messages: [],
message: "Referenced node doesn't exist",
stack:
"NodeOperationError: Referenced node doesn't exist\n at ExecuteContext.execute (/Users/miloradfilipovic/workspace/n8n/packages/nodes-base/nodes/Set/v2/manual.mode.ts:256:9)\n at ExecuteContext.execute (/Users/miloradfilipovic/workspace/n8n/packages/nodes-base/nodes/Set/v2/SetV2.node.ts:351:48)\n at WorkflowExecute.runNode (/Users/miloradfilipovic/workspace/n8n/packages/core/src/execution-engine/workflow-execute.ts:1097:31)\n at /Users/miloradfilipovic/workspace/n8n/packages/core/src/execution-engine/workflow-execute.ts:1505:38\n at /Users/miloradfilipovic/workspace/n8n/packages/core/src/execution-engine/workflow-execute.ts:2066:11",
},
lastNodeExecuted: 'Edit Fields1',
},
},
question: 'Hey',
},
};

View file

@@ -4,6 +4,13 @@ import { useAIAssistantHelpers } from './useAIAssistantHelpers';
import { createTestingPinia } from '@pinia/testing';
import { setActivePinia } from 'pinia';
import type { IWorkflowDb } from '@/Interface';
import type { ChatRequest } from '@/types/assistant.types';
import {
ERROR_HELPER_TEST_PAYLOAD,
PAYLOAD_SIZE_FOR_1_PASS,
PAYLOAD_SIZE_FOR_2_PASSES,
SUPPORT_CHAT_TEST_PAYLOAD,
} from './useAIAssistantHelpers.test.constants';
const referencedNodesTestCases: Array<{ caseName: string; node: INode; expected: string[] }> = [
{
@@ -549,3 +556,67 @@ describe('Simplify assistant payloads', () => {
}
});
});
describe('Trim Payload Size', () => {
let aiAssistantHelpers: ReturnType<typeof useAIAssistantHelpers>;
beforeEach(() => {
setActivePinia(createTestingPinia());
aiAssistantHelpers = useAIAssistantHelpers();
});
it('Should trim active node parameters in error helper payload', () => {
const payload = ERROR_HELPER_TEST_PAYLOAD;
aiAssistantHelpers.trimPayloadSize(payload);
expect((payload.payload as ChatRequest.InitErrorHelper).node.parameters).toEqual({});
});
it('Should trim all node parameters in support chat', () => {
// Testing the scenario where only one trimming pass is needed
// (payload is under the limit after removing all node parameters and execution data)
const payload: ChatRequest.RequestPayload = SUPPORT_CHAT_TEST_PAYLOAD;
const supportPayload: ChatRequest.InitSupportChat =
payload.payload as ChatRequest.InitSupportChat;
// Trimming to 4 KB should be successful
expect(() =>
aiAssistantHelpers.trimPayloadSize(payload, PAYLOAD_SIZE_FOR_1_PASS),
).not.toThrow();
// All active node parameters should be removed
expect(supportPayload?.context?.activeNodeInfo?.node?.parameters).toEqual({});
// Also, all node parameters in the workflow should be removed
supportPayload.context?.currentWorkflow?.nodes?.forEach((node) => {
expect(node.parameters).toEqual({});
});
// Node parameters in the execution data should be removed
expect(supportPayload.context?.executionData?.runData).toEqual({});
if (
supportPayload.context?.executionData?.error &&
'node' in supportPayload.context.executionData.error
) {
expect(supportPayload.context?.executionData?.error?.node?.parameters).toEqual({});
}
// Context object should still be there
expect(supportPayload.context).to.be.an('object');
});
it('Should trim the whole context in support chat', () => {
// Testing the scenario where both trimming passes are needed
// (payload is over the limit after removing all node parameters and execution data)
const payload: ChatRequest.RequestPayload = SUPPORT_CHAT_TEST_PAYLOAD;
const supportPayload: ChatRequest.InitSupportChat =
payload.payload as ChatRequest.InitSupportChat;
// Trimming should be successful
expect(() =>
aiAssistantHelpers.trimPayloadSize(payload, PAYLOAD_SIZE_FOR_2_PASSES),
).not.toThrow();
// The whole context object should be removed
expect(supportPayload.context).not.toBeDefined();
});
it('Should throw an error if payload is too big after trimming', () => {
const payload = ERROR_HELPER_TEST_PAYLOAD;
expect(() => aiAssistantHelpers.trimPayloadSize(payload, 0.2)).toThrow();
});
});

View file

@@ -14,9 +14,10 @@ import { executionDataToJson, getMainAuthField, getNodeAuthOptions } from '@/uti
import type { ChatRequest } from '@/types/assistant.types';
import { useWorkflowsStore } from '@/stores/workflows.store';
import { useDataSchema } from './useDataSchema';
import { VIEWS } from '@/constants';
import { AI_ASSISTANT_MAX_CONTENT_LENGTH, VIEWS } from '@/constants';
import { useI18n } from './useI18n';
import type { IWorkflowDb } from '@/Interface';
import { getObjectSizeInKB } from '@/utils/objectUtils';
const CANVAS_VIEWS = [VIEWS.NEW_WORKFLOW, VIEWS.WORKFLOW, VIEWS.EXECUTION_DEBUG];
const EXECUTION_VIEWS = [VIEWS.EXECUTION_PREVIEW];
@@ -251,6 +252,64 @@ export const useAIAssistantHelpers = () => {
nodes: workflow.nodes,
});
/**
* Reduces AI Assistant request payload size to make it fit the specified content length.
* If, after two passes, the payload is still too big, it throws an error.
* @param payload The request payload to trim
* @param size The maximum size of the payload in KB
*/
const trimPayloadToSize = (
payload: ChatRequest.RequestPayload,
size = AI_ASSISTANT_MAX_CONTENT_LENGTH,
): void => {
const requestPayload = payload.payload;
// For support chat, remove parameters from the active node object and all nodes in the workflow
if (requestPayload.type === 'init-support-chat') {
if (requestPayload.context?.activeNodeInfo?.node) {
requestPayload.context.activeNodeInfo.node.parameters = {};
}
if (requestPayload.context?.currentWorkflow) {
requestPayload.context.currentWorkflow?.nodes?.forEach((node) => {
node.parameters = {};
});
}
if (requestPayload.context?.executionData?.runData) {
requestPayload.context.executionData.runData = {};
}
if (
requestPayload.context?.executionData?.error &&
'node' in requestPayload.context?.executionData?.error
) {
if (requestPayload.context?.executionData?.error?.node) {
requestPayload.context.executionData.error.node.parameters = {};
}
}
// If the payload is still too big, remove the whole context object
if (getRequestPayloadSize(payload) > size) {
requestPayload.context = undefined;
}
// For error helper, remove parameters from the active node object
// This will leave just the error, user info and basic node structure in the payload
} else if (requestPayload.type === 'init-error-helper') {
requestPayload.node.parameters = {};
}
// If the payload is still too big, throw an error that will be shown to the user
if (getRequestPayloadSize(payload) > size) {
throw new Error(locale.baseText('aiAssistant.payloadTooBig.message'));
}
};
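// Minimal usage sketch (illustrative caller, not part of this file): the helper
// mutates the payload in place and throws only if it is still over the limit
// after trimming, so callers such as chatWithAssistant wrap it in try/catch:
//
//   try {
//     useAIAssistantHelpers().trimPayloadSize(payload); // defaults to AI_ASSISTANT_MAX_CONTENT_LENGTH
//   } catch (e) {
//     onError(e); // e.g. shows aiAssistant.payloadTooBig.message to the user
//     return;
//   }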
/**
* Get the size of the request payload in KB, returns 0 if the payload is not a valid object
*/
const getRequestPayloadSize = (payload: ChatRequest.RequestPayload): number => {
try {
return getObjectSizeInKB(payload.payload);
} catch (error) {
return 0;
}
};
return {
processNodeForAssistant,
getNodeInfoForAssistant,
@@ -261,5 +320,6 @@ export const useAIAssistantHelpers = () => {
getReferencedNodes,
simplifyResultData,
simplifyWorkflowForAssistant,
trimPayloadSize: trimPayloadToSize,
};
};

View file

@@ -907,3 +907,5 @@ export const APP_MODALS_ELEMENT_ID = 'app-modals';
export const NEW_SAMPLE_WORKFLOW_CREATED_CHANNEL = 'new-sample-sub-workflow-created';
export const AI_NODES_PACKAGE_NAME = '@n8n/n8n-nodes-langchain';
export const AI_ASSISTANT_MAX_CONTENT_LENGTH = 100; // in kilobytes

View file

@@ -155,7 +155,8 @@
"aiAssistant.newSessionModal.message": "You already have an active AI Assistant session. Starting a new session will clear your current conversation history.",
"aiAssistant.newSessionModal.question": "Are you sure you want to start a new session?",
"aiAssistant.newSessionModal.confirm": "Start new session",
"aiAssistant.serviceError.message": "Unable to connect to n8n's AI service",
"aiAssistant.serviceError.message": "Unable to connect to n8n's AI service ({message})",
"aiAssistant.payloadTooBig.message": "Payload size is too large",
"aiAssistant.codeUpdated.message.title": "Assistant modified workflow",
"aiAssistant.codeUpdated.message.body1": "Open the",
"aiAssistant.codeUpdated.message.body2": "node to see the changes",

View file

@@ -283,7 +283,7 @@ export const useAssistantStore = defineStore(STORES.ASSISTANT, () => {
stopStreaming();
assistantThinkingMessage.value = undefined;
addAssistantError(
`${locale.baseText('aiAssistant.serviceError.message')}: (${e.message})`,
locale.baseText('aiAssistant.serviceError.message', { interpolate: { message: e.message } }),
id,
retry,
);
@@ -487,24 +487,25 @@ export const useAssistantStore = defineStore(STORES.ASSISTANT, () => {
openChat();
streaming.value = true;
const payload: ChatRequest.RequestPayload['payload'] = {
role: 'user',
type: 'init-error-helper',
user: {
firstName: usersStore.currentUser?.firstName ?? '',
},
error: context.error,
node: assistantHelpers.processNodeForAssistant(context.node, [
'position',
'parameters.notice',
]),
nodeInputData,
executionSchema: schemas,
authType,
};
chatWithAssistant(
rootStore.restApiContext,
{
payload: {
role: 'user',
type: 'init-error-helper',
user: {
firstName: usersStore.currentUser?.firstName ?? '',
},
error: context.error,
node: assistantHelpers.processNodeForAssistant(context.node, [
'position',
'parameters.notice',
]),
nodeInputData,
executionSchema: schemas,
authType,
},
payload,
},
(msg) => onEachStreamingMessage(msg, id),
() => onDoneStreaming(id),

View file

@@ -58,7 +58,7 @@ export namespace ChatRequest {
user: {
firstName: string;
};
context?: UserContext;
context?: UserContext & WorkflowContext;
workflowContext?: WorkflowContext;
question: string;
}

View file

@@ -1,4 +1,4 @@
import { STREAM_SEPERATOR, streamRequest } from './apiUtils';
import { ResponseError, STREAM_SEPERATOR, streamRequest } from './apiUtils';
describe('streamRequest', () => {
it('should stream data from the API endpoint', async () => {
@@ -54,6 +54,54 @@ describe('streamRequest', () => {
expect(onErrorMock).not.toHaveBeenCalled();
});
it('should stream error response from the API endpoint', async () => {
const testError = { code: 500, message: 'Error happened' };
const encoder = new TextEncoder();
const mockResponse = new ReadableStream({
start(controller) {
controller.enqueue(encoder.encode(JSON.stringify(testError)));
controller.close();
},
});
const mockFetch = vi.fn().mockResolvedValue({
ok: false,
body: mockResponse,
});
global.fetch = mockFetch;
const onChunkMock = vi.fn();
const onDoneMock = vi.fn();
const onErrorMock = vi.fn();
await streamRequest(
{
baseUrl: 'https://api.example.com',
pushRef: '',
},
'/data',
{ key: 'value' },
onChunkMock,
onDoneMock,
onErrorMock,
);
expect(mockFetch).toHaveBeenCalledWith('https://api.example.com/data', {
method: 'POST',
body: JSON.stringify({ key: 'value' }),
credentials: 'include',
headers: {
'Content-Type': 'application/json',
'browser-id': expect.stringContaining('-'),
},
});
expect(onChunkMock).not.toHaveBeenCalled();
expect(onErrorMock).toHaveBeenCalledTimes(1);
expect(onErrorMock).toHaveBeenCalledWith(new ResponseError(testError.message));
});
it('should handle broken stream data', async () => {
const encoder = new TextEncoder();
const mockResponse = new ReadableStream({

View file

@@ -198,7 +198,7 @@ export function unflattenExecutionData(fullExecutionData: IExecutionFlattedRespo
return returnData;
}
export async function streamRequest<T>(
export async function streamRequest<T extends object>(
context: IRestApiContext,
apiEndpoint: string,
payload: object,
@@ -220,7 +220,7 @@ export async function streamRequest<T>(
try {
const response = await fetch(`${context.baseUrl}${apiEndpoint}`, assistantRequest);
if (response.ok && response.body) {
if (response.body) {
// Handle the streaming response
const reader = response.body.getReader();
const decoder = new TextDecoder('utf-8');
@@ -252,7 +252,18 @@
}
try {
onChunk?.(data);
if (response.ok) {
// Call chunk callback if request was successful
onChunk?.(data);
} else {
// Otherwise, call error callback
const message = 'message' in data ? data.message : response.statusText;
onError?.(
new ResponseError(String(message), {
httpStatusCode: response.status,
}),
);
}
} catch (e: unknown) {
if (e instanceof Error) {
onError?.(e);

View file

@@ -1,4 +1,4 @@
import { isObjectOrArray, isObject, searchInObject } from '@/utils/objectUtils';
import { isObjectOrArray, isObject, searchInObject, getObjectSizeInKB } from '@/utils/objectUtils';
const testData = [1, '', true, null, undefined, new Date(), () => {}].map((value) => [
value,
@@ -95,4 +95,63 @@ describe('objectUtils', () => {
assert(searchInObject({ a: ['b', { c: 'd' }] }, 'd'));
});
});
describe('getObjectSizeInKB', () => {
// Test null/undefined cases
it('returns 0 for null', () => {
expect(getObjectSizeInKB(null)).toBe(0);
});
it('returns 0 for undefined', () => {
expect(getObjectSizeInKB(undefined)).toBe(0);
});
// Test empty objects/arrays
it('returns correct size for empty object', () => {
expect(getObjectSizeInKB({})).toBe(0);
});
it('returns correct size for empty array', () => {
expect(getObjectSizeInKB([])).toBe(0);
});
// Test regular cases
it('calculates size for simple object correctly', () => {
const obj = { name: 'test' };
expect(getObjectSizeInKB(obj)).toBe(0.01);
});
it('calculates size for array correctly', () => {
const arr = [1, 2, 3];
expect(getObjectSizeInKB(arr)).toBe(0.01);
});
it('calculates size for nested object correctly', () => {
const obj = {
name: 'test',
nested: {
value: 123,
},
};
expect(getObjectSizeInKB(obj)).toBe(0.04);
});
// Test error cases
it('throws error for circular reference', () => {
type CircularObj = {
name: string;
self?: CircularObj;
};
const obj: CircularObj = { name: 'test' };
obj.self = obj;
expect(() => getObjectSizeInKB(obj)).toThrow('Failed to calculate object size');
});
it('handles special characters correctly', () => {
const obj = { name: '测试' };
expect(getObjectSizeInKB(obj)).toBe(0.02);
});
});
});

View file

@@ -18,3 +18,35 @@ export const searchInObject = (obj: ObjectOrArray, searchString: string): boolea
? searchInObject(entry, searchString)
: entry?.toString().toLowerCase().includes(searchString.toLowerCase()),
);
/**
* Calculate the size of a stringified object in KB.
* @param {unknown} obj - The object to calculate the size of
* @returns {number} The size of the object in KB
* @throws {Error} If the object is not serializable
*/
export const getObjectSizeInKB = (obj: unknown): number => {
if (obj === null || obj === undefined) {
return 0;
}
if (
(typeof obj === 'object' && Object.keys(obj).length === 0) ||
(Array.isArray(obj) && obj.length === 0)
) {
// "{}" and "[]" both take 2 bytes in UTF-8
return Number((2 / 1024).toFixed(2));
}
try {
const str = JSON.stringify(obj);
// Using TextEncoder to get the actual UTF-8 byte length (what we see in Chrome DevTools)
const bytes = new TextEncoder().encode(str).length;
const kb = bytes / 1024;
return Number(kb.toFixed(2));
} catch (error) {
throw new Error(
`Failed to calculate object size: ${error instanceof Error ? error.message : 'Unknown error'}`,
);
}
};
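// Worked examples of the calculation above (matching the unit tests):
//   JSON.stringify({ name: 'test' }) === '{"name":"test"}' -> 15 ASCII chars = 15 bytes
//   15 / 1024 = 0.0146... -> rounded to 0.01 KB
//   JSON.stringify({ name: '测试' }) -> 11 ASCII characters + 2 CJK characters (3 bytes each) = 17 bytes
//   17 / 1024 = 0.0166... -> rounded to 0.02 KB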