Mirror of https://github.com/labring/FastGPT.git (synced 2025-10-14 15:11:13 +00:00)
feature: V4.12.2 (#5525)
* feat: favorite apps & quick apps with their own configuration (#5515)
* chore: extract chat history and drawer; fix model selector
* feat: display favourite apps and make it configurable
* feat: favorite apps & quick apps with their own configuration
* fix: fix tab title and add loading state for searching
* fix: cascade delete favorite app and quick app while deleting relative app
* chore: make improvements
* fix: favourite apps ui
* fix: add permission for quick apps
* chore: fix permission & clear redundant code
* perf: chat home page code
* chatbox ui
* fix: 4.12.2-dev (#5520)
* fix: add empty placeholder; fix app quick status; fix tag and layout
* chore: add tab query for the setting tabs
* chore: use `useConfirm` hook instead of `MyModal`
* remove log
* fix: fix modal padding (#5521)
* perf: manage app
* feat: enhance model provider handling and update icon references (#5493)
* perf: model provider
* sdk package
* refactor: create llm response (#5499)
* feat: add LLM response processing functions, including the creation of stream-based and complete responses
* feat: add volta configuration for node and pnpm versions
* refactor: update LLM response handling and event structure in tool choice logic
* feat: update LLM response structure and integrate with tool choice logic
* refactor: clean up imports and remove unused streamResponse function in chat and toolChoice modules
* refactor: rename answer variable to answerBuffer for clarity in LLM response handling
* feat: enhance LLM response handling with tool options and integrate tools into chat and tool choice logic
* refactor: remove volta configuration from package.json
* refactor: reorganize LLM response types and ensure default values for token counts
* refactor: streamline LLM response handling by consolidating response structure and removing redundant checks
* refactor: enhance LLM response handling by consolidating tool options and streamlining event callbacks
* fix: build error
* refactor: update tool type definitions for consistency in tool handling
* feat: llm request function
* fix: ts
* fix: ts
* fix: ahook ts
* fix: variable name
* update lock
* ts version
* doc
* remove log
* fix: translation type
* perf: workflow status check
* fix: ts
* fix: prompt tool call
* fix: fix missing plugin interact window & make tag draggable (#5527)
* fix: incorrect select quick apps state; filter apps type (#5528)
* fix: usesafe translation
* perf: add quickapp modal

---------

Co-authored-by: 伍闲犬 <whoeverimf5@gmail.com>
Co-authored-by: Ctrlz <143257420+ctrlz526@users.noreply.github.com>
Co-authored-by: francis <zhichengfan18@gmail.com>
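The "fix: prompt tool call" item is what the new toolCall.test.ts below exercises: for models without native function calling, the model is asked to reply with a "0: " prefix for a plain answer or a "1: " prefix followed by a JSON tool call. parsePromptToolCall turns that raw reply back into an answer or a tool-call object, and promptToolCallMessageRewrite converts a normal tool-call history into that prompt format. A minimal sketch of the round trip, based only on the behaviour the tests assert (helper names and import paths come from the diff; the surrounding request loop and fixtures are hypothetical):

import {
  parsePromptToolCall,
  promptToolCallMessageRewrite
} from '@fastgpt/service/core/ai/llm/promptToolCall';
import type { ChatCompletionMessageParam, ChatCompletionTool } from '@fastgpt/global/core/ai/type';

// Illustrative fixtures only.
const tools: ChatCompletionTool[] = [
  {
    type: 'function',
    function: {
      name: 'get_weather',
      description: 'Get current weather',
      parameters: { type: 'object', properties: { location: { type: 'string' } }, required: ['location'] }
    }
  }
];
const history: ChatCompletionMessageParam[] = [{ role: 'user', content: 'Weather in Tokyo?' }];

// 1. Inject the tool list into the system prompt and flatten any earlier
//    tool calls / tool results into plain text the model can read.
const requestMessages = promptToolCallMessageRewrite(history, tools);

// 2. Send requestMessages to the model (not shown), then parse the raw text reply:
//    '0: ...' yields { answer }, '1: {...}' yields { answer: '', toolCalls: [...] }.
const { answer, toolCalls } = parsePromptToolCall(
  '1: {"name": "get_weather", "arguments": {"location": "Tokyo"}}'
);
// answer === '', toolCalls?.[0].function.name === 'get_weather'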
test/cases/service/core/ai/llm/toolCall.test.ts (new file, 935 lines)
@@ -0,0 +1,935 @@
import {
parsePromptToolCall,
promptToolCallMessageRewrite
} from '@fastgpt/service/core/ai/llm/promptToolCall';
import type { ChatCompletionMessageParam, ChatCompletionTool } from '@fastgpt/global/core/ai/type';
import { describe, expect, it } from 'vitest';

describe('parsePromptToolCall function tests', () => {
describe('Basic scenarios', () => {
it('should return answer when input starts with 0:', () => {
const input = '0: This is a regular response';
const result = parsePromptToolCall(input);

expect(result).toEqual({
answer: 'This is a regular response'
});
});

it('should return answer when input starts with 0：(Chinese colon)', () => {
const input = '0：This is a regular response with Chinese colon';
const result = parsePromptToolCall(input);

expect(result).toEqual({
answer: 'This is a regular response with Chinese colon'
});
});

it('should return trimmed answer when input starts with 0: and has extra whitespace', () => {
const input = ' 0: This is a response with whitespace ';
const result = parsePromptToolCall(input);

expect(result).toEqual({
answer: 'This is a response with whitespace'
});
});

it('should handle 0: in the middle of string when within first 6 characters', () => {
const input = 'Pre 0: This is the actual response';
const result = parsePromptToolCall(input);

expect(result).toEqual({
answer: 'This is the actual response'
});
});

it('should not process 0: when beyond first 6 characters', () => {
const input = 'Long prefix 0: This should not be processed';
const result = parsePromptToolCall(input);

expect(result).toEqual({
answer: 'Long prefix 0: This should not be processed'
});
});

it('should return original string when no 0: prefix found and no tool call', () => {
const input = 'This is just a regular string without any prefixes';
const result = parsePromptToolCall(input);

expect(result).toEqual({
answer: 'This is just a regular string without any prefixes'
});
});

it('should parse valid tool call with 1:', () => {
const input = '1: {"name": "get_weather", "arguments": {"location": "Tokyo"}}';
const result = parsePromptToolCall(input);

expect(result.answer).toBe('');
expect(result.toolCalls).toHaveLength(1);
expect(result.toolCalls![0].function.name).toBe('get_weather');
expect(result.toolCalls![0].function.arguments).toBe('{"location":"Tokyo"}');
expect(result.toolCalls![0].type).toBe('function');
expect(result.toolCalls![0].id).toBeDefined();
expect(typeof result.toolCalls![0].id).toBe('string');
});

it('should parse valid tool call with 1：(Chinese colon)', () => {
const input = '1：{"name": "calculate", "arguments": {"expression": "2+2"}}';
const result = parsePromptToolCall(input);

expect(result.answer).toBe('');
expect(result.toolCalls).toHaveLength(1);
expect(result.toolCalls![0].function.name).toBe('calculate');
expect(result.toolCalls![0].function.arguments).toBe('{"expression":"2+2"}');
});
});

describe('Tool call parsing', () => {
it('should handle tool call with nested object arguments', () => {
const input =
'1: {"name": "complex_tool", "arguments": {"user": {"name": "John", "age": 30}, "settings": {"verbose": true}}}';
const result = parsePromptToolCall(input);

expect(result.answer).toBe('');
expect(result.toolCalls).toHaveLength(1);
expect(result.toolCalls![0].function.name).toBe('complex_tool');
expect(JSON.parse(result.toolCalls![0].function.arguments)).toEqual({
user: { name: 'John', age: 30 },
settings: { verbose: true }
});
});

it('should handle tool call with array arguments', () => {
const input =
'1: {"name": "process_list", "arguments": {"items": [1, 2, 3], "options": ["sort", "filter"]}}';
const result = parsePromptToolCall(input);

expect(result.answer).toBe('');
expect(result.toolCalls).toHaveLength(1);
expect(result.toolCalls![0].function.name).toBe('process_list');
expect(JSON.parse(result.toolCalls![0].function.arguments)).toEqual({
items: [1, 2, 3],
options: ['sort', 'filter']
});
});

it('should handle tool call with empty arguments', () => {
const input = '1: {"name": "simple_tool", "arguments": {}}';
const result = parsePromptToolCall(input);

expect(result.answer).toBe('');
expect(result.toolCalls).toHaveLength(1);
expect(result.toolCalls![0].function.name).toBe('simple_tool');
expect(result.toolCalls![0].function.arguments).toBe('{}');
});

it('should handle tool call with extra content before and after JSON', () => {
const input =
'Some text 1: extra {"name": "test_tool", "arguments": {"param": "value"}} more text';
const result = parsePromptToolCall(input);

expect(result.answer).toBe('');
expect(result.toolCalls).toHaveLength(1);
expect(result.toolCalls![0].function.name).toBe('test_tool');
expect(result.toolCalls![0].function.arguments).toBe('{"param":"value"}');
});
});

describe('Edge cases and error handling', () => {
it('should return error message for malformed JSON with 1:', () => {
const input = '1: {"name": "tool", "arguments": invalid json}';
const result = parsePromptToolCall(input);

expect(result).toEqual({
answer: 'Tool run error'
});
});

it('should return error message for incomplete JSON with 1:', () => {
const input = '1: {"name": "tool"';
const result = parsePromptToolCall(input);

expect(result).toEqual({
answer: 'Tool run error'
});
});

it('should handle empty JSON object with 1: (creates tool call with undefined properties)', () => {
const input = '1: {}';
const result = parsePromptToolCall(input);

// Empty object {} doesn't have name property, so it parses but creates invalid tool call
expect(result.answer).toBe('');
expect(result.toolCalls).toHaveLength(1);
expect(result.toolCalls![0].function.name).toBeUndefined();
});

it('should handle empty string input', () => {
const input = '';
const result = parsePromptToolCall(input);

expect(result).toEqual({
answer: ''
});
});

it('should handle whitespace-only input', () => {
const input = ' \n\t ';
const result = parsePromptToolCall(input);

expect(result).toEqual({
answer: ''
});
});

it('should handle input with only prefix', () => {
const input = '1:';
const result = parsePromptToolCall(input);

expect(result).toEqual({
answer: 'Tool run error'
});
});

it('should handle input with only prefix and whitespace', () => {
const input = '1: ';
const result = parsePromptToolCall(input);

expect(result).toEqual({
answer: 'Tool run error'
});
});

it('should handle JSON5 syntax in tool call', () => {
const input = "1: {name: 'test_tool', arguments: {param: 'value', number: 42}}";
const result = parsePromptToolCall(input);

expect(result.answer).toBe('');
expect(result.toolCalls).toHaveLength(1);
expect(result.toolCalls![0].function.name).toBe('test_tool');
expect(JSON.parse(result.toolCalls![0].function.arguments)).toEqual({
param: 'value',
number: 42
});
});

it('should handle tool call with simple strings (no escaping needed)', () => {
const input =
'1: {"name": "search", "arguments": {"query": "Hello world", "filter": "type:document"}}';
const result = parsePromptToolCall(input);

expect(result.answer).toBe('');
expect(result.toolCalls).toHaveLength(1);
expect(result.toolCalls![0].function.name).toBe('search');
expect(JSON.parse(result.toolCalls![0].function.arguments)).toEqual({
query: 'Hello world',
filter: 'type:document'
});
});

it('should handle input with multiple 0: occurrences - does not process if first one is beyond position 5', () => {
const input = 'First 0: Second part 0: Third part';
const result = parsePromptToolCall(input);

// The first '0:' is at position 6, which is > 5, so it's not processed
expect(result).toEqual({
answer: 'First 0: Second part 0: Third part'
});
});

it('should handle input with multiple 1: occurrences - fails to parse when extra text interferes', () => {
const input =
'Text 1: {"name": "tool1", "arguments": {"param": "value"}} more text 1: {"name": "tool2", "arguments": {}}';
const result = parsePromptToolCall(input);

// The sliceJsonStr function can't properly extract JSON when there's extra text after
expect(result).toEqual({
answer: 'Tool run error'
});
});

it('should handle tool name with underscores and numbers', () => {
const input = '1: {"name": "get_user_data_v2", "arguments": {"user_id": 123}}';
const result = parsePromptToolCall(input);

expect(result.answer).toBe('');
expect(result.toolCalls).toHaveLength(1);
expect(result.toolCalls![0].function.name).toBe('get_user_data_v2');
expect(JSON.parse(result.toolCalls![0].function.arguments)).toEqual({
user_id: 123
});
});

it('should handle very long strings', () => {
const longString = 'A'.repeat(10000);
const input = `0: ${longString}`;
const result = parsePromptToolCall(input);

expect(result).toEqual({
answer: longString
});
});

it('should handle Unicode characters in tool arguments', () => {
const input =
'1: {"name": "translate", "arguments": {"text": "你好世界", "from": "zh", "to": "en"}}';
const result = parsePromptToolCall(input);

expect(result.answer).toBe('');
expect(result.toolCalls).toHaveLength(1);
expect(result.toolCalls![0].function.name).toBe('translate');
expect(JSON.parse(result.toolCalls![0].function.arguments)).toEqual({
text: '你好世界',
from: 'zh',
to: 'en'
});
});

it('should handle mixed Chinese and English colons', () => {
const input1 = '0: Answer with English colon';
const input2 = '0：Answer with Chinese colon';
const input3 = '1: {"name": "tool", "arguments": {"key": "value"}}';
const input4 = '1：{"name": "tool", "arguments": {"key": "value"}}';

const result1 = parsePromptToolCall(input1);
const result2 = parsePromptToolCall(input2);
const result3 = parsePromptToolCall(input3);
const result4 = parsePromptToolCall(input4);

expect(result1.answer).toBe('Answer with English colon');
expect(result2.answer).toBe('Answer with Chinese colon');
expect(result3.toolCalls).toHaveLength(1);
expect(result4.toolCalls).toHaveLength(1);
});
});

describe('Boundary conditions', () => {
it('should handle input with only numbers', () => {
const input = '12345';
const result = parsePromptToolCall(input);

expect(result).toEqual({
answer: '12345'
});
});

it('should handle tool call with null arguments', () => {
const input = '1: {"name": "null_test", "arguments": null}';
const result = parsePromptToolCall(input);

expect(result.answer).toBe('');
expect(result.toolCalls).toHaveLength(1);
expect(result.toolCalls![0].function.name).toBe('null_test');
expect(result.toolCalls![0].function.arguments).toBe('null');
});

it('should handle tool call with boolean and number values', () => {
const input =
'1: {"name": "mixed_types", "arguments": {"flag": true, "count": 0, "ratio": 3.14}}';
const result = parsePromptToolCall(input);

expect(result.answer).toBe('');
expect(result.toolCalls).toHaveLength(1);
expect(result.toolCalls![0].function.name).toBe('mixed_types');
expect(JSON.parse(result.toolCalls![0].function.arguments)).toEqual({
flag: true,
count: 0,
ratio: 3.14
});
});

it('should handle newlines in input - 0: beyond position limit', () => {
const input = 'Line 1\n0: Line 2\nLine 3';
const result = parsePromptToolCall(input);

// The '0:' appears after position 6, so it's not processed
expect(result).toEqual({
answer: 'Line 1\n0: Line 2\nLine 3'
});
});

it('should handle tabs and special whitespace', () => {
const input = '\t0:\tThis\thas\ttabs\t';
const result = parsePromptToolCall(input);

expect(result).toEqual({
answer: 'This\thas\ttabs'
});
});

it('should not process 0: when it appears after position 5', () => {
const input = 'Longer prefix 0: This should not be processed';
const result = parsePromptToolCall(input);

expect(result).toEqual({
answer: 'Longer prefix 0: This should not be processed'
});
});

it('should handle 0: at exactly position 5', () => {
const input = '12345 0: Should not be processed';
const result = parsePromptToolCall(input);

expect(result).toEqual({
answer: '12345 0: Should not be processed'
});
});

it('should handle Chinese colon priority (only when English colon not found)', () => {
const input = '0：Chinese colon without English';
const result = parsePromptToolCall(input);

expect(result).toEqual({
answer: 'Chinese colon without English'
});
});

it('should prioritize English colon over Chinese colon - but not when beyond position limit', () => {
const input = '0: Chinese 0: English colon';
const result = parsePromptToolCall(input);

// The English '0:' is at position 11, beyond the limit, so returns original string
expect(result).toEqual({
answer: '0: Chinese 0: English colon'
});
});

it('should handle valid 0: within newline constraints', () => {
const input = '0: Line with proper prefix';
const result = parsePromptToolCall(input);

expect(result).toEqual({
answer: 'Line with proper prefix'
});
});

it('should handle simple 1: tool call that works', () => {
const input = '1: {"name": "tool1", "arguments": {"param": "value"}}';
const result = parsePromptToolCall(input);

expect(result.answer).toBe('');
expect(result.toolCalls).toHaveLength(1);
expect(result.toolCalls![0].function.name).toBe('tool1');
expect(JSON.parse(result.toolCalls![0].function.arguments)).toEqual({
param: 'value'
});
});
});
});

describe('promptToolCallMessageRewrite function tests', () => {
describe('System message handling', () => {
it('should add system message when none exists', () => {
const messages: ChatCompletionMessageParam[] = [{ role: 'user', content: 'Hello' }];
const tools: ChatCompletionTool[] = [
{
type: 'function',
function: {
name: 'get_weather',
description: 'Get weather info',
parameters: { type: 'object', properties: {} }
}
}
];

const result = promptToolCallMessageRewrite(messages, tools);

expect(result).toHaveLength(2);
expect(result[0].role).toBe('system');
expect(result[0].content).toContain('你是一个智能机器人');
expect(result[0].content).toContain('get_weather');
expect(result[1]).toEqual({ role: 'user', content: 'Hello' });
});

it('should update existing string system message', () => {
const messages: ChatCompletionMessageParam[] = [
{ role: 'system', content: 'You are helpful' },
{ role: 'user', content: 'Hello' }
];
const tools: ChatCompletionTool[] = [
{
type: 'function',
function: {
name: 'calculator',
description: 'Calculate math',
parameters: { type: 'object', properties: {} }
}
}
];

const result = promptToolCallMessageRewrite(messages, tools);

expect(result).toHaveLength(2);
expect(result[0].role).toBe('system');
expect(result[0].content).toContain('You are helpful');
expect(result[0].content).toContain('你是一个智能机器人');
expect(result[0].content).toContain('calculator');
});

it('should update existing array system message', () => {
const messages: ChatCompletionMessageParam[] = [
{
role: 'system',
content: [{ type: 'text', text: 'You are helpful' }]
},
{ role: 'user', content: 'Hello' }
];
const tools: ChatCompletionTool[] = [
{
type: 'function',
function: {
name: 'search',
description: 'Search tool',
parameters: { type: 'object', properties: {} }
}
}
];

const result = promptToolCallMessageRewrite(messages, tools);

expect(result).toHaveLength(2);
expect(result[0].role).toBe('system');
expect(Array.isArray(result[0].content)).toBe(true);
const content = result[0].content as Array<any>;
expect(content).toHaveLength(2);
expect(content[0]).toEqual({ type: 'text', text: 'You are helpful' });
expect(content[1].type).toBe('text');
expect(content[1].text).toContain('你是一个智能机器人');
expect(content[1].text).toContain('search');
});

it('should throw error for invalid system message content', () => {
const messages: ChatCompletionMessageParam[] = [
{ role: 'system', content: null as any },
{ role: 'user', content: 'Hello' }
];
const tools: ChatCompletionTool[] = [
{
type: 'function',
function: {
name: 'test_tool',
description: 'Test',
parameters: { type: 'object', properties: {} }
}
}
];

expect(() => promptToolCallMessageRewrite(messages, tools)).toThrow(
'Prompt call invalid input'
);
});

it('should handle multiple tools in system message', () => {
const messages: ChatCompletionMessageParam[] = [{ role: 'user', content: 'Hello' }];
const tools: ChatCompletionTool[] = [
{
type: 'function',
function: {
name: 'tool1',
description: 'First tool',
parameters: { type: 'object', properties: { param1: { type: 'string' } } }
}
},
{
type: 'function',
function: {
name: 'tool2',
description: 'Second tool',
parameters: { type: 'object', properties: { param2: { type: 'number' } } }
}
}
];

const result = promptToolCallMessageRewrite(messages, tools);

expect(result[0].content).toContain('tool1');
expect(result[0].content).toContain('tool2');
expect(result[0].content).toContain('First tool');
expect(result[0].content).toContain('Second tool');
});
});

describe('Assistant message rewriting', () => {
it('should rewrite assistant message with string content', () => {
const messages: ChatCompletionMessageParam[] = [
{ role: 'user', content: 'Hello' },
{ role: 'assistant', content: 'Hi there!' }
];
const tools: ChatCompletionTool[] = [];

const result = promptToolCallMessageRewrite(messages, tools);

expect(result[2].role).toBe('assistant');
expect(result[2].content).toBe('0: Hi there!');
});

it('should rewrite assistant message with tool calls', () => {
const messages: ChatCompletionMessageParam[] = [
{ role: 'user', content: 'What is the weather?' },
{
role: 'assistant',
content: null,
tool_calls: [
{
id: 'call_123',
type: 'function',
function: {
name: 'get_weather',
arguments: '{"location": "Tokyo"}'
}
}
]
}
];
const tools: ChatCompletionTool[] = [];

const result = promptToolCallMessageRewrite(messages, tools);

expect(result[2].role).toBe('assistant');
expect(result[2].content).toBe(
'1: {"name":"get_weather","arguments":"{\\"location\\": \\"Tokyo\\"}"}'
);
expect(result[2]).not.toHaveProperty('tool_calls');
});

it('should skip assistant message with no content and no tool calls', () => {
const messages: ChatCompletionMessageParam[] = [
{ role: 'user', content: 'Hello' },
{ role: 'assistant', content: null }
];
const tools: ChatCompletionTool[] = [];

const result = promptToolCallMessageRewrite(messages, tools);

expect(result[2].role).toBe('assistant');
expect(result[2].content).toBeNull();
});

it('should handle assistant message with multiple tool calls (only first one used)', () => {
const messages: ChatCompletionMessageParam[] = [
{ role: 'user', content: 'Hello' },
{
role: 'assistant',
content: null,
tool_calls: [
{
id: 'call_1',
type: 'function',
function: { name: 'tool1', arguments: '{"param": "value1"}' }
},
{
id: 'call_2',
type: 'function',
function: { name: 'tool2', arguments: '{"param": "value2"}' }
}
]
}
];
const tools: ChatCompletionTool[] = [];

const result = promptToolCallMessageRewrite(messages, tools);

expect(result[2].content).toBe(
'1: {"name":"tool1","arguments":"{\\"param\\": \\"value1\\"}"}'
);
expect(result[2]).not.toHaveProperty('tool_calls');
});
});

describe('Tool message rewriting', () => {
it('should convert tool message to user message', () => {
const messages: ChatCompletionMessageParam[] = [
{ role: 'user', content: 'What is the weather?' },
{
role: 'tool',
tool_call_id: 'call_123',
content: 'The weather is sunny'
}
];
const tools: ChatCompletionTool[] = [];

const result = promptToolCallMessageRewrite(messages, tools);

expect(result[2].role).toBe('user');
expect(result[2].content).toBe('<ToolResponse>\nThe weather is sunny\n</ToolResponse>');
expect(result[2]).not.toHaveProperty('tool_call_id');
});

it('should handle multiple tool messages', () => {
const messages: ChatCompletionMessageParam[] = [
{ role: 'user', content: 'Hello' },
{
role: 'tool',
tool_call_id: 'call_1',
content: 'Result 1'
},
{
role: 'tool',
tool_call_id: 'call_2',
content: 'Result 2'
}
];
const tools: ChatCompletionTool[] = [];

const result = promptToolCallMessageRewrite(messages, tools);

expect(result[2].role).toBe('user');
expect(result[2].content).toBe('<ToolResponse>\nResult 1\n</ToolResponse>');
expect(result[3].role).toBe('user');
expect(result[3].content).toBe('<ToolResponse>\nResult 2\n</ToolResponse>');
});

it('should handle tool message with complex content', () => {
const messages: ChatCompletionMessageParam[] = [
{ role: 'user', content: 'Test' },
{
role: 'tool',
tool_call_id: 'call_123',
content: JSON.stringify({ result: 'success', data: [1, 2, 3] })
}
];
const tools: ChatCompletionTool[] = [];

const result = promptToolCallMessageRewrite(messages, tools);

expect(result[2].role).toBe('user');
expect(result[2].content).toBe(
'<ToolResponse>\n{"result":"success","data":[1,2,3]}\n</ToolResponse>'
);
});
});

describe('Message immutability', () => {
it('should not mutate original messages', () => {
const originalMessages: ChatCompletionMessageParam[] = [
{ role: 'user', content: 'Hello' },
{ role: 'assistant', content: 'Hi there!' },
{
role: 'tool',
tool_call_id: 'call_123',
content: 'Tool result'
}
];
const tools: ChatCompletionTool[] = [
{
type: 'function',
function: {
name: 'test_tool',
description: 'Test',
parameters: { type: 'object', properties: {} }
}
}
];

const originalMessagesCopy = JSON.parse(JSON.stringify(originalMessages));
promptToolCallMessageRewrite(originalMessages, tools);

expect(originalMessages).toEqual(originalMessagesCopy);
});

it('should handle deeply nested message content without mutation', () => {
const originalMessages: ChatCompletionMessageParam[] = [
{
role: 'system',
content: [{ type: 'text', text: 'Original system message' }]
},
{ role: 'user', content: 'Hello' }
];
const tools: ChatCompletionTool[] = [
{
type: 'function',
function: {
name: 'test_tool',
description: 'Test',
parameters: { type: 'object', properties: {} }
}
}
];

const originalMessagesCopy = JSON.parse(JSON.stringify(originalMessages));
promptToolCallMessageRewrite(originalMessages, tools);

expect(originalMessages).toEqual(originalMessagesCopy);
});
});

describe('Complex conversation flows', () => {
it('should handle complete conversation with all message types', () => {
const messages: ChatCompletionMessageParam[] = [
{ role: 'system', content: 'You are helpful' },
{ role: 'user', content: 'What is the weather in Tokyo?' },
{
role: 'assistant',
content: null,
tool_calls: [
{
id: 'call_123',
type: 'function',
function: {
name: 'get_weather',
arguments: '{"location": "Tokyo"}'
}
}
]
},
{
role: 'tool',
tool_call_id: 'call_123',
content: 'The weather in Tokyo is sunny, 25°C'
},
{ role: 'assistant', content: 'The weather in Tokyo is sunny with a temperature of 25°C.' }
];
const tools: ChatCompletionTool[] = [
{
type: 'function',
function: {
name: 'get_weather',
description: 'Get current weather',
parameters: {
type: 'object',
properties: {
location: { type: 'string', description: 'City name' }
},
required: ['location']
}
}
}
];

const result = promptToolCallMessageRewrite(messages, tools);

expect(result).toHaveLength(5);

// System message should be updated
expect(result[0].role).toBe('system');
expect(result[0].content).toContain('You are helpful');
expect(result[0].content).toContain('get_weather');

// User message unchanged
expect(result[1]).toEqual({ role: 'user', content: 'What is the weather in Tokyo?' });

// Assistant with tool call should be rewritten
expect(result[2].role).toBe('assistant');
expect(result[2].content).toBe(
'1: {"name":"get_weather","arguments":"{\\"location\\": \\"Tokyo\\"}"}'
);
expect(result[2]).not.toHaveProperty('tool_calls');

// Tool message should become user message
expect(result[3].role).toBe('user');
expect(result[3].content).toBe(
'<ToolResponse>\nThe weather in Tokyo is sunny, 25°C\n</ToolResponse>'
);

// Final assistant message should be prefixed
expect(result[4].role).toBe('assistant');
expect(result[4].content).toBe(
'0: The weather in Tokyo is sunny with a temperature of 25°C.'
);
});

it('should handle empty messages array', () => {
const messages: ChatCompletionMessageParam[] = [];
const tools: ChatCompletionTool[] = [
{
type: 'function',
function: {
name: 'test_tool',
description: 'Test',
parameters: { type: 'object', properties: {} }
}
}
];

const result = promptToolCallMessageRewrite(messages, tools);

expect(result).toHaveLength(1);
expect(result[0].role).toBe('system');
expect(result[0].content).toContain('你是一个智能机器人');
expect(result[0].content).toContain('test_tool');
});

it('should handle empty tools array', () => {
const messages: ChatCompletionMessageParam[] = [
{ role: 'user', content: 'Hello' },
{ role: 'assistant', content: 'Hi there!' }
];
const tools: ChatCompletionTool[] = [];

const result = promptToolCallMessageRewrite(messages, tools);

expect(result).toHaveLength(3);
expect(result[0].role).toBe('system');
expect(result[0].content).toContain('你是一个智能机器人');
expect(result[0].content).toContain('[]'); // Empty tools array in JSON
expect(result[2].content).toBe('0: Hi there!');
});
});

describe('Edge cases', () => {
it('should handle assistant message with empty string content', () => {
const messages: ChatCompletionMessageParam[] = [
{ role: 'user', content: 'Hello' },
{ role: 'assistant', content: '' }
];
const tools: ChatCompletionTool[] = [];

const result = promptToolCallMessageRewrite(messages, tools);

expect(result[2].role).toBe('assistant');
expect(result[2].content).toBe(''); // Empty string is falsy, so not processed
});

it('should handle tool message with empty content', () => {
const messages: ChatCompletionMessageParam[] = [
{ role: 'user', content: 'Hello' },
{
role: 'tool',
tool_call_id: 'call_123',
content: ''
}
];
const tools: ChatCompletionTool[] = [];

const result = promptToolCallMessageRewrite(messages, tools);

expect(result[2].role).toBe('user');
expect(result[2].content).toBe('<ToolResponse>\n\n</ToolResponse>');
});

it('should handle mixed message types in sequence', () => {
const messages: ChatCompletionMessageParam[] = [
{ role: 'user', content: 'Hello' },
{ role: 'assistant', content: 'Hi!' },
{ role: 'user', content: 'How are you?' },
{
role: 'assistant',
content: null,
tool_calls: [
{
id: 'call_1',
type: 'function',
function: { name: 'check_status', arguments: '{}' }
}
]
},
{
role: 'tool',
tool_call_id: 'call_1',
content: 'Status: OK'
}
];
const tools: ChatCompletionTool[] = [];

const result = promptToolCallMessageRewrite(messages, tools);

expect(result).toHaveLength(6); // system + 5 original
expect(result[1]).toEqual({ role: 'user', content: 'Hello' });
expect(result[2].content).toBe('0: Hi!');
expect(result[3]).toEqual({ role: 'user', content: 'How are you?' });
expect(result[4].content).toBe('1: {"name":"check_status","arguments":"{}"}');
expect(result[5].content).toBe('<ToolResponse>\nStatus: OK\n</ToolResponse>');
});
});
});
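For reference, the rewrite exercised above reshapes messages in a fixed way; this is a summary inferred from the expectations in toolCall.test.ts, not an additional specification:

// Before rewrite (native tool-call history):
//   { role: 'assistant', tool_calls: [{ function: { name: 'get_weather', arguments: '{"location": "Tokyo"}' } }] }
//   { role: 'tool', tool_call_id: 'call_123', content: 'The weather is sunny' }
// After promptToolCallMessageRewrite:
//   { role: 'assistant', content: '1: {"name":"get_weather","arguments":"..."}' }
//   { role: 'user', content: '<ToolResponse>\nThe weather is sunny\n</ToolResponse>' }
// Plain assistant text gets a '0: ' prefix, and the tool list is appended to the system prompt.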
test/cases/service/core/ai/llm/utils.test.ts (new file, 862 lines)
@@ -0,0 +1,862 @@
import {
loadRequestMessages,
filterGPTMessageByMaxContext
} from '@fastgpt/service/core/ai/llm/utils';
import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { describe, expect, it, vi, beforeEach } from 'vitest';

// Mock external dependencies
vi.mock('@fastgpt/service/common/string/tiktoken/index', () => ({
countGptMessagesTokens: vi.fn()
}));

vi.mock('@fastgpt/service/common/file/image/utils', () => ({
getImageBase64: vi.fn()
}));

vi.mock('@fastgpt/web/i18n/utils', () => ({
i18nT: vi.fn((key: string) => key)
}));

vi.mock('@fastgpt/service/common/system/log', () => ({
addLog: {
info: vi.fn(),
warn: vi.fn()
}
}));

vi.mock('axios', () => ({
default: {
head: vi.fn()
}
}));

import { countGptMessagesTokens } from '@fastgpt/service/common/string/tiktoken/index';
import { getImageBase64 } from '@fastgpt/service/common/file/image/utils';
import { addLog } from '@fastgpt/service/common/system/log';

// @ts-ignore
import axios from 'axios';

const mockCountGptMessagesTokens = vi.mocked(countGptMessagesTokens);
const mockGetImageBase64 = vi.mocked(getImageBase64);
const mockAxiosHead = vi.mocked(axios.head);

describe('filterGPTMessageByMaxContext function tests', () => {
beforeEach(() => {
vi.clearAllMocks();
mockCountGptMessagesTokens.mockResolvedValue(10);
});

describe('Basic filtering scenarios', () => {
it('should return empty array for invalid input', async () => {
const result = await filterGPTMessageByMaxContext({
messages: null as any,
maxContext: 1000
});
expect(result).toEqual([]);

const result2 = await filterGPTMessageByMaxContext({
messages: undefined as any,
maxContext: 1000
});
expect(result2).toEqual([]);
});

it('should return messages unchanged when less than 4 messages', async () => {
const messages: ChatCompletionMessageParam[] = [
{ role: ChatCompletionRequestMessageRoleEnum.System, content: 'You are helpful' },
{ role: ChatCompletionRequestMessageRoleEnum.User, content: 'Hello' },
{ role: ChatCompletionRequestMessageRoleEnum.Assistant, content: 'Hi there!' }
];

const result = await filterGPTMessageByMaxContext({
messages,
maxContext: 1000
});

expect(result).toEqual(messages);
});

it('should return only system prompts when no chat prompts exist', async () => {
const messages: ChatCompletionMessageParam[] = [
{ role: ChatCompletionRequestMessageRoleEnum.System, content: 'System prompt 1' },
{ role: ChatCompletionRequestMessageRoleEnum.System, content: 'System prompt 2' }
];

const result = await filterGPTMessageByMaxContext({
messages,
maxContext: 1000
});

expect(result).toEqual(messages);
});
});

describe('System and chat prompt separation', () => {
it('should correctly separate system prompts from chat prompts', async () => {
const messages: ChatCompletionMessageParam[] = [
{ role: ChatCompletionRequestMessageRoleEnum.System, content: 'System 1' },
{ role: ChatCompletionRequestMessageRoleEnum.System, content: 'System 2' },
{ role: ChatCompletionRequestMessageRoleEnum.User, content: 'User 1' },
{ role: ChatCompletionRequestMessageRoleEnum.Assistant, content: 'Assistant 1' },
{ role: ChatCompletionRequestMessageRoleEnum.User, content: 'User 2' }
];

mockCountGptMessagesTokens
.mockResolvedValueOnce(20) // system prompts
.mockResolvedValueOnce(30) // user 2
.mockResolvedValueOnce(25) // assistant 1 + user 1
.mockResolvedValueOnce(15); // user 1

const result = await filterGPTMessageByMaxContext({
messages,
maxContext: 1000
});

expect(result).toHaveLength(5);
expect(
result.slice(0, 2).every((msg) => msg.role === ChatCompletionRequestMessageRoleEnum.System)
).toBe(true);
expect(
result.slice(2).every((msg) => msg.role !== ChatCompletionRequestMessageRoleEnum.System)
).toBe(true);
});
});

describe('Context limiting behavior', () => {
it('should filter out messages when context limit is exceeded', async () => {
const messages: ChatCompletionMessageParam[] = [
{ role: ChatCompletionRequestMessageRoleEnum.System, content: 'System' },
{ role: ChatCompletionRequestMessageRoleEnum.User, content: 'User 1' },
{ role: ChatCompletionRequestMessageRoleEnum.Assistant, content: 'Assistant 1' },
{ role: ChatCompletionRequestMessageRoleEnum.User, content: 'User 2' },
{ role: ChatCompletionRequestMessageRoleEnum.Assistant, content: 'Assistant 2' },
{ role: ChatCompletionRequestMessageRoleEnum.User, content: 'User 3' }
];

mockCountGptMessagesTokens
.mockResolvedValueOnce(50) // system prompts
.mockResolvedValueOnce(60) // user 3 (exceeds remaining context)
.mockResolvedValueOnce(40); // assistant 2 + user 2

const result = await filterGPTMessageByMaxContext({
messages,
maxContext: 100
});

// Should keep system + last complete conversation that fits
expect(result).toHaveLength(2);
expect(result[0].role).toBe(ChatCompletionRequestMessageRoleEnum.System);
expect(result[1].content).toBe('User 3');
});

it('should preserve at least one conversation round even if it exceeds context', async () => {
const messages: ChatCompletionMessageParam[] = [
{ role: ChatCompletionRequestMessageRoleEnum.System, content: 'System' },
{ role: ChatCompletionRequestMessageRoleEnum.User, content: 'Large user message' },
{
role: ChatCompletionRequestMessageRoleEnum.Assistant,
content: 'Large assistant response'
}
];

mockCountGptMessagesTokens
.mockResolvedValueOnce(20) // system prompts
.mockResolvedValueOnce(200); // user + assistant (exceeds remaining context)

const result = await filterGPTMessageByMaxContext({
messages,
maxContext: 50
});

// Should still keep the conversation even though it exceeds context
expect(result).toHaveLength(3);
expect(result[1].content).toBe('Large user message');
expect(result[2].content).toBe('Large assistant response');
});
});

describe('Complex conversation patterns', () => {
it('should handle user-assistant-tool conversation pattern', async () => {
const messages: ChatCompletionMessageParam[] = [
{ role: ChatCompletionRequestMessageRoleEnum.System, content: 'System' },
{ role: ChatCompletionRequestMessageRoleEnum.User, content: 'User 1' },
{ role: ChatCompletionRequestMessageRoleEnum.Assistant, content: 'Assistant 1' },
{
role: ChatCompletionRequestMessageRoleEnum.Tool,
tool_call_id: 'call1',
content: 'Tool 1'
},
{ role: ChatCompletionRequestMessageRoleEnum.User, content: 'User 2' },
{ role: ChatCompletionRequestMessageRoleEnum.Assistant, content: 'Assistant 2' },
{
role: ChatCompletionRequestMessageRoleEnum.Tool,
tool_call_id: 'call2',
content: 'Tool 2'
},
{
role: ChatCompletionRequestMessageRoleEnum.Tool,
tool_call_id: 'call3',
content: 'Tool 3'
}
];

mockCountGptMessagesTokens
.mockResolvedValueOnce(20) // system
.mockResolvedValueOnce(50) // last group: assistant 2 + tool 2 + tool 3 + user 2
.mockResolvedValueOnce(40); // previous group: assistant 1 + tool 1 + user 1

const result = await filterGPTMessageByMaxContext({
messages,
maxContext: 1000
});

expect(result).toHaveLength(8);
expect(result[0].role).toBe(ChatCompletionRequestMessageRoleEnum.System);
});

it('should handle multiple assistant messages in sequence', async () => {
const messages: ChatCompletionMessageParam[] = [
{ role: ChatCompletionRequestMessageRoleEnum.User, content: 'User 1' },
{ role: ChatCompletionRequestMessageRoleEnum.Assistant, content: 'Assistant 1' },
{ role: ChatCompletionRequestMessageRoleEnum.Assistant, content: 'Assistant 2' },
{
role: ChatCompletionRequestMessageRoleEnum.Tool,
tool_call_id: 'call1',
content: 'Tool result'
},
{ role: ChatCompletionRequestMessageRoleEnum.User, content: 'User 2' }
];

mockCountGptMessagesTokens
.mockResolvedValueOnce(30) // user 2
.mockResolvedValueOnce(60); // assistant 1 + assistant 2 + tool + user 1

const result = await filterGPTMessageByMaxContext({
messages,
maxContext: 1000
});

expect(result).toHaveLength(5);
});
});

describe('Edge cases', () => {
it('should handle empty messages array', async () => {
const result = await filterGPTMessageByMaxContext({
messages: [],
maxContext: 1000
});

expect(result).toEqual([]);
});

it('should handle zero maxContext', async () => {
const messages: ChatCompletionMessageParam[] = [
{ role: ChatCompletionRequestMessageRoleEnum.System, content: 'System' },
{ role: ChatCompletionRequestMessageRoleEnum.User, content: 'User' }
];

mockCountGptMessagesTokens
.mockResolvedValueOnce(10) // system
.mockResolvedValueOnce(20); // user

const result = await filterGPTMessageByMaxContext({
messages,
maxContext: 0
});

// Should still preserve at least one conversation
expect(result).toHaveLength(2);
});

it('should handle negative maxContext', async () => {
const messages: ChatCompletionMessageParam[] = [
{ role: ChatCompletionRequestMessageRoleEnum.User, content: 'User' }
];

mockCountGptMessagesTokens.mockResolvedValueOnce(20);

const result = await filterGPTMessageByMaxContext({
messages,
maxContext: -100
});

expect(result).toHaveLength(1);
});
});
});

describe('loadRequestMessages function tests', () => {
beforeEach(() => {
vi.clearAllMocks();
mockGetImageBase64.mockResolvedValue({
completeBase64: 'data:image/png;base64,test',
base64: 'test',
mime: 'image/png'
});
mockAxiosHead.mockResolvedValue({ status: 200 });
});

describe('Basic message processing', () => {
it('should reject empty messages array', async () => {
await expect(
loadRequestMessages({
messages: []
})
).rejects.toMatch('common:core.chat.error.Messages empty');
});

it('should process simple conversation', async () => {
const messages: ChatCompletionMessageParam[] = [
{ role: ChatCompletionRequestMessageRoleEnum.System, content: 'You are helpful' },
{ role: ChatCompletionRequestMessageRoleEnum.User, content: 'Hello' },
{ role: ChatCompletionRequestMessageRoleEnum.Assistant, content: 'Hi there!' }
];

const result = await loadRequestMessages({ messages });

expect(result).toHaveLength(3);
expect(result[0].role).toBe(ChatCompletionRequestMessageRoleEnum.System);
expect(result[0].content).toBe('You are helpful');
expect(result[1].role).toBe(ChatCompletionRequestMessageRoleEnum.User);
expect(result[1].content).toBe('Hello');
expect(result[2].role).toBe(ChatCompletionRequestMessageRoleEnum.Assistant);
expect(result[2].content).toBe('Hi there!');
});
});

describe('System message processing', () => {
it('should handle string system content', async () => {
const messages: ChatCompletionMessageParam[] = [
{ role: ChatCompletionRequestMessageRoleEnum.System, content: 'System prompt' }
];

const result = await loadRequestMessages({ messages });

expect(result).toHaveLength(1);
expect(result[0].content).toBe('System prompt');
});

it('should handle array system content', async () => {
const messages: ChatCompletionMessageParam[] = [
{
role: ChatCompletionRequestMessageRoleEnum.System,
content: [
{ type: 'text', text: 'Part 1' },
{ type: 'text', text: 'Part 2' }
]
}
];

const result = await loadRequestMessages({ messages });

expect(result).toHaveLength(1);
expect(result[0].content).toBe('Part 1\n\nPart 2');
});

it('should filter out empty text in system content array', async () => {
const messages: ChatCompletionMessageParam[] = [
{
role: ChatCompletionRequestMessageRoleEnum.System,
content: [
{ type: 'text', text: 'Valid text' },
{ type: 'text', text: '' },
{ type: 'text', text: 'Another valid text' }
]
}
];

const result = await loadRequestMessages({ messages });

expect(result).toHaveLength(1);
expect(result[0].content).toBe('Valid text\n\nAnother valid text');
});

it('should skip system message with empty content', async () => {
const messages: ChatCompletionMessageParam[] = [
{ role: ChatCompletionRequestMessageRoleEnum.System, content: '' },
{ role: ChatCompletionRequestMessageRoleEnum.User, content: 'Hello' }
];

const result = await loadRequestMessages({ messages });

expect(result).toHaveLength(1);
expect(result[0].role).toBe(ChatCompletionRequestMessageRoleEnum.User);
});
});

describe('User message processing with vision', () => {
it('should process simple text user message', async () => {
const messages: ChatCompletionMessageParam[] = [
{ role: ChatCompletionRequestMessageRoleEnum.User, content: 'Hello world' }
];

const result = await loadRequestMessages({ messages, useVision: true });

expect(result).toHaveLength(1);
expect(result[0].content).toBe('Hello world');
});

it('should not extract images from short text by default', async () => {
const messages: ChatCompletionMessageParam[] = [
{
role: ChatCompletionRequestMessageRoleEnum.User,
content: 'https://example.com/image.png'
}
];

const result = await loadRequestMessages({ messages, useVision: true });

expect(result).toHaveLength(1);
expect(typeof result[0].content).toBe('string');
expect(result[0].content).toBe('https://example.com/image.png');
});

it('should not extract images when useVision is false', async () => {
const messages: ChatCompletionMessageParam[] = [
{
role: ChatCompletionRequestMessageRoleEnum.User,
content: 'Look at https://example.com/image.png'
}
];

const result = await loadRequestMessages({ messages, useVision: false });

expect(result).toHaveLength(1);
expect(result[0].content).toBe('Look at https://example.com/image.png');
});

it('should not extract images from very long text (>500 chars)', async () => {
const longText = 'A'.repeat(600) + ' https://example.com/image.png';
const messages: ChatCompletionMessageParam[] = [
{ role: ChatCompletionRequestMessageRoleEnum.User, content: longText }
];

const result = await loadRequestMessages({ messages, useVision: true });

expect(result).toHaveLength(1);
expect(result[0].content).toBe(longText);
});

it('should limit to 4 images and return text if more found', async () => {
const textWithManyImages =
'Images: ' +
'https://example.com/1.png ' +
'https://example.com/2.jpg ' +
'https://example.com/3.gif ' +
'https://example.com/4.webp ' +
'https://example.com/5.png';

const messages: ChatCompletionMessageParam[] = [
{ role: ChatCompletionRequestMessageRoleEnum.User, content: textWithManyImages }
];

const result = await loadRequestMessages({ messages, useVision: true });

expect(result).toHaveLength(1);
expect(result[0].content).toBe(textWithManyImages);
});

it('should handle array content with mixed types', async () => {
const messages: ChatCompletionMessageParam[] = [
{
role: ChatCompletionRequestMessageRoleEnum.User,
content: [
{ type: 'text', text: 'Hello' },
{ type: 'image_url', image_url: { url: 'https://example.com/image.png' } }
]
}
];

const result = await loadRequestMessages({ messages, useVision: true });

expect(result).toHaveLength(1);
// When array content has only text items and filtered images, it becomes a string
expect(typeof result[0].content).toBe('string');
expect(result[0].content).toBe('Hello');
});

it('should filter out empty text items from array content', async () => {
const messages: ChatCompletionMessageParam[] = [
{
role: ChatCompletionRequestMessageRoleEnum.User,
content: [
{ type: 'text', text: 'Valid text' },
{ type: 'text', text: '' },
{ type: 'text', text: 'Another text' }
]
}
];

const result = await loadRequestMessages({ messages, useVision: true });

expect(result).toHaveLength(1);
const content = result[0].content as any[];
expect(content).toHaveLength(2);
});
});

describe('Image processing', () => {
|
||||
it('should load local image to base64', async () => {
|
||||
const messages: ChatCompletionMessageParam[] = [
|
||||
{
|
||||
role: ChatCompletionRequestMessageRoleEnum.User,
|
||||
content: [{ type: 'image_url', image_url: { url: '/local/image.png' } }]
|
||||
}
|
||||
];
|
||||
|
||||
mockGetImageBase64.mockResolvedValue({
|
||||
completeBase64: 'data:image/png;base64,localimage',
|
||||
base64: 'localimage',
|
||||
mime: 'image/png'
|
||||
});
|
||||
|
||||
const result = await loadRequestMessages({ messages, useVision: true });
|
||||
|
||||
expect(result).toHaveLength(1);
|
||||
const content = result[0].content as any[];
|
||||
expect(content[0].image_url.url).toBe('data:image/png;base64,localimage');
|
||||
});
|
||||
|
||||
it('should preserve base64 images as-is', async () => {
|
||||
const base64Image = 'data:image/png;base64,existingdata';
|
||||
const messages: ChatCompletionMessageParam[] = [
|
||||
{
|
||||
role: ChatCompletionRequestMessageRoleEnum.User,
|
||||
content: [{ type: 'image_url', image_url: { url: base64Image } }]
|
||||
}
|
||||
];
|
||||
|
||||
const result = await loadRequestMessages({ messages, useVision: true });
|
||||
|
||||
expect(result).toHaveLength(1);
|
||||
const content = result[0].content as any[];
|
||||
expect(content[0].image_url.url).toBe(base64Image);
|
||||
expect(mockGetImageBase64).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should handle invalid remote images gracefully', async () => {
|
||||
const messages: ChatCompletionMessageParam[] = [
|
||||
{
|
||||
role: ChatCompletionRequestMessageRoleEnum.User,
|
||||
content: [
|
||||
{ type: 'text', text: 'Text' },
|
||||
{ type: 'image_url', image_url: { url: 'https://invalid.com/image.png' } }
|
||||
]
|
||||
}
|
||||
];
|
||||
|
||||
mockAxiosHead.mockRejectedValue(new Error('Network error'));
|
||||
|
||||
const result = await loadRequestMessages({ messages, useVision: true });
|
||||
|
||||
expect(result).toHaveLength(1);
|
||||
// When image is filtered out and only text remains, it becomes string
|
||||
expect(typeof result[0].content).toBe('string');
|
||||
expect(result[0].content).toBe('Text');
|
||||
});
|
||||
|
||||
it('should handle 405 status as valid image', async () => {
|
||||
const messages: ChatCompletionMessageParam[] = [
|
||||
{
|
||||
role: ChatCompletionRequestMessageRoleEnum.User,
|
||||
content: [
|
||||
{ type: 'text', text: 'Check this image:' },
|
||||
{ type: 'image_url', image_url: { url: 'https://example.com/image.png' } }
|
||||
]
|
||||
}
|
||||
];
|
||||
|
||||
const error = new Error('Method not allowed');
|
||||
(error as any).response = { status: 405 };
|
||||
mockAxiosHead.mockRejectedValue(error);
|
||||
|
||||
const result = await loadRequestMessages({ messages, useVision: true });
|
||||
|
||||
expect(result).toHaveLength(1);
|
||||
// The function processes images from array content differently, expects text to remain
|
||||
expect(typeof result[0].content).toBe('string');
|
||||
expect(result[0].content).toBe('Check this image:');
|
||||
});
|
||||
|
||||
it('should remove origin from image URLs when provided', async () => {
|
||||
const messages: ChatCompletionMessageParam[] = [
|
||||
{
|
||||
role: ChatCompletionRequestMessageRoleEnum.User,
|
||||
content: [{ type: 'image_url', image_url: { url: 'https://mysite.com/images/test.png' } }]
|
||||
}
|
||||
];
|
||||
|
||||
const result = await loadRequestMessages({
|
||||
messages,
|
||||
useVision: true,
|
||||
origin: 'https://mysite.com'
|
||||
});
|
||||
|
||||
// Just verify the function processes without error - axios call verification is complex
|
||||
expect(result).toHaveLength(1);
|
||||
});
|
||||
});
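
  // Assistant messages: string content passes through, text parts in array content are joined
  // with '\n', and tool_calls on the message are preserved.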
  describe('Assistant message processing', () => {
    it('should process assistant message with string content', async () => {
      const messages: ChatCompletionMessageParam[] = [
        { role: ChatCompletionRequestMessageRoleEnum.User, content: 'Hello' },
        { role: ChatCompletionRequestMessageRoleEnum.Assistant, content: 'Hi there!' }
      ];

      const result = await loadRequestMessages({ messages });

      expect(result).toHaveLength(2);
      expect(result[1].content).toBe('Hi there!');
    });

    it('should process assistant message with array content', async () => {
      const messages: ChatCompletionMessageParam[] = [
        { role: ChatCompletionRequestMessageRoleEnum.User, content: 'Hello' },
        {
          role: ChatCompletionRequestMessageRoleEnum.Assistant,
          content: [
            { type: 'text', text: 'Part 1' },
            { type: 'text', text: 'Part 2' }
          ]
        }
      ];

      const result = await loadRequestMessages({ messages });

      expect(result).toHaveLength(2);
      expect(result[1].content).toBe('Part 1\nPart 2');
    });

    it('should preserve tool_calls and function_call in assistant messages', async () => {
      const messages: ChatCompletionMessageParam[] = [
        { role: ChatCompletionRequestMessageRoleEnum.User, content: 'Hello' },
        {
          role: ChatCompletionRequestMessageRoleEnum.Assistant,
          content: null,
          tool_calls: [
            {
              id: 'call_123',
              type: 'function',
              function: { name: 'test_tool', arguments: '{}' }
            }
          ]
        }
      ];

      const result = await loadRequestMessages({ messages });

      expect(result).toHaveLength(2);
      expect((result[1] as any).tool_calls).toHaveLength(1);
      expect((result[1] as any).tool_calls![0].function.name).toBe('test_tool');
    });

    it('should handle assistant message with null content', async () => {
      const messages: ChatCompletionMessageParam[] = [
        { role: ChatCompletionRequestMessageRoleEnum.User, content: 'Hello' },
        { role: ChatCompletionRequestMessageRoleEnum.Assistant, content: null }
      ];

      const result = await loadRequestMessages({ messages });

      expect(result).toHaveLength(2);
      expect(result[1].content).toBe('null');
    });

    it('should handle empty assistant content between other assistants', async () => {
      const messages: ChatCompletionMessageParam[] = [
        { role: ChatCompletionRequestMessageRoleEnum.User, content: 'Hello' },
        { role: ChatCompletionRequestMessageRoleEnum.Assistant, content: 'First' },
        { role: ChatCompletionRequestMessageRoleEnum.Assistant, content: '' },
        { role: ChatCompletionRequestMessageRoleEnum.Assistant, content: 'Last' }
      ];

      const result = await loadRequestMessages({ messages });

      // Adjacent assistant messages get merged, empty content in middle gets filtered during merge
      expect(result).toHaveLength(2);
      expect(result[1].content).toBe('First\n\nLast');
    });
  });
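
  // Merging: consecutive same-role messages are combined (system with '\n\n', assistant with '\n'),
  // except when an assistant message carries tool_calls.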
  describe('Message merging behavior', () => {
    it('should merge consecutive system messages', async () => {
      const messages: ChatCompletionMessageParam[] = [
        { role: ChatCompletionRequestMessageRoleEnum.System, content: 'System 1' },
        { role: ChatCompletionRequestMessageRoleEnum.System, content: 'System 2' },
        { role: ChatCompletionRequestMessageRoleEnum.User, content: 'Hello' }
      ];

      const result = await loadRequestMessages({ messages });

      expect(result).toHaveLength(2);
      expect(result[0].role).toBe(ChatCompletionRequestMessageRoleEnum.System);
      // System messages when merged get converted to concatenated string
      expect(typeof result[0].content).toBe('string');
      expect(result[0].content).toBe('System 1\n\nSystem 2');
    });

    it('should merge consecutive user messages', async () => {
      const messages: ChatCompletionMessageParam[] = [
        { role: ChatCompletionRequestMessageRoleEnum.User, content: 'Message 1' },
        { role: ChatCompletionRequestMessageRoleEnum.User, content: 'Message 2' },
        { role: ChatCompletionRequestMessageRoleEnum.Assistant, content: 'Response' }
      ];

      const result = await loadRequestMessages({ messages });

      expect(result).toHaveLength(2);
      expect(result[0].role).toBe(ChatCompletionRequestMessageRoleEnum.User);
      // User messages get merged - final format may be array or string
      expect(result[0].content).toBeDefined();
    });

    it('should merge consecutive assistant messages with content', async () => {
      const messages: ChatCompletionMessageParam[] = [
        { role: ChatCompletionRequestMessageRoleEnum.User, content: 'Hello' },
        { role: ChatCompletionRequestMessageRoleEnum.Assistant, content: 'Part 1' },
        { role: ChatCompletionRequestMessageRoleEnum.Assistant, content: 'Part 2' }
      ];

      const result = await loadRequestMessages({ messages });

      expect(result).toHaveLength(2);
      expect(result[1].role).toBe(ChatCompletionRequestMessageRoleEnum.Assistant);
      expect(result[1].content).toBe('Part 1\nPart 2');
    });

    it('should not merge assistant messages when one has tool calls', async () => {
      const messages: ChatCompletionMessageParam[] = [
        { role: ChatCompletionRequestMessageRoleEnum.User, content: 'Hello' },
        { role: ChatCompletionRequestMessageRoleEnum.Assistant, content: 'Text response' },
        {
          role: ChatCompletionRequestMessageRoleEnum.Assistant,
          content: null,
          tool_calls: [
            { id: 'call1', type: 'function', function: { name: 'tool', arguments: '{}' } }
          ]
        }
      ];

      const result = await loadRequestMessages({ messages });

      expect(result).toHaveLength(3); // Should not merge
      expect(result[1].content).toBe('Text response');
      expect((result[2] as any).tool_calls).toHaveLength(1);
    });
  });
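
  // Tool messages pass through unchanged; empty or undefined user content is normalized to 'null'.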
  describe('Other message types', () => {
    it('should pass through tool messages unchanged', async () => {
      const messages: ChatCompletionMessageParam[] = [
        { role: ChatCompletionRequestMessageRoleEnum.User, content: 'Hello' },
        {
          role: ChatCompletionRequestMessageRoleEnum.Tool,
          tool_call_id: 'call1',
          content: 'Tool result'
        }
      ];

      const result = await loadRequestMessages({ messages });

      expect(result).toHaveLength(2);
      expect(result[1].role).toBe(ChatCompletionRequestMessageRoleEnum.Tool);
      expect(result[1].content).toBe('Tool result');
    });

    it('should handle user message with empty content as null', async () => {
      const messages: ChatCompletionMessageParam[] = [
        { role: ChatCompletionRequestMessageRoleEnum.User, content: '' }
      ];

      const result = await loadRequestMessages({ messages });

      expect(result).toHaveLength(1);
      expect(result[0].content).toBe('null');
    });

    it('should handle undefined user content', async () => {
      const messages: ChatCompletionMessageParam[] = [
        { role: ChatCompletionRequestMessageRoleEnum.User, content: undefined as any }
      ];

      const result = await loadRequestMessages({ messages });

      expect(result).toHaveLength(1);
      expect(result[0].content).toBe('null');
    });
  });
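
  // Full conversations with every role, plus the MULTIPLE_DATA_TO_BASE64 flag that converts
  // remote image URLs to base64 data URIs.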
  describe('Complex scenarios', () => {
    it('should handle mixed conversation with all message types', async () => {
      const messages: ChatCompletionMessageParam[] = [
        { role: ChatCompletionRequestMessageRoleEnum.System, content: 'You are helpful' },
        { role: ChatCompletionRequestMessageRoleEnum.User, content: 'Hello' },
        { role: ChatCompletionRequestMessageRoleEnum.Assistant, content: 'Hi!' },
        { role: ChatCompletionRequestMessageRoleEnum.User, content: 'How are you?' },
        {
          role: ChatCompletionRequestMessageRoleEnum.Assistant,
          content: null,
          tool_calls: [
            { id: 'call1', type: 'function', function: { name: 'check_status', arguments: '{}' } }
          ]
        },
        {
          role: ChatCompletionRequestMessageRoleEnum.Tool,
          tool_call_id: 'call1',
          content: 'Status: OK'
        },
        { role: ChatCompletionRequestMessageRoleEnum.Assistant, content: 'I am doing well!' }
      ];

      const result = await loadRequestMessages({ messages });

      expect(result).toHaveLength(7);
      expect(result.map((msg) => msg.role)).toEqual([
        ChatCompletionRequestMessageRoleEnum.System,
        ChatCompletionRequestMessageRoleEnum.User,
        ChatCompletionRequestMessageRoleEnum.Assistant,
        ChatCompletionRequestMessageRoleEnum.User,
        ChatCompletionRequestMessageRoleEnum.Assistant,
        ChatCompletionRequestMessageRoleEnum.Tool,
        ChatCompletionRequestMessageRoleEnum.Assistant
      ]);
    });

    it('should handle environment variable MULTIPLE_DATA_TO_BASE64', async () => {
      const originalEnv = process.env.MULTIPLE_DATA_TO_BASE64;
      process.env.MULTIPLE_DATA_TO_BASE64 = 'true';

      const messages: ChatCompletionMessageParam[] = [
        {
          role: ChatCompletionRequestMessageRoleEnum.User,
          content: [{ type: 'image_url', image_url: { url: 'https://example.com/image.png' } }]
        }
      ];

      mockGetImageBase64.mockResolvedValue({
        completeBase64: 'data:image/png;base64,converted',
        base64: 'converted',
        mime: 'image/png'
      });

      const result = await loadRequestMessages({ messages, useVision: true });

      expect(mockGetImageBase64).toHaveBeenCalledWith('https://example.com/image.png');
      expect(result).toHaveLength(1);
      const content = result[0].content as any[];
      expect(content[0].image_url.url).toBe('data:image/png;base64,converted');

      // Restore original environment
      if (originalEnv !== undefined) {
        process.env.MULTIPLE_DATA_TO_BASE64 = originalEnv;
      } else {
        process.env.MULTIPLE_DATA_TO_BASE64 = '';
      }
    });
  });
});

@@ -45,7 +45,11 @@ const testWorkflow = async (path: string) => {
     },
     runningUserInfo: {
       tmbId: 'test',
-      teamId: 'test'
+      teamId: 'test',
+      username: 'test',
+      teamName: 'test',
+      memberName: 'test',
+      contact: 'test'
     },
     timezone: 'Asia/Shanghai',
     externalProvider: {},