import { describe, expect, it } from 'vitest';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import type { ChatItemType } from '@fastgpt/global/core/chat/type';
import {
  transformPreviewHistories,
  addStatisticalDataToHistoryItem
} from '@/global/core/chat/utils';

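// Minimal response-data fixture shared by every case below; individual tests
// spread extra fields (runningTime, quoteList, nested details) on top of it.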
const mockResponseData = {
  id: '1',
  nodeId: '1',
  moduleName: 'test',
  moduleType: FlowNodeTypeEnum.chatNode
};

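// transformPreviewHistories should strip raw responseData from AI messages and
// attach aggregate stats; the responseDetail flag controls whether quote data
// (totalQuoteList) is exposed to the client.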
describe('transformPreviewHistories', () => {
  it('should transform histories correctly with responseDetail=true', () => {
    const histories: ChatItemType[] = [
      {
        obj: ChatRoleEnum.AI,
        value: [{ text: { content: 'test response' } }],
        responseData: [
          {
            ...mockResponseData,
            runningTime: 1.5
          }
        ]
      }
    ];

    const result = transformPreviewHistories(histories, true);

    expect(result[0]).toEqual({
      obj: ChatRoleEnum.AI,
      value: [{ text: { content: 'test response' } }],
      responseData: undefined,
      useAgentSandbox: false,
      llmModuleAccount: 1,
      totalQuoteList: [],
      historyPreviewLength: undefined
    });
  });

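  // With responseDetail=false the expectation flips to totalQuoteList: undefined,
  // i.e. quote sources are withheld from the preview entirely.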
  it('should transform histories correctly with responseDetail=false', () => {
    const histories: ChatItemType[] = [
      {
        obj: ChatRoleEnum.AI,
        value: [{ text: { content: 'test response' } }],
        responseData: [
          {
            ...mockResponseData,
            runningTime: 1.5
          }
        ]
      }
    ];

    const result = transformPreviewHistories(histories, false);

    expect(result[0]).toEqual({
      obj: ChatRoleEnum.AI,
      value: [{ text: { content: 'test response' } }],
      responseData: undefined,
      useAgentSandbox: false,
      llmModuleAccount: 1,
      totalQuoteList: undefined,
      historyPreviewLength: undefined
    });
  });
});

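// addStatisticalDataToHistoryItem should pass items through untouched when they
// are not AI messages, already carry totalQuoteList, or have no responseData;
// otherwise it derives llmModuleAccount, totalQuoteList and historyPreviewLength.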
describe('addStatisticalDataToHistoryItem', () => {
  it('should return original item if obj is not AI', () => {
    const item: ChatItemType = {
      obj: ChatRoleEnum.Human,
      value: [{ text: { content: 'test response' } }]
    };

    expect(addStatisticalDataToHistoryItem(item)).toBe(item);
  });

  it('should return original item if totalQuoteList is already defined', () => {
    const item: ChatItemType = {
      obj: ChatRoleEnum.AI,
      value: [{ text: { content: 'test response' } }],
      totalQuoteList: []
    };

    expect(addStatisticalDataToHistoryItem(item)).toBe(item);
  });

  it('should return original item if responseData is undefined', () => {
    const item: ChatItemType = {
      obj: ChatRoleEnum.AI,
      value: [{ text: { content: 'test response' } }]
    };

    expect(addStatisticalDataToHistoryItem(item)).toBe(item);
  });

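  // Judging by the expectations here and in the nested-details case below,
  // llmModuleAccount counts chatNode/toolCall entries (including nested
  // toolDetail ones), while datasetSearchNode only contributes its quoteList.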
  it('should calculate statistics correctly', () => {
    const quoteId = '507f1f77bcf86cd799439011'; // Valid 24-character hex ID
    const item: ChatItemType = {
      obj: ChatRoleEnum.AI,
      value: [{ text: { content: `test response with citation [${quoteId}](CITE)` } }],
      responseData: [
        {
          ...mockResponseData,
          moduleType: FlowNodeTypeEnum.chatNode,
          runningTime: 1.5,
          historyPreview: [{ obj: ChatRoleEnum.AI, value: 'preview1' }]
        },
        {
          ...mockResponseData,
          moduleType: FlowNodeTypeEnum.datasetSearchNode,
          quoteList: [
            {
              id: quoteId,
              q: 'test',
              a: 'answer',
              datasetId: 'ds1',
              collectionId: 'col1',
              sourceName: 'source1',
              chunkIndex: 0,
              updateTime: new Date(),
              score: []
            }
          ],
          runningTime: 0.5
        },
        {
          ...mockResponseData,
          moduleType: FlowNodeTypeEnum.toolCall,
          runningTime: 1,
          toolDetail: [
            {
              id: 'detail1',
              nodeId: 'detailNode1',
              moduleName: 'Detail Chat',
              moduleType: FlowNodeTypeEnum.chatNode,
              runningTime: 0.5
            }
          ]
        }
      ]
    };

    const result = addStatisticalDataToHistoryItem(item);

    expect(result).toEqual({
      ...item,
      llmModuleAccount: 3,
      useAgentSandbox: false,
      totalQuoteList: [
        {
          id: quoteId,
          q: 'test',
          a: 'answer',
          datasetId: 'ds1',
          collectionId: 'col1',
          sourceName: 'source1',
          chunkIndex: 0,
          updateTime: expect.any(Date),
          score: []
        }
      ],
      historyPreviewLength: 1
    });
  });

  it('should handle empty arrays and undefined values', () => {
    const item: ChatItemType = {
      obj: ChatRoleEnum.AI,
      value: [{ text: { content: 'test response' } }],
      responseData: [
        {
          ...mockResponseData,
          runningTime: 0
        }
      ]
    };

    const result = addStatisticalDataToHistoryItem(item);

    expect(result).toEqual({
      ...item,
      useAgentSandbox: false,
      llmModuleAccount: 1,
      totalQuoteList: [],
      historyPreviewLength: undefined
    });
  });

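  // Nested pluginDetail/loopDetail entries are expected to be counted
  // recursively: 1 top-level chatNode + 2 nested entries = llmModuleAccount 3.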
  it('should handle nested plugin and loop details', () => {
    const item: ChatItemType = {
      obj: ChatRoleEnum.AI,
      value: [{ text: { content: 'test response' } }],
      responseData: [
        {
          ...mockResponseData,
          runningTime: 1,
          pluginDetail: [
            {
              id: 'plugin1',
              nodeId: 'pluginNode1',
              moduleName: 'Plugin Chat',
              moduleType: FlowNodeTypeEnum.chatNode,
              runningTime: 0.5
            }
          ],
          loopDetail: [
            {
              id: 'loop1',
              nodeId: 'loopNode1',
              moduleName: 'Loop Tool',
              moduleType: FlowNodeTypeEnum.toolCall,
              runningTime: 0.3
            }
          ]
        }
      ]
    };

    const result = addStatisticalDataToHistoryItem(item);

    expect(result).toEqual({
      ...item,
      useAgentSandbox: false,
      llmModuleAccount: 3,
      totalQuoteList: [],
      historyPreviewLength: undefined
    });
  });
});