import { describe, expect, it } from 'vitest';

import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import type { ChatItemType } from '@fastgpt/global/core/chat/type';
import {
  transformPreviewHistories,
  addStatisticalDataToHistoryItem
} from '@/global/core/chat/utils';

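// Minimal node-response metadata shared by the tests below; individual cases spread it
// into responseData entries and override moduleType or add per-node fields as needed.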
const mockResponseData = {
  id: '1',
  nodeId: '1',
  moduleName: 'test',
  moduleType: FlowNodeTypeEnum.chatNode
};

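// transformPreviewHistories strips raw responseData from AI messages before they are
// previewed; judging by the expectations below, the responseDetail flag controls whether
// quote statistics (totalQuoteList) are exposed ([] when true, undefined when false).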
describe('transformPreviewHistories', () => {
  it('should transform histories correctly with responseDetail=true', () => {
    const histories: ChatItemType[] = [
      {
        obj: ChatRoleEnum.AI,
        value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
        responseData: [
          {
            ...mockResponseData,
            runningTime: 1.5
          }
        ]
      }
    ];

    const result = transformPreviewHistories(histories, true);

    expect(result[0]).toEqual({
      obj: ChatRoleEnum.AI,
      value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
      responseData: undefined,
      llmModuleAccount: 1,
      totalQuoteList: [],
      historyPreviewLength: undefined
    });
  });

  it('should transform histories correctly with responseDetail=false', () => {
    const histories: ChatItemType[] = [
      {
        obj: ChatRoleEnum.AI,
        value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
        responseData: [
          {
            ...mockResponseData,
            runningTime: 1.5
          }
        ]
      }
    ];

    const result = transformPreviewHistories(histories, false);

    expect(result[0]).toEqual({
      obj: ChatRoleEnum.AI,
      value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
      responseData: undefined,
      llmModuleAccount: 1,
      totalQuoteList: undefined,
      historyPreviewLength: undefined
    });
  });
});

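// addStatisticalDataToHistoryItem aggregates statistics (LLM node count, quotes,
// history-preview length) from an AI item's responseData. Items that are not AI
// messages, already have totalQuoteList, or carry no responseData are returned as-is.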
describe('addStatisticalDataToHistoryItem', () => {
  it('should return original item if obj is not AI', () => {
    const item: ChatItemType = {
      obj: ChatRoleEnum.Human,
      value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }]
    };

    expect(addStatisticalDataToHistoryItem(item)).toBe(item);
  });

  it('should return original item if totalQuoteList is already defined', () => {
    const item: ChatItemType = {
      obj: ChatRoleEnum.AI,
      value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
      totalQuoteList: []
    };

    expect(addStatisticalDataToHistoryItem(item)).toBe(item);
  });

  it('should return original item if responseData is undefined', () => {
    const item: ChatItemType = {
      obj: ChatRoleEnum.AI,
      value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }]
    };

    expect(addStatisticalDataToHistoryItem(item)).toBe(item);
  });

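  // The expected result below implies how the aggregation is computed: llmModuleAccount
  // presumably counts the top-level chatNode, the agent node and the chatNode nested in
  // toolDetail; the datasetSearchNode's quoteList becomes totalQuoteList; and the single
  // historyPreview entry yields historyPreviewLength: 1.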
  it('should calculate statistics correctly', () => {
    const quoteId = '507f1f77bcf86cd799439011'; // Valid 24-character hex ID
    const item: ChatItemType = {
      obj: ChatRoleEnum.AI,
      value: [
        {
          type: ChatItemValueTypeEnum.text,
          text: { content: `test response with citation [${quoteId}](CITE)` }
        }
      ],
      responseData: [
        {
          ...mockResponseData,
          moduleType: FlowNodeTypeEnum.chatNode,
          runningTime: 1.5,
          historyPreview: [{ obj: ChatRoleEnum.AI, value: 'preview1' }]
        },
        {
          ...mockResponseData,
          moduleType: FlowNodeTypeEnum.datasetSearchNode,
          quoteList: [{ id: quoteId, q: 'test', a: 'answer' }],
          runningTime: 0.5
        },
        {
          ...mockResponseData,
          moduleType: FlowNodeTypeEnum.agent,
          runningTime: 1,
          toolDetail: [
            {
              moduleType: FlowNodeTypeEnum.chatNode,
              runningTime: 0.5
            }
          ]
        }
      ]
    };

    const result = addStatisticalDataToHistoryItem(item);

    expect(result).toEqual({
      ...item,
      llmModuleAccount: 3,
      totalQuoteList: [{ id: quoteId, q: 'test', a: 'answer' }],
      historyPreviewLength: 1
    });
  });

  it('should handle empty arrays and undefined values', () => {
    const item: ChatItemType = {
      obj: ChatRoleEnum.AI,
      value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
      responseData: [
        {
          ...mockResponseData,
          runningTime: 0
        }
      ]
    };

    const result = addStatisticalDataToHistoryItem(item);

    expect(result).toEqual({
      ...item,
      llmModuleAccount: 1,
      totalQuoteList: [],
      historyPreviewLength: undefined
    });
  });

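  // pluginDetail and loopDetail appear to be traversed the same way as toolDetail when
  // counting LLM nodes, which would explain llmModuleAccount: 3 in the case below.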
  it('should handle nested plugin and loop details', () => {
    const item: ChatItemType = {
      obj: ChatRoleEnum.AI,
      value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
      responseData: [
        {
          ...mockResponseData,
          runningTime: 1,
          pluginDetail: [
            {
              moduleType: FlowNodeTypeEnum.chatNode,
              runningTime: 0.5
            }
          ],
          loopDetail: [
            {
              moduleType: FlowNodeTypeEnum.agent,
              runningTime: 0.3
            }
          ]
        }
      ]
    };

    const result = addStatisticalDataToHistoryItem(item);

    expect(result).toEqual({
      ...item,
      llmModuleAccount: 3,
      totalQuoteList: [],
      historyPreviewLength: undefined
    });
  });
});