/*
 * Mirror of https://github.com/labring/FastGPT.git
 * Synced 2025-07-23 21:13:50 +00:00
 * File stats at sync time: 197 lines, 5.4 KiB, TypeScript
 *
 * Upstream changelog excerpt:
 * update doc; feat: Add coupon redemption feature for team subscriptions (#4595)
 * (TeamCouponSub / TeamCouponSchema types, redeemCoupon API endpoint, coupon modal UI,
 * new icon and translations); perf: remove field teamId; perf: use dynamic import;
 * refactor: move to page component; perf: coupon code; perf: mcp server; perf: test;
 * auto layout (#4634); fix 4.9.6 (#4631): fix debug quote list, delete next text node
 * match, fix extract default boolean value, export latest 100 chat items, fix quote
 * item ui, doc fixes, auto layout fixes, add start node; fix: share link (#4644);
 * Add workflow run duration / get audio duration (#4645); Custom config path (#4649):
 * read config directory from DATA_PATH env var; add api-client option to disable chat
 * log persistence for image/video-heavy calls (#3964); update special chatId;
 * perf: vector db rename; update operationLog (#4647); feat: Rerank usage (#4654);
 * refresh concat when update (#4655); perf: timer lock; Fix operationLog (#4657);
 * perf: http streamable mcp; add alipay (#4630); perf: subplan ui; perf: pay code;
 * hide bank tip; Fix: pay error (#4665); fix quote number (#4666); remove log.
 * Co-authored-by: a.e., heheer, John Chen, gaord, gggaaallleee
 */
import { describe, expect, it } from 'vitest';
|
|
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
|
|
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
|
|
import { ChatItemType } from '@fastgpt/global/core/chat/type';
|
|
import {
|
|
transformPreviewHistories,
|
|
addStatisticalDataToHistoryItem
|
|
} from '@/global/core/chat/utils';
|
|
|
|
const mockResponseData = {
|
|
id: '1',
|
|
nodeId: '1',
|
|
moduleName: 'test',
|
|
moduleType: FlowNodeTypeEnum.chatNode
|
|
};
|
|
|
|
describe('transformPreviewHistories', () => {
|
|
it('should transform histories correctly with responseDetail=true', () => {
|
|
const histories: ChatItemType[] = [
|
|
{
|
|
obj: ChatRoleEnum.AI,
|
|
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
|
|
responseData: [
|
|
{
|
|
...mockResponseData,
|
|
runningTime: 1.5
|
|
}
|
|
]
|
|
}
|
|
];
|
|
|
|
const result = transformPreviewHistories(histories, true);
|
|
|
|
expect(result[0]).toEqual({
|
|
obj: ChatRoleEnum.AI,
|
|
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
|
|
responseData: undefined,
|
|
llmModuleAccount: 1,
|
|
totalQuoteList: [],
|
|
historyPreviewLength: undefined
|
|
});
|
|
});
|
|
|
|
it('should transform histories correctly with responseDetail=false', () => {
|
|
const histories: ChatItemType[] = [
|
|
{
|
|
obj: ChatRoleEnum.AI,
|
|
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
|
|
responseData: [
|
|
{
|
|
...mockResponseData,
|
|
runningTime: 1.5
|
|
}
|
|
]
|
|
}
|
|
];
|
|
|
|
const result = transformPreviewHistories(histories, false);
|
|
|
|
expect(result[0]).toEqual({
|
|
obj: ChatRoleEnum.AI,
|
|
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
|
|
responseData: undefined,
|
|
llmModuleAccount: 1,
|
|
totalQuoteList: undefined,
|
|
historyPreviewLength: undefined
|
|
});
|
|
});
|
|
});
|
|
|
|
describe('addStatisticalDataToHistoryItem', () => {
|
|
it('should return original item if obj is not AI', () => {
|
|
const item: ChatItemType = {
|
|
obj: ChatRoleEnum.Human,
|
|
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }]
|
|
};
|
|
|
|
expect(addStatisticalDataToHistoryItem(item)).toBe(item);
|
|
});
|
|
|
|
it('should return original item if totalQuoteList is already defined', () => {
|
|
const item: ChatItemType = {
|
|
obj: ChatRoleEnum.AI,
|
|
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
|
|
totalQuoteList: []
|
|
};
|
|
|
|
expect(addStatisticalDataToHistoryItem(item)).toBe(item);
|
|
});
|
|
|
|
it('should return original item if responseData is undefined', () => {
|
|
const item: ChatItemType = {
|
|
obj: ChatRoleEnum.AI,
|
|
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }]
|
|
};
|
|
|
|
expect(addStatisticalDataToHistoryItem(item)).toBe(item);
|
|
});
|
|
|
|
it('should calculate statistics correctly', () => {
|
|
const item: ChatItemType = {
|
|
obj: ChatRoleEnum.AI,
|
|
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
|
|
responseData: [
|
|
{
|
|
...mockResponseData,
|
|
moduleType: FlowNodeTypeEnum.chatNode,
|
|
runningTime: 1.5,
|
|
historyPreview: [{ obj: ChatRoleEnum.AI, value: 'preview1' }]
|
|
},
|
|
{
|
|
...mockResponseData,
|
|
moduleType: FlowNodeTypeEnum.datasetSearchNode,
|
|
quoteList: [{ id: '1', q: 'test', a: 'answer' }],
|
|
runningTime: 0.5
|
|
},
|
|
{
|
|
...mockResponseData,
|
|
moduleType: FlowNodeTypeEnum.tools,
|
|
runningTime: 1,
|
|
toolDetail: [
|
|
{
|
|
moduleType: FlowNodeTypeEnum.chatNode,
|
|
runningTime: 0.5
|
|
}
|
|
]
|
|
}
|
|
]
|
|
};
|
|
|
|
const result = addStatisticalDataToHistoryItem(item);
|
|
|
|
expect(result).toEqual({
|
|
...item,
|
|
llmModuleAccount: 3,
|
|
totalQuoteList: [{ id: '1', q: 'test', a: 'answer' }],
|
|
historyPreviewLength: 1
|
|
});
|
|
});
|
|
|
|
it('should handle empty arrays and undefined values', () => {
|
|
const item: ChatItemType = {
|
|
obj: ChatRoleEnum.AI,
|
|
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
|
|
responseData: [
|
|
{
|
|
...mockResponseData,
|
|
runningTime: 0
|
|
}
|
|
]
|
|
};
|
|
|
|
const result = addStatisticalDataToHistoryItem(item);
|
|
|
|
expect(result).toEqual({
|
|
...item,
|
|
llmModuleAccount: 1,
|
|
totalQuoteList: [],
|
|
historyPreviewLength: undefined
|
|
});
|
|
});
|
|
|
|
it('should handle nested plugin and loop details', () => {
|
|
const item: ChatItemType = {
|
|
obj: ChatRoleEnum.AI,
|
|
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
|
|
responseData: [
|
|
{
|
|
...mockResponseData,
|
|
runningTime: 1,
|
|
pluginDetail: [
|
|
{
|
|
moduleType: FlowNodeTypeEnum.chatNode,
|
|
runningTime: 0.5
|
|
}
|
|
],
|
|
loopDetail: [
|
|
{
|
|
moduleType: FlowNodeTypeEnum.tools,
|
|
runningTime: 0.3
|
|
}
|
|
]
|
|
}
|
|
]
|
|
};
|
|
|
|
const result = addStatisticalDataToHistoryItem(item);
|
|
|
|
expect(result).toEqual({
|
|
...item,
|
|
llmModuleAccount: 3,
|
|
totalQuoteList: [],
|
|
historyPreviewLength: undefined
|
|
});
|
|
});
|
|
});
|