V4.9.7 feature (#4669)

* update doc

* feat: Add coupon redemption feature for team subscriptions (#4595)

* feat: Add coupon redemption feature for team subscriptions

- Introduced `TeamCouponSub` and `TeamCouponSchema` types
- Added `redeemCoupon` API endpoint
- Updated UI to include a modal for coupon redemption
- Added new icon and translations for "Redeem coupon"

* perf: remove field teamId

* perf: use dynamic import

* refactor: move to page component

* perf: coupon code

* perf: mcp server

* perf: test

* auto layout (#4634)

* fix 4.9.6 (#4631)

* fix debug quote list

* delete next text node match

* fix extract default boolean value

* export latest 100 chat items

* fix quote item ui

* doc

* fix doc

* feat: auto layout

* perf: auto layout

* fix: auto layout null

* add start node

---------

Co-authored-by: heheer <heheer@sealos.io>

* fix: share link (#4644)

* Add workflow run duration;Get audio duration (#4645)

* add duration

* get audio duration

* Custom config path (#4649)

* feat: 通过环境变量DATA_PATH获取配置文件目录 (#4622)

通过环境变量DATA_PATH获取配置文件目录,以应对不同的部署方式的多样化需求

* feat: custom configjson path

* doc

---------

Co-authored-by: John Chen <sss1991@163.com>

* 程序api调用场景下,如果大量调用带有图片或视频,产生的聊天记录会导致后台mongo数据库异常。这个修改给api客户端一个禁止生成聊天记录的选项,避免这个后果。 (#3964)

* update special chatId

* perf: vector db rename

* update operationLog (#4647)

* update operationLog

* combine operationLogMap

* solve operationI18nLogMap bug

* remove log

* feat: Rerank usage (#4654)

* refresh concat when update (#4655)

* fix: refresh code

* perf: timer lock

* Fix operationLog (#4657)

* perf: http streamable mcp

* add alipay (#4630)

* perf: subplan ui

* perf: pay code

* hidden bank tip

* Fix: pay error (#4665)

* fix quote number (#4666)

* remove log

---------

Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>
Co-authored-by: heheer <heheer@sealos.io>
Co-authored-by: John Chen <sss1991@163.com>
Co-authored-by: gaord <bengao168@msn.com>
Co-authored-by: gggaaallleee <91131304+gggaaallleee@users.noreply.github.com>
This commit is contained in:
Archer
2025-04-26 16:17:21 +08:00
committed by GitHub
parent a669a60fe6
commit 0720bbe4da
143 changed files with 2067 additions and 1093 deletions

View File

@@ -1,21 +1,28 @@
import { describe, expect, it } from 'vitest';
import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
import { ChatHistoryItemResType, ChatItemType } from '@fastgpt/global/core/chat/type';
import { ChatItemType } from '@fastgpt/global/core/chat/type';
import {
transformPreviewHistories,
addStatisticalDataToHistoryItem
} from '@/global/core/chat/utils';
const mockResponseData = {
id: '1',
nodeId: '1',
moduleName: 'test',
moduleType: FlowNodeTypeEnum.chatNode
};
describe('transformPreviewHistories', () => {
it('should transform histories correctly with responseDetail=true', () => {
const histories: ChatItemType[] = [
{
obj: ChatRoleEnum.AI,
value: 'test response',
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
responseData: [
{
moduleType: FlowNodeTypeEnum.chatNode,
...mockResponseData,
runningTime: 1.5
}
]
@@ -26,11 +33,10 @@ describe('transformPreviewHistories', () => {
expect(result[0]).toEqual({
obj: ChatRoleEnum.AI,
value: 'test response',
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
responseData: undefined,
llmModuleAccount: 1,
totalQuoteList: [],
totalRunningTime: 1.5,
historyPreviewLength: undefined
});
});
@@ -39,10 +45,10 @@ describe('transformPreviewHistories', () => {
const histories: ChatItemType[] = [
{
obj: ChatRoleEnum.AI,
value: 'test response',
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
responseData: [
{
moduleType: FlowNodeTypeEnum.chatNode,
...mockResponseData,
runningTime: 1.5
}
]
@@ -53,11 +59,10 @@ describe('transformPreviewHistories', () => {
expect(result[0]).toEqual({
obj: ChatRoleEnum.AI,
value: 'test response',
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
responseData: undefined,
llmModuleAccount: 1,
totalQuoteList: undefined,
totalRunningTime: 1.5,
historyPreviewLength: undefined
});
});
@@ -67,7 +72,7 @@ describe('addStatisticalDataToHistoryItem', () => {
it('should return original item if obj is not AI', () => {
const item: ChatItemType = {
obj: ChatRoleEnum.Human,
value: 'test'
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }]
};
expect(addStatisticalDataToHistoryItem(item)).toBe(item);
@@ -76,7 +81,7 @@ describe('addStatisticalDataToHistoryItem', () => {
it('should return original item if totalQuoteList is already defined', () => {
const item: ChatItemType = {
obj: ChatRoleEnum.AI,
value: 'test',
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
totalQuoteList: []
};
@@ -86,7 +91,7 @@ describe('addStatisticalDataToHistoryItem', () => {
it('should return original item if responseData is undefined', () => {
const item: ChatItemType = {
obj: ChatRoleEnum.AI,
value: 'test'
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }]
};
expect(addStatisticalDataToHistoryItem(item)).toBe(item);
@@ -95,19 +100,22 @@ describe('addStatisticalDataToHistoryItem', () => {
it('should calculate statistics correctly', () => {
const item: ChatItemType = {
obj: ChatRoleEnum.AI,
value: 'test',
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
responseData: [
{
...mockResponseData,
moduleType: FlowNodeTypeEnum.chatNode,
runningTime: 1.5,
historyPreview: ['preview1']
historyPreview: [{ obj: ChatRoleEnum.AI, value: 'preview1' }]
},
{
...mockResponseData,
moduleType: FlowNodeTypeEnum.datasetSearchNode,
quoteList: [{ id: '1', q: 'test', a: 'answer' }],
runningTime: 0.5
},
{
...mockResponseData,
moduleType: FlowNodeTypeEnum.tools,
runningTime: 1,
toolDetail: [
@@ -126,7 +134,6 @@ describe('addStatisticalDataToHistoryItem', () => {
...item,
llmModuleAccount: 3,
totalQuoteList: [{ id: '1', q: 'test', a: 'answer' }],
totalRunningTime: 3,
historyPreviewLength: 1
});
});
@@ -134,10 +141,10 @@ describe('addStatisticalDataToHistoryItem', () => {
it('should handle empty arrays and undefined values', () => {
const item: ChatItemType = {
obj: ChatRoleEnum.AI,
value: 'test',
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
responseData: [
{
moduleType: FlowNodeTypeEnum.chatNode,
...mockResponseData,
runningTime: 0
}
]
@@ -149,7 +156,6 @@ describe('addStatisticalDataToHistoryItem', () => {
...item,
llmModuleAccount: 1,
totalQuoteList: [],
totalRunningTime: 0,
historyPreviewLength: undefined
});
});
@@ -157,10 +163,10 @@ describe('addStatisticalDataToHistoryItem', () => {
it('should handle nested plugin and loop details', () => {
const item: ChatItemType = {
obj: ChatRoleEnum.AI,
value: 'test',
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
responseData: [
{
moduleType: FlowNodeTypeEnum.chatNode,
...mockResponseData,
runningTime: 1,
pluginDetail: [
{
@@ -184,7 +190,6 @@ describe('addStatisticalDataToHistoryItem', () => {
...item,
llmModuleAccount: 3,
totalQuoteList: [],
totalRunningTime: 1,
historyPreviewLength: undefined
});
});