Mirror of https://github.com/labring/FastGPT.git, synced 2025-07-23 13:03:50 +00:00.

* update: Add type
* fix: update import statement for NextApiRequest type
* fix: update imports to use type for LexicalEditor and EditorState
* refactor: use 'import type' for type-only imports across multiple files
  - Updated imports in various components and API files to use 'import type' for clarity and to optimize TypeScript's type checking.
  - Ensured consistent usage of type imports in files related to chat, dataset, workflow, and user management.
  - Improved readability and maintainability by distinguishing between value and type imports.
* refactor: remove old ESLint configuration and add new rules
  - Deleted the old ESLint configuration file from the app project.
  - Added a new ESLint configuration file with updated rules and settings.
  - Changed imports to type-only imports in various files.
  - Updated TypeScript configuration to remove unnecessary options.
  - Added an ESLint ignore file to exclude build and dependency directories from linting.
* fix: use 'import type' for type-only imports in schema files
197 lines
5.4 KiB
TypeScript
import { describe, expect, it } from 'vitest';
|
|
import { ChatItemValueTypeEnum, ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
|
|
import { FlowNodeTypeEnum } from '@fastgpt/global/core/workflow/node/constant';
|
|
import type { ChatItemType } from '@fastgpt/global/core/chat/type';
|
|
import {
|
|
transformPreviewHistories,
|
|
addStatisticalDataToHistoryItem
|
|
} from '@/global/core/chat/utils';
|
|
|
|
// Shared base fixture for `responseData` entries used by both suites below.
// moduleType defaults to chatNode; individual tests spread this object and
// override moduleType / add runningTime, quoteList, etc. as needed.
const mockResponseData = {
  id: '1',
  nodeId: '1',
  moduleName: 'test',
  moduleType: FlowNodeTypeEnum.chatNode
};
|
|
|
|
describe('transformPreviewHistories', () => {
|
|
it('should transform histories correctly with responseDetail=true', () => {
|
|
const histories: ChatItemType[] = [
|
|
{
|
|
obj: ChatRoleEnum.AI,
|
|
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
|
|
responseData: [
|
|
{
|
|
...mockResponseData,
|
|
runningTime: 1.5
|
|
}
|
|
]
|
|
}
|
|
];
|
|
|
|
const result = transformPreviewHistories(histories, true);
|
|
|
|
expect(result[0]).toEqual({
|
|
obj: ChatRoleEnum.AI,
|
|
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
|
|
responseData: undefined,
|
|
llmModuleAccount: 1,
|
|
totalQuoteList: [],
|
|
historyPreviewLength: undefined
|
|
});
|
|
});
|
|
|
|
it('should transform histories correctly with responseDetail=false', () => {
|
|
const histories: ChatItemType[] = [
|
|
{
|
|
obj: ChatRoleEnum.AI,
|
|
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
|
|
responseData: [
|
|
{
|
|
...mockResponseData,
|
|
runningTime: 1.5
|
|
}
|
|
]
|
|
}
|
|
];
|
|
|
|
const result = transformPreviewHistories(histories, false);
|
|
|
|
expect(result[0]).toEqual({
|
|
obj: ChatRoleEnum.AI,
|
|
value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
|
|
responseData: undefined,
|
|
llmModuleAccount: 1,
|
|
totalQuoteList: undefined,
|
|
historyPreviewLength: undefined
|
|
});
|
|
});
|
|
});
|
|
|
|
describe('addStatisticalDataToHistoryItem', () => {
  // --- Pass-through cases: the function must return the SAME reference
  // (asserted with toBe, not toEqual) when there is nothing to compute. ---

  it('should return original item if obj is not AI', () => {
    const item: ChatItemType = {
      obj: ChatRoleEnum.Human,
      value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }]
    };

    expect(addStatisticalDataToHistoryItem(item)).toBe(item);
  });

  it('should return original item if totalQuoteList is already defined', () => {
    // Already-populated statistics must not be recomputed or replaced.
    const item: ChatItemType = {
      obj: ChatRoleEnum.AI,
      value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
      totalQuoteList: []
    };

    expect(addStatisticalDataToHistoryItem(item)).toBe(item);
  });

  it('should return original item if responseData is undefined', () => {
    const item: ChatItemType = {
      obj: ChatRoleEnum.AI,
      value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }]
    };

    expect(addStatisticalDataToHistoryItem(item)).toBe(item);
  });

  it('should calculate statistics correctly', () => {
    const item: ChatItemType = {
      obj: ChatRoleEnum.AI,
      value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
      responseData: [
        {
          ...mockResponseData,
          moduleType: FlowNodeTypeEnum.chatNode,
          runningTime: 1.5,
          historyPreview: [{ obj: ChatRoleEnum.AI, value: 'preview1' }]
        },
        {
          ...mockResponseData,
          moduleType: FlowNodeTypeEnum.datasetSearchNode,
          quoteList: [{ id: '1', q: 'test', a: 'answer' }],
          runningTime: 0.5
        },
        {
          ...mockResponseData,
          moduleType: FlowNodeTypeEnum.tools,
          runningTime: 1,
          toolDetail: [
            {
              moduleType: FlowNodeTypeEnum.chatNode,
              runningTime: 0.5
            }
          ]
        }
      ]
    };

    const result = addStatisticalDataToHistoryItem(item);

    // Expected per the implementation's counting rules (inferred from these
    // expectations — confirm against @/global/core/chat/utils):
    // - llmModuleAccount: 3 — presumably the two top-level chat/tools LLM
    //   nodes plus the chatNode nested in toolDetail; TODO confirm which
    //   module types are counted.
    // - totalQuoteList: quoteList entries gathered from dataset-search nodes.
    // - historyPreviewLength: 1 — length of the first historyPreview found.
    expect(result).toEqual({
      ...item,
      llmModuleAccount: 3,
      totalQuoteList: [{ id: '1', q: 'test', a: 'answer' }],
      historyPreviewLength: 1
    });
  });

  it('should handle empty arrays and undefined values', () => {
    // A single entry with no quoteList/historyPreview still yields defined
    // (empty) statistics rather than leaving them absent.
    const item: ChatItemType = {
      obj: ChatRoleEnum.AI,
      value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
      responseData: [
        {
          ...mockResponseData,
          runningTime: 0
        }
      ]
    };

    const result = addStatisticalDataToHistoryItem(item);

    expect(result).toEqual({
      ...item,
      llmModuleAccount: 1,
      totalQuoteList: [],
      historyPreviewLength: undefined
    });
  });

  it('should handle nested plugin and loop details', () => {
    // Nested pluginDetail and loopDetail entries are presumably flattened
    // into the same module count as toolDetail — TODO confirm in utils.
    const item: ChatItemType = {
      obj: ChatRoleEnum.AI,
      value: [{ type: ChatItemValueTypeEnum.text, text: { content: 'test response' } }],
      responseData: [
        {
          ...mockResponseData,
          runningTime: 1,
          pluginDetail: [
            {
              moduleType: FlowNodeTypeEnum.chatNode,
              runningTime: 0.5
            }
          ],
          loopDetail: [
            {
              moduleType: FlowNodeTypeEnum.tools,
              runningTime: 0.3
            }
          ]
        }
      ]
    };

    const result = addStatisticalDataToHistoryItem(item);

    // 3 = top-level entry + one pluginDetail node + one loopDetail node
    // (inferred from this expectation; verify against the implementation).
    expect(result).toEqual({
      ...item,
      llmModuleAccount: 3,
      totalQuoteList: [],
      historyPreviewLength: undefined
    });
  });
});
|