Files
FastGPT/test/mocks/core/ai/llm.ts
T
Archer 2ccb5b50c6 V4.14.4 features (#6036)
* feat: add query optimize and bill (#6021)

* add query optimize and bill

* perf: query extension

* fix: embe model

* remove log

* remove log

* fix: test

---------

Co-authored-by: xxyyh <2289112474@qq.com>
Co-authored-by: archer <545436317@qq.com>

* feat: notice (#6013)

* feat: record user's language

* feat: notice points/dataset indexes; support count limit; update docker-compose.yml

* fix: ts error

* feat: send auth code i18n

* chore: dataset notice limit

* chore: adjust

* fix: ts

* fix: countLimit race condition; i18n en-prefix locale fallback to en

---------

Co-authored-by: archer <545436317@qq.com>

* perf: comment

* perf: send inform code

* fix: type error (#6029)

* feat: add ip region for chat logs (#6010)

* feat: add ip region for chat logs

* refactor: use Geolite2.mmdb

* fix: export chat logs

* fix: return location directly

* test: add unit test

* perf: log show ip data

* adjust commercial plans (#6008)

* plan frontend

* plan limit

* coupon

* discount coupon

* fix

* type

* fix audit

* type

* plan name

* legacy plan

* track

* feat: add discount coupon

* fix

* fix discount coupon

* openapi

* type

* type

* env

* api type

* fix

* fix: simple agent plugin input & agent dashboard card (#6034)

* refactor: remove gridfs (#6031)

* fix: replace gridfs multer operations with s3 compatible ops

* wip: s3 features

* refactor: remove gridfs

* fix

* perf: mock test

* doc

* doc

* doc

* fix: test

* fix: s3

* fix: mock s3

* remove invalid config

* fix: init query extension

* initv4144 (#6037)

* chore: initv4144

* fix

* version

* fix: new plans (#6039)

* fix: new plans

* qr modal tip

* fix: buffer raw text filename (#6040)

* fix: initv4144 (#6041)

* fix: pay refresh (#6042)

* fix: migration shell

* rename collection

* clear timerlock

* clear timerlock

* perf: faq

* perf: bill schema

* fix: openapi

* doc

* fix: share var render

* feat: delete dataset queue

* plan usage display (#6043)

* plan usage display

* text

* fix

* fix: ts

* perf: remove invalid code

* perf: init shell

* doc

* perf: rename field

* perf: avatar presign

* init

* custom plan text (#6045)

* fix plans

* fix

* fixed

* computed

---------

Co-authored-by: archer <545436317@qq.com>

* init shell

* plan text & price page back button (#6046)

* init

* index

* delete dataset

* delete dataset

* perf: delete dataset

* init

---------

Co-authored-by: YeYuheng <57035043+YYH211@users.noreply.github.com>
Co-authored-by: xxyyh <2289112474@qq.com>
Co-authored-by: Finley Ge <32237950+FinleyGe@users.noreply.github.com>
Co-authored-by: Roy <whoeverimf5@gmail.com>
Co-authored-by: heheer <heheer@sealos.io>
2025-12-08 01:44:15 +08:00

140 lines
3.7 KiB
TypeScript

import { vi } from 'vitest';
import type { ChatCompletion } from '@fastgpt/global/core/ai/type';
/**
* Mock LLM response utilities for testing
*/
/**
 * Build a mock non-streaming chat completion carrying both a final answer and
 * a reasoning trace, imitating reasoning-capable models (e.g. o1).
 * Any field may be overridden via `options`; unset fields fall back to fixed
 * test defaults.
 */
export const createMockCompleteResponseWithReason = (options?: {
  content?: string;
  reasoningContent?: string;
  finishReason?: 'stop' | 'length' | 'content_filter';
  promptTokens?: number;
  completionTokens?: number;
}): ChatCompletion => {
  const opts = options || {};
  const answer = opts.content ?? 'This is the answer to your question.';
  const reasoning = opts.reasoningContent ?? 'First, I need to analyze the question...';
  const stopReason = opts.finishReason ?? 'stop';
  const inputTokens = opts.promptTokens ?? 100;
  const outputTokens = opts.completionTokens ?? 50;

  // `reasoning_content` is not part of the upstream SDK message type, hence
  // the cast on the assistant message.
  const assistantMessage = {
    role: 'assistant',
    content: answer,
    reasoning_content: reasoning,
    refusal: null
  } as any;

  return {
    id: `chatcmpl-${Date.now()}`,
    object: 'chat.completion',
    created: Math.floor(Date.now() / 1000),
    model: 'gpt-4o',
    choices: [
      {
        index: 0,
        message: assistantMessage,
        logprobs: null,
        finish_reason: stopReason
      }
    ],
    usage: {
      prompt_tokens: inputTokens,
      completion_tokens: outputTokens,
      total_tokens: inputTokens + outputTokens
    },
    system_fingerprint: 'fp_test'
  } as ChatCompletion;
};
/**
 * Build a mock non-streaming chat completion whose assistant message contains
 * tool calls, imitating a model that decided to invoke functions.
 * Object-valued `arguments` are JSON-serialized; string arguments pass through
 * untouched.
 */
export const createMockCompleteResponseWithTool = (options?: {
  toolCalls?: Array<{
    id?: string;
    name: string;
    arguments: string | Record<string, any>;
  }>;
  finishReason?: 'tool_calls' | 'stop';
  promptTokens?: number;
  completionTokens?: number;
}): ChatCompletion => {
  const opts = options || {};
  const inputTokens = opts.promptTokens ?? 120;
  const outputTokens = opts.completionTokens ?? 30;
  const requestedCalls = opts.toolCalls ?? [
    {
      id: 'call_test_001',
      name: 'get_weather',
      arguments: { location: 'Beijing', unit: 'celsius' }
    }
  ];

  // Normalize each requested call into the OpenAI tool_call wire format,
  // generating an id when the caller did not supply one.
  const toolCallPayload = requestedCalls.map((call, index) => {
    const serializedArgs =
      typeof call.arguments === 'string' ? call.arguments : JSON.stringify(call.arguments);
    return {
      id: call.id || `call_${Date.now()}_${index}`,
      type: 'function' as const,
      function: {
        name: call.name,
        arguments: serializedArgs
      }
    };
  });

  return {
    id: `chatcmpl-${Date.now()}`,
    object: 'chat.completion',
    created: Math.floor(Date.now() / 1000),
    model: 'gpt-4o',
    choices: [
      {
        index: 0,
        message: {
          role: 'assistant',
          content: null,
          refusal: null,
          tool_calls: toolCallPayload
        },
        logprobs: null,
        finish_reason: opts.finishReason ?? 'tool_calls'
      }
    ],
    usage: {
      prompt_tokens: inputTokens,
      completion_tokens: outputTokens,
      total_tokens: inputTokens + outputTokens
    },
    system_fingerprint: 'fp_test'
  } as ChatCompletion;
};
/**
 * Mock implementation for createChatCompletion.
 * Routes the request: when the body declares a non-empty `tools` list, the
 * mock answers with a tool-call response; otherwise it answers with the
 * reasoning + text response. Wrapped in vi.fn so tests can inspect calls or
 * override the implementation per test.
 */
export const mockCreateChatCompletion = vi.fn(
  async (body: any, options?: any): Promise<ChatCompletion> =>
    body.tools && body.tools.length > 0
      ? createMockCompleteResponseWithTool()
      : createMockCompleteResponseWithReason()
);
/**
 * Setup global mock for LLM request module.
 * NOTE(review): vitest hoists vi.mock calls, so every test importing
 * '@fastgpt/service/core/ai/llm/request' receives this mocked module.
 * All original exports are kept via importOriginal; only
 * createChatCompletion is swapped for mockCreateChatCompletion.
 */
vi.mock('@fastgpt/service/core/ai/llm/request', async (importOriginal) => {
  // Load the real module so untouched exports keep their behavior.
  const actual = (await importOriginal()) as any;
  return {
    ...actual,
    // Replace only the completion entry point with the configurable mock.
    createChatCompletion: mockCreateChatCompletion
  };
});