Mirror of https://github.com/labring/FastGPT.git
v4.4.7-2 (#388)
@@ -4,7 +4,7 @@ import { TrainingModeEnum } from '@/constants/plugin';
 import { ERROR_ENUM } from '../errorCode';
 import { sendInform } from '@/pages/api/user/inform/send';
 import { authBalanceByUid } from '../utils/auth';
-import { axiosConfig, getAIChatApi } from '@fastgpt/core/ai/config';
+import { getAIApi } from '@fastgpt/core/ai/config';
 import type { ChatCompletionRequestMessage } from '@fastgpt/core/ai/type';
 import { addLog } from '../utils/tools';
 import { splitText2Chunks } from '@/utils/file';
@@ -58,8 +58,6 @@ export async function generateQA(): Promise<any> {
 
     const startTime = Date.now();
 
-    const chatAPI = getAIChatApi();
-
     // request LLM to get QA
     const text = data.q;
     const messages: ChatCompletionRequestMessage[] = [
@@ -73,19 +71,13 @@ export async function generateQA(): Promise<any> {
         })
       }
     ];
 
-    const { data: chatResponse } = await chatAPI.createChatCompletion(
-      {
-        model: global.qaModel.model,
-        temperature: 0.01,
-        messages,
-        stream: false
-      },
-      {
-        timeout: 480000,
-        ...axiosConfig()
-      }
-    );
-
+    const ai = getAIApi(undefined, 480000);
+    const chatResponse = await ai.chat.completions.create({
+      model: global.qaModel.model,
+      temperature: 0.01,
+      messages,
+      stream: false
+    });
     const answer = chatResponse.choices?.[0].message?.content;
     const totalTokens = chatResponse.usage?.total_tokens || 0;
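The change swaps the SDK-v3-style getAIChatApi() / createChatCompletion(body, axiosConfig) pair for the newer getAIApi helper, whose returned client exposes chat.completions.create; the 480000 ms timeout moves from the per-request axios options into the getAIApi call. Below is a minimal caller sketch in TypeScript, assuming getAIApi(account, timeoutMs) accepts optional per-user credentials and a timeout in milliseconds (both inferred from this diff, not confirmed elsewhere); the askQAModel helper is hypothetical.

import { getAIApi } from '@fastgpt/core/ai/config';
import type { ChatCompletionRequestMessage } from '@fastgpt/core/ai/type';

// Hypothetical helper for illustration; it does not exist in the repo.
// Assumption: getAIApi(account?, timeoutMs?) returns an OpenAI-v4-style client.
export async function askQAModel(model: string, messages: ChatCompletionRequestMessage[]) {
  // Timeout is now configured on the client instead of per request.
  const ai = getAIApi(undefined, 480000);

  const chatResponse = await ai.chat.completions.create({
    model,
    temperature: 0.01,
    messages,
    stream: false
  });

  // Response shape is unchanged: answer text plus token usage.
  const answer = chatResponse.choices?.[0]?.message?.content;
  const totalTokens = chatResponse.usage?.total_tokens || 0;
  return { answer, totalTokens };
}

Binding the timeout to the client keeps the request body identical to a plain chat-completion payload, which is what lets the old two-argument call collapse into a single options object.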