mirror of https://github.com/labring/FastGPT.git (synced 2025-07-23 05:12:39 +00:00)
perf: truncate chat context; refine QA prompts
@@ -35,7 +35,7 @@ export const modelList: ModelConstantsData[] = [
     model: ChatModelNameEnum.GPT35,
     trainName: '',
     maxToken: 4000,
-    contextMaxToken: 7500,
+    contextMaxToken: 7000,
     maxTemperature: 1.5,
     price: 3
   },
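Note: lowering contextMaxToken from 7500 to 7000 tightens the budget that openaiChatFilter (enabled below) may fill with chat history, leaving more headroom for the knowledge-base prompt and the completion itself.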
@@ -61,7 +61,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
   }

   // Cap the prompt at the token limit to avoid overflowing the context
-  // const filterPrompts = openaiChatFilter(prompts, modelConstantsData.contextMaxToken);
+  const filterPrompts = openaiChatFilter(prompts, modelConstantsData.contextMaxToken);

   // Format the messages into the chatgpt structure
   const map = {
@@ -69,14 +69,25 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
     AI: ChatCompletionRequestMessageRoleEnum.Assistant,
     SYSTEM: ChatCompletionRequestMessageRoleEnum.System
   };
-  const formatPrompts: ChatCompletionRequestMessage[] = prompts.map((item: ChatItemType) => ({
-    role: map[item.obj],
-    content: item.value
-  }));
-  // console.log(formatPrompts);
+  const formatPrompts: ChatCompletionRequestMessage[] = filterPrompts.map(
+    (item: ChatItemType) => ({
+      role: map[item.obj],
+      content: item.value
+    })
+  );

   // Compute the temperature
   const temperature = modelConstantsData.maxTemperature * (model.temperature / 10);

+  // console.log({
+  //   model: model.service.chatModel,
+  //   temperature: temperature,
+  //   // max_tokens: modelConstantsData.maxToken,
+  //   messages: formatPrompts,
+  //   frequency_penalty: 0.5, // larger values repeat less
+  //   presence_penalty: -0.5, // larger values bring in new content more readily
+  //   stream: true,
+  //   stop: ['.!?。']
+  // });
   // Get the chatAPI instance
   const chatAPI = getOpenAIApi(userApiKey || systemKey);
   // Send the request
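Note: the substantive fix in the two hunks above is that formatPrompts now maps over filterPrompts rather than the raw prompts array, so the token cap actually takes effect; before, the filtered list was either commented out or computed and then ignored. openaiChatFilter itself lives in @/service/utils/tools and is not part of this diff. The following is only a sketch of what such a history filter could look like, assuming the gpt-3-encoder tokenizer that the QA splitter below already uses; names and logic are illustrative, not the actual implementation.

import { encode } from 'gpt-3-encoder'; // assumed tokenizer; `encode` appears in the splitter hunk below
import type { ChatItemType } from '@/types/chat';

// Keep the newest messages until the token budget is spent, so
// truncation drops the oldest context first.
export function chatHistoryFilterSketch(prompts: ChatItemType[], maxToken: number): ChatItemType[] {
  const kept: ChatItemType[] = [];
  let used = 0;
  for (let i = prompts.length - 1; i >= 0; i--) {
    used += encode(prompts[i].value).length;
    if (used > maxToken) break; // budget exhausted: drop this message and everything older
    kept.unshift(prompts[i]); // unshift restores chronological order
  }
  return kept;
}

On the temperature line: modelConstantsData.maxTemperature * (model.temperature / 10) maps the user's 0-10 setting onto [0, maxTemperature]; a setting of 10 with maxTemperature 1.5 yields exactly 1.5.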
@@ -1,7 +1,7 @@
 import type { NextApiRequest, NextApiResponse } from 'next';
 import { connectToDatabase } from '@/service/mongo';
 import { authChat } from '@/service/utils/chat';
-import { httpsAgent, systemPromptFilter } from '@/service/utils/tools';
+import { httpsAgent, systemPromptFilter, openaiChatFilter } from '@/service/utils/tools';
 import { ChatCompletionRequestMessage, ChatCompletionRequestMessageRoleEnum } from 'openai';
 import { ChatItemType } from '@/types/chat';
 import { jsonRes } from '@/service/response';
@@ -79,7 +79,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
       `vector <=> '[${promptVector}]' < ${similarity}`
     ],
     order: [{ field: 'vector', mode: `<=> '[${promptVector}]'` }],
-    limit: 30
+    limit: 20
   });

   const formatRedisPrompt: string[] = vectorSearch.rows.map((item) => `${item.q}\n${item.a}`);
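Dropping limit from 30 to 20 shrinks the number of knowledge-base Q/A pairs injected into the system prompt, consistent with the tighter context budget above. Assuming the store is pgvector, <=> is its cosine-distance operator, so the query keeps the 20 nearest rows under the similarity cutoff; roughly the SQL shape the builder might emit (table name made up for illustration):

// Hypothetical SQL equivalent of the vector search above (assuming pgvector):
const sql = `
  SELECT q, a
  FROM model_data
  WHERE vector <=> '[${promptVector}]' < ${similarity}  -- cosine distance under the cutoff
  ORDER BY vector <=> '[${promptVector}]'               -- nearest neighbours first
  LIMIT 20
`;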
@@ -116,7 +116,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
   }

   // Cap the prompt at the token limit to avoid overflowing the context
-  // const filterPrompts = openaiChatFilter(prompts, modelConstantsData.contextMaxToken);
+  const filterPrompts = openaiChatFilter(prompts, modelConstantsData.contextMaxToken);

   // Format the messages into the chatgpt structure
   const map = {
@@ -124,10 +124,12 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
     AI: ChatCompletionRequestMessageRoleEnum.Assistant,
     SYSTEM: ChatCompletionRequestMessageRoleEnum.System
   };
-  const formatPrompts: ChatCompletionRequestMessage[] = prompts.map((item: ChatItemType) => ({
-    role: map[item.obj],
-    content: item.value
-  }));
+  const formatPrompts: ChatCompletionRequestMessage[] = filterPrompts.map(
+    (item: ChatItemType) => ({
+      role: map[item.obj],
+      content: item.value
+    })
+  );
   // console.log(formatPrompts);
   // Compute the temperature
   const temperature = modelConstantsData.maxTemperature * (model.temperature / 10);
@@ -41,11 +41,11 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
     const tokens = encode(splitText + chunk).length;
     if (tokens >= 4000) {
       // Over 4000 tokens: flush the accumulated text without this chunk
-      textList.push(splitText);
+      splitText && textList.push(splitText);
       splitText = chunk;
     } else if (tokens >= 3000) {
       // Over 3000 tokens: flush the accumulated text including this chunk
-      textList.push(splitText + chunk);
+      splitText && textList.push(splitText + chunk);
       splitText = '';
     } else {
       // Under 3000 tokens: keep accumulating
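The two added `splitText &&` guards stop empty strings from being pushed into textList when the very first chunk already crosses a threshold. For orientation, a hypothetical reconstruction of the loop around this hunk (the surrounding code is not shown in the diff, so treat this as a sketch):

import { encode } from 'gpt-3-encoder'; // the tokenizer behind the `encode` call above

// Accumulate chunks into batches of roughly 3000-4000 tokens.
function splitToTokenBatchesSketch(chunks: string[]): string[] {
  const textList: string[] = [];
  let splitText = '';
  for (const chunk of chunks) {
    const tokens = encode(splitText + chunk).length;
    if (tokens >= 4000) {
      // over 4000: flush the accumulated text *without* this chunk
      splitText && textList.push(splitText);
      splitText = chunk;
    } else if (tokens >= 3000) {
      // 3000-4000: flush the accumulated text *with* this chunk
      splitText && textList.push(splitText + chunk);
      splitText = '';
    } else {
      // under 3000: keep accumulating
      splitText += chunk;
    }
  }
  splitText && textList.push(splitText); // flush the tail
  return textList;
}

One side effect worth noting: with nothing accumulated yet, a single chunk that lands in the 3000-4000 band now fails the guard and is dropped rather than pushed on its own.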
@@ -74,16 +74,21 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
     });
   }

+  // Cap the prompt at the token limit to avoid overflowing the context
+  const filterPrompts = openaiChatFilter(prompts, modelConstantsData.contextMaxToken);
+
   // Format the messages into the chatgpt structure
   const map = {
     Human: ChatCompletionRequestMessageRoleEnum.User,
     AI: ChatCompletionRequestMessageRoleEnum.Assistant,
     SYSTEM: ChatCompletionRequestMessageRoleEnum.System
   };
-  const formatPrompts: ChatCompletionRequestMessage[] = prompts.map((item: ChatItemType) => ({
-    role: map[item.obj],
-    content: item.value
-  }));
+  const formatPrompts: ChatCompletionRequestMessage[] = filterPrompts.map(
+    (item: ChatItemType) => ({
+      role: map[item.obj],
+      content: item.value
+    })
+  );
   // console.log(formatPrompts);
   // Compute the temperature
   const temperature = modelConstantsData.maxTemperature * (model.temperature / 10);
@@ -1,6 +1,11 @@
 import type { NextApiRequest, NextApiResponse } from 'next';
 import { connectToDatabase, Model } from '@/service/mongo';
-import { httpsAgent, systemPromptFilter, authOpenApiKey } from '@/service/utils/tools';
+import {
+  httpsAgent,
+  systemPromptFilter,
+  authOpenApiKey,
+  openaiChatFilter
+} from '@/service/utils/tools';
 import { ChatCompletionRequestMessage, ChatCompletionRequestMessageRoleEnum } from 'openai';
 import { ChatItemType } from '@/types/chat';
 import { jsonRes } from '@/service/response';
@@ -93,7 +98,7 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
       `vector <=> '[${promptVector}]' < ${similarity}`
     ],
     order: [{ field: 'vector', mode: `<=> '[${promptVector}]'` }],
-    limit: 30
+    limit: 20
   });

   const formatRedisPrompt: string[] = vectorSearch.rows.map((item) => `${item.q}\n${item.a}`);
@@ -134,16 +139,21 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
     });
   }

+  // Cap the prompt at the token limit to avoid overflowing the context
+  const filterPrompts = openaiChatFilter(prompts, modelConstantsData.contextMaxToken);
+
   // Format the messages into the chatgpt structure
   const map = {
     Human: ChatCompletionRequestMessageRoleEnum.User,
     AI: ChatCompletionRequestMessageRoleEnum.Assistant,
     SYSTEM: ChatCompletionRequestMessageRoleEnum.System
   };
-  const formatPrompts: ChatCompletionRequestMessage[] = prompts.map((item: ChatItemType) => ({
-    role: map[item.obj],
-    content: item.value
-  }));
+  const formatPrompts: ChatCompletionRequestMessage[] = filterPrompts.map(
+    (item: ChatItemType) => ({
+      role: map[item.obj],
+      content: item.value
+    })
+  );
   // console.log(formatPrompts);
   // Compute the temperature
   const temperature = modelConstantsData.maxTemperature * (model.temperature / 10);
@@ -88,7 +88,7 @@ const Chat = ({ chatId }: { chatId: string }) => {
     throttle(() => {
       if (!ChatBox.current) return;
       const isBottom =
-        ChatBox.current.scrollTop + ChatBox.current.clientHeight + 80 >=
+        ChatBox.current.scrollTop + ChatBox.current.clientHeight + 150 >=
         ChatBox.current.scrollHeight;

       isBottom && scrollToBottom('auto');
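Widening the threshold from 80 px to 150 px makes auto-scroll stickier while a reply streams in: the view now snaps to the bottom whenever the user is within 150 px of it, instead of only within 80 px.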
@@ -86,7 +86,7 @@ const SelectFileModal = ({
       await postModelDataSplitData({
         modelId,
         text: fileText.replace(/\\n/g, '\n').replace(/\n+/g, '\n'),
-        prompt: `下面是${prompt || '一段长文本'}`
+        prompt: `下面是"${prompt || '一段长文本'}"`
       });
       toast({
         title: '导入数据成功,需要一段拆解和训练',
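The only change is the added quotes: 下面是"${prompt || '一段长文本'}" reads roughly as `Below is "<topic>"`, delimiting the user-supplied topic inside the QA prompt to match the quoted form the QA splitter now expects (see the generateQA hunk below). The same change is applied to the URL importer next.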
@@ -45,7 +45,7 @@ const SelectUrlModal = ({
       await postModelDataSplitData({
         modelId,
         text: webText,
-        prompt: `下面是${prompt || '一段长文本'}`
+        prompt: `下面是"${prompt || '一段长文本'}"`
       });
       toast({
         title: '导入数据成功,需要一段拆解和训练',
@@ -69,9 +69,9 @@ export async function generateQA(next = false): Promise<any> {
   const chatAPI = getOpenAIApi(userApiKey || systemKey);
   const systemPrompt: ChatCompletionRequestMessage = {
     role: 'system',
-    content: `${
-      dataItem.prompt || '下面是一段长文本'
-    },请从中提取出5至30个问题和答案,并按以下格式返回: Q1:\nA1:\nQ2:\nA2:\n`
+    content: `你是出题官.${
+      dataItem.prompt || '下面是"一段长文本"'
+    },从中选出5至20个题目和答案,题目包含问答题,计算题,代码题等.答案要详细.按格式返回: Q1:\nA1:\nQ2:\nA2:\n`
   };

   // Request chatgpt for the completion
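For reference, the old system prompt translates roughly as: "{prompt || 'Below is a long passage'}, extract 5 to 30 questions and answers from it and return them in this format: Q1:\nA1:\nQ2:\nA2:\n". The new one: "You are an exam-question setter. {prompt || 'Below is \"a long passage\"'}, select 5 to 20 questions with answers from it; questions should include Q&A, calculation and code questions, and answers should be detailed. Return in the format: Q1:\nA1:\nQ2:\nA2:\n".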
@@ -114,7 +114,8 @@ export async function generateQA(next = false): Promise<any> {
       };
     })
     .catch((err) => {
-      console.log('QA 拆分错误', err);
+      console.log('QA拆分错误');
+      console.log(err.response?.status, err.response?.statusText, err.response?.data);
       return Promise.reject(err);
     })
 )
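Splitting the log keeps the message short and, since the openai SDK used here surfaces axios-style errors, err.response?.status / statusText / data expose the API's actual HTTP failure instead of dumping the whole error object.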