Mirror of https://github.com/labring/FastGPT.git (synced 2025-08-01 03:48:24 +00:00)

Commit: v4.5 (#403)
@@ -1,12 +1,12 @@
 import { Bill } from '@/service/mongo';
 import { MongoUser } from '@fastgpt/support/user/schema';
 import { BillSourceEnum } from '@/constants/user';
-import { getModel } from '@/service/utils/data';
+import { getModelMap, ModelTypeEnum } from '@/service/core/ai/model';
 import { ChatHistoryItemResType } from '@/types/chat';
 import { formatPrice } from '@fastgpt/common/bill/index';
 import { addLog } from '@/service/utils/tools';
 import type { CreateBillType } from '@/types/common/bill';
-import { defaultQGModel } from '@/pages/api/system/getInitData';
+import { defaultQGModels } from '@/constants/model';

 async function createBill(data: CreateBillType) {
   try {
@@ -106,7 +106,7 @@ export const pushQABill = async ({
   addLog.info('splitData generate success', { totalTokens });

   // Get the model unit price (QA splitting always uses gpt35)
-  const unitPrice = global.qaModel.price || 3;
+  const unitPrice = global.qaModels?.[0]?.price || 3;
   // Calculate the price
   const total = unitPrice * totalTokens;

@@ -158,7 +158,7 @@ export const pushGenerateVectorBill = async ({
     {
       moduleName: '索引生成',
       amount: total,
-      model: vectorModel.model,
+      model: vectorModel.name,
       tokenLen
     }
   ]
@@ -167,14 +167,22 @@ export const pushGenerateVectorBill = async ({
   return { total };
 };

-export const countModelPrice = ({ model, tokens }: { model: string; tokens: number }) => {
-  const modelData = getModel(model);
+export const countModelPrice = ({
+  model,
+  tokens,
+  type
+}: {
+  model: string;
+  tokens: number;
+  type: `${ModelTypeEnum}`;
+}) => {
+  const modelData = getModelMap?.[type]?.(model);
   if (!modelData) return 0;
   return modelData.price * tokens;
 };

 export const pushQuestionGuideBill = ({ tokens, userId }: { tokens: number; userId: string }) => {
-  const qgModel = global.qgModel || defaultQGModel;
+  const qgModel = global.qgModels?.[0] || defaultQGModels[0];
   const total = qgModel.price * tokens;
   createBill({
     userId,
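A hedged usage sketch of the reworked countModelPrice: the new `type` field picks the lookup function out of getModelMap, so the same token count can be priced against whichever model table applies. The model names and token counts below are made up for illustration.

```ts
import { countModelPrice } from '@/service/common/bill/push';
import { ModelTypeEnum } from '@/service/core/ai/model';

// Hypothetical example: price 1200 tokens against the chat model table.
// If 'gpt-3.5-turbo' is not configured in global.chatModels, the lookup
// falls back to a default chat model rather than returning undefined.
const chatPrice = countModelPrice({
  model: 'gpt-3.5-turbo',
  tokens: 1200,
  type: ModelTypeEnum.chat
});

// Swapping the type routes the same call through the vector model lookup.
const vectorPrice = countModelPrice({
  model: 'text-embedding-ada-002',
  tokens: 1200,
  type: ModelTypeEnum.vector
});

console.log({ chatPrice, vectorPrice });
```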
@@ -1,39 +0,0 @@
-import type { NextApiResponse } from 'next';
-
-export function responseWriteController({
-  res,
-  readStream
-}: {
-  res: NextApiResponse;
-  readStream: any;
-}) {
-  res.on('drain', () => {
-    readStream.resume();
-  });
-
-  return (text: string | Buffer) => {
-    const writeResult = res.write(text);
-    if (!writeResult) {
-      readStream.pause();
-    }
-  };
-}
-
-export function responseWrite({
-  res,
-  write,
-  event,
-  data
-}: {
-  res?: NextApiResponse;
-  write?: (text: string) => void;
-  event?: string;
-  data: string;
-}) {
-  const Write = write || res?.write;
-
-  if (!Write) return;
-
-  event && Write(`event: ${event}\n`);
-  Write(`data: ${data}\n\n`);
-}
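These helpers move to @fastgpt/common/tools/stream (the import rewrites later in this diff point there). As a rough sketch of the backpressure pattern responseWriteController implements — pause the source whenever res.write() returns false, resume on the response's 'drain' event — assuming an ordinary Node read stream and a hypothetical file path:

```ts
import type { NextApiResponse } from 'next';
import { createReadStream } from 'fs';
// Assumed post-commit location of the helper.
import { responseWriteController } from '@fastgpt/common/tools/stream';

// Sketch: stream a file to the client without overrunning the socket buffer.
// The controller pauses `readStream` whenever res.write() reports a full
// buffer and resumes it once the response emits 'drain'.
export default function handler(_req: unknown, res: NextApiResponse) {
  const readStream = createReadStream('/tmp/example.txt'); // hypothetical path
  const write = responseWriteController({ res, readStream });

  readStream.on('data', (chunk) => write(chunk));
  readStream.on('end', () => res.end());
}
```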
projects/app/src/service/core/ai/model.ts (new file, 68 lines)
@@ -0,0 +1,68 @@
+import {
+  defaultChatModels,
+  defaultCQModels,
+  defaultExtractModels,
+  defaultQAModels,
+  defaultQGModels,
+  defaultVectorModels
+} from '@/constants/model';
+
+export const getChatModel = (model?: string) => {
+  return (
+    (global.chatModels || defaultChatModels).find((item) => item.model === model) ||
+    defaultChatModels[0]
+  );
+};
+export const getQAModel = (model?: string) => {
+  return (
+    (global.qaModels || defaultQAModels).find((item) => item.model === model) ||
+    global.qaModels?.[0] ||
+    defaultQAModels[0]
+  );
+};
+export const getCQModel = (model?: string) => {
+  return (
+    (global.cqModels || defaultCQModels).find((item) => item.model === model) ||
+    global.cqModels?.[0] ||
+    defaultCQModels[0]
+  );
+};
+export const getExtractModel = (model?: string) => {
+  return (
+    (global.extractModels || defaultExtractModels).find((item) => item.model === model) ||
+    global.extractModels?.[0] ||
+    defaultExtractModels[0]
+  );
+};
+export const getQGModel = (model?: string) => {
+  return (
+    (global.qgModels || defaultQGModels).find((item) => item.model === model) ||
+    global.qgModels?.[0] ||
+    defaultQGModels[0]
+  );
+};
+
+export const getVectorModel = (model?: string) => {
+  return (
+    global.vectorModels.find((item) => item.model === model) ||
+    global.vectorModels?.[0] ||
+    defaultVectorModels[0]
+  );
+};
+
+export enum ModelTypeEnum {
+  chat = 'chat',
+  qa = 'qa',
+  cq = 'cq',
+  extract = 'extract',
+  qg = 'qg',
+  vector = 'vector'
+}
+export const getModelMap = {
+  [ModelTypeEnum.chat]: getChatModel,
+  [ModelTypeEnum.qa]: getQAModel,
+  [ModelTypeEnum.cq]: getCQModel,
+  [ModelTypeEnum.extract]: getExtractModel,
+  [ModelTypeEnum.qg]: getQGModel,
+  [ModelTypeEnum.vector]: getVectorModel
+};
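A small sketch of the intended lookup behaviour, using hypothetical model names: each getter searches its global list (or the defaults from @/constants/model) and falls back to the first configured entry instead of returning undefined, while getModelMap exposes the same getters keyed by ModelTypeEnum.

```ts
import { getQAModel, getModelMap, ModelTypeEnum } from '@/service/core/ai/model';

// Exact match: returns the configured QA model whose `model` field matches.
const qaModel = getQAModel('gpt-3.5-turbo-16k'); // hypothetical name

// Unknown name: falls back to the first configured QA model rather than
// returning undefined, so callers such as pushQABill can always read `.price`.
const fallback = getQAModel('not-a-real-model');

// getModelMap gives the same lookups a uniform entry point keyed by
// ModelTypeEnum — this is what countModelPrice dispatches through.
const viaMap = getModelMap[ModelTypeEnum.qa]('gpt-3.5-turbo-16k');

console.log(qaModel.model, fallback.model, viaMap.model);
```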
projects/app/src/service/core/app/module.ts (new file, 12 lines)
@@ -0,0 +1,12 @@
+import { FlowModuleTypeEnum } from '@/constants/flow';
+import { AppModuleItemType } from '@/types/app';
+
+export const getChatModelNameListByModules = (modules: AppModuleItemType[]): string[] => {
+  const chatModules = modules.filter((item) => item.flowType === FlowModuleTypeEnum.chatNode);
+  return chatModules
+    .map((item) => {
+      const model = item.inputs.find((input) => input.key === 'model')?.value;
+      return global.chatModels.find((item) => item.model === model)?.name || '';
+    })
+    .filter((item) => item);
+};
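A hedged sketch of how getChatModelNameListByModules might be called: the module objects below are heavily trimmed stand-ins for AppModuleItemType, and the model and display names are invented.

```ts
import { getChatModelNameListByModules } from '@/service/core/app/module';
import { FlowModuleTypeEnum } from '@/constants/flow';
import type { AppModuleItemType } from '@/types/app';

// Hypothetical, heavily trimmed module list: one chat node that selects a
// model via its 'model' input, plus a non-chat node that gets filtered out.
const modules = [
  {
    flowType: FlowModuleTypeEnum.chatNode,
    inputs: [{ key: 'model', value: 'gpt-3.5-turbo' }]
  },
  {
    flowType: FlowModuleTypeEnum.kbSearchNode,
    inputs: []
  }
] as unknown as AppModuleItemType[];

// Collects the display names of the referenced chat models from
// global.chatModels, dropping empty strings for unconfigured models.
const names = getChatModelNameListByModules(modules);
console.log(names); // e.g. ['GPT-3.5']
```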
@@ -73,7 +73,7 @@ export async function generateQA(): Promise<any> {
     ];
     const ai = getAIApi(undefined, 480000);
     const chatResponse = await ai.chat.completions.create({
-      model: global.qaModel.model,
+      model: global.qaModels[0].model,
       temperature: 0.01,
       messages,
       stream: false
@@ -10,9 +10,11 @@ import { FlowModuleTypeEnum } from '@/constants/flow';
 import type { ModuleDispatchProps } from '@/types/core/chat/type';
 import { replaceVariable } from '@/utils/common/tools/text';
 import { Prompt_CQJson } from '@/global/core/prompt/agent';
-import { defaultCQModel } from '@/pages/api/system/getInitData';
+import { FunctionModelItemType } from '@/types/model';
+import { getCQModel } from '@/service/core/ai/model';

 type Props = ModuleDispatchProps<{
+  model: string;
   systemPrompt?: string;
   history?: ChatItemType[];
   [SystemInputEnum.userChatInput]: string;
@@ -30,20 +32,26 @@ export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse
   const {
     moduleName,
     user,
-    inputs: { agents, userChatInput }
+    inputs: { model, agents, userChatInput }
   } = props as Props;

   if (!userChatInput) {
     return Promise.reject('Input is empty');
   }

-  const cqModel = global.cqModel || defaultCQModel;
+  const cqModel = getCQModel(model);

   const { arg, tokens } = await (async () => {
     if (cqModel.functionCall) {
-      return functionCall(props);
+      return functionCall({
+        ...props,
+        cqModel
+      });
     }
-    return completions(props);
+    return completions({
+      ...props,
+      cqModel
+    });
   })();

   const result = agents.find((item) => item.key === arg?.type) || agents[agents.length - 1];
@@ -64,45 +72,45 @@ export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse

 async function functionCall({
   user,
+  cqModel,
   inputs: { agents, systemPrompt, history = [], userChatInput }
-}: Props) {
-  const cqModel = global.cqModel;
-
+}: Props & { cqModel: FunctionModelItemType }) {
   const messages: ChatItemType[] = [
-    ...(systemPrompt
-      ? [
-          {
-            obj: ChatRoleEnum.System,
-            value: systemPrompt
-          }
-        ]
-      : []),
     ...history,
     {
       obj: ChatRoleEnum.Human,
-      value: userChatInput
+      value: systemPrompt
+        ? `补充的背景知识:
+"""
+${systemPrompt}
+"""
+我的问题: ${userChatInput}
+`
+        : userChatInput
     }
   ];

   const filterMessages = ChatContextFilter({
     messages,
     maxTokens: cqModel.maxToken
   });
   const adaptMessages = adaptChat2GptMessages({ messages: filterMessages, reserveId: false });

   // function body
   const agentFunction = {
     name: agentFunName,
-    description: '判断用户问题的类型属于哪方面,返回对应的字段',
+    description: '请根据对话记录及补充的背景知识,判断用户的问题类型,并返回对应的字段',
     parameters: {
       type: 'object',
       properties: {
         type: {
           type: 'string',
-          description: agents.map((item) => `${item.value},返回:'${item.key}'`).join(';'),
+          description: `判断用户的问题类型,并返回对应的字段。下面是几种问题类型: ${agents
+            .map((item) => `${item.value},返回:'${item.key}'`)
+            .join(';')}`,
           enum: agents.map((item) => item.key)
         }
       },
       required: ['type']
     }
   };
   const ai = getAIApi(user.openaiAccount, 48000);
@@ -133,15 +141,14 @@ async function functionCall({
 }

 async function completions({
+  cqModel,
   user,
   inputs: { agents, systemPrompt = '', history = [], userChatInput }
-}: Props) {
-  const extractModel = global.extractModel;
-
+}: Props & { cqModel: FunctionModelItemType }) {
   const messages: ChatItemType[] = [
     {
       obj: ChatRoleEnum.Human,
-      value: replaceVariable(extractModel.prompt || Prompt_CQJson, {
+      value: replaceVariable(cqModel.functionPrompt || Prompt_CQJson, {
         systemPrompt,
         typeList: agents.map((item) => `ID: "${item.key}", 问题类型:${item.value}`).join('\n'),
         text: `${history.map((item) => `${item.obj}:${item.value}`).join('\n')}
@@ -153,7 +160,7 @@ Human:${userChatInput}`
   const ai = getAIApi(user.openaiAccount, 480000);

   const data = await ai.chat.completions.create({
-    model: extractModel.model,
+    model: cqModel.model,
     temperature: 0.01,
     messages: adaptChat2GptMessages({ messages, reserveId: false }),
     stream: false
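A simplified sketch of the branch introduced above, with local stubs standing in for the module-private functionCall/completions helpers: the dispatcher now resolves the classification model from the module's own `model` input instead of global.cqModel and hands it down explicitly, using function calling only when the model supports it.

```ts
import { getCQModel } from '@/service/core/ai/model';
import type { FunctionModelItemType } from '@/types/model';

// Illustrative stand-ins for the module-private helpers shown in the diff;
// the real functionCall/completions build the messages and call the AI API.
async function functionCall({ cqModel }: { cqModel: FunctionModelItemType }) {
  return { tokens: 0, arg: { type: 'other' }, via: cqModel.model };
}
async function completions({ cqModel }: { cqModel: FunctionModelItemType }) {
  return { tokens: 0, arg: { type: 'other' }, via: cqModel.model };
}

// Resolve the model the flow editor selected and pick the strategy it supports.
async function classifyWith(model: string) {
  const cqModel = getCQModel(model);
  return cqModel.functionCall
    ? functionCall({ cqModel })
    : completions({ cqModel });
}
```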
@@ -9,7 +9,7 @@ import { FlowModuleTypeEnum } from '@/constants/flow';
 import type { ModuleDispatchProps } from '@/types/core/chat/type';
 import { Prompt_ExtractJson } from '@/global/core/prompt/agent';
 import { replaceVariable } from '@/utils/common/tools/text';
-import { defaultExtractModel } from '@/pages/api/system/getInitData';
+import { FunctionModelItemType } from '@/types/model';

 type Props = ModuleDispatchProps<{
   history?: ChatItemType[];
@@ -37,13 +37,19 @@ export async function dispatchContentExtract(props: Props): Promise<Response> {
     return Promise.reject('Input is empty');
   }

-  const extractModel = global.extractModel || defaultExtractModel;
+  const extractModel = global.extractModels[0];

   const { arg, tokens } = await (async () => {
     if (extractModel.functionCall) {
-      return functionCall(props);
+      return functionCall({
+        ...props,
+        extractModel
+      });
     }
-    return completions(props);
+    return completions({
+      ...props,
+      extractModel
+    });
   })();

   // remove invalid key
@@ -83,11 +89,10 @@ export async function dispatchContentExtract(props: Props): Promise<Response> {
 }

 async function functionCall({
+  extractModel,
   user,
   inputs: { history = [], content, extractKeys, description }
-}: Props) {
-  const extractModel = global.extractModel;
-
+}: Props & { extractModel: FunctionModelItemType }) {
   const messages: ChatItemType[] = [
     ...history,
     {
@@ -152,15 +157,14 @@ async function functionCall({
 }

 async function completions({
+  extractModel,
   user,
   inputs: { history = [], content, extractKeys, description }
-}: Props) {
-  const extractModel = global.extractModel;
-
+}: Props & { extractModel: FunctionModelItemType }) {
   const messages: ChatItemType[] = [
     {
       obj: ChatRoleEnum.Human,
-      value: replaceVariable(extractModel.prompt || Prompt_ExtractJson, {
+      value: replaceVariable(extractModel.functionPrompt || Prompt_ExtractJson, {
         description,
         json: extractKeys
           .map(
@@ -7,7 +7,6 @@ import { textAdaptGptResponse } from '@/utils/adapt';
 import { getAIApi } from '@fastgpt/core/ai/config';
 import type { ChatCompletion, StreamChatType } from '@fastgpt/core/ai/type';
 import { TaskResponseKeyEnum } from '@/constants/chat';
-import { getChatModel } from '@/service/utils/data';
 import { countModelPrice } from '@/service/common/bill/push';
 import { ChatModelItemType } from '@/types/model';
 import { postTextCensor } from '@fastgpt/common/plusApi/censor';
@@ -15,12 +14,13 @@ import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/core/ai/constant'
 import { AppModuleItemType } from '@/types/app';
 import { countMessagesTokens, sliceMessagesTB } from '@/utils/common/tiktoken';
 import { adaptChat2GptMessages } from '@/utils/common/adapt/message';
-import { defaultQuotePrompt, defaultQuoteTemplate } from '@/global/core/prompt/AIChat';
+import { Prompt_QuotePromptList, Prompt_QuoteTemplateList } from '@/global/core/prompt/AIChat';
 import type { AIChatProps } from '@/types/core/aiChat';
 import { replaceVariable } from '@/utils/common/tools/text';
 import { FlowModuleTypeEnum } from '@/constants/flow';
 import type { ModuleDispatchProps } from '@/types/core/chat/type';
-import { responseWrite, responseWriteController } from '@/service/common/stream';
+import { responseWrite, responseWriteController } from '@fastgpt/common/tools/stream';
+import { getChatModel, ModelTypeEnum } from '@/service/core/ai/model';

 export type ChatProps = ModuleDispatchProps<
   AIChatProps & {
@@ -47,12 +47,13 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
   user,
   outputs,
   inputs: {
-    model = global.chatModels[0]?.model,
+    model,
     temperature = 0,
     maxToken = 4000,
     history = [],
     quoteQA = [],
     userChatInput,
+    isResponseAnswerText = true,
     systemPrompt = '',
     limitPrompt,
     quoteTemplate,
@@ -63,6 +64,8 @@
     return Promise.reject('Question is empty');
   }

+  stream = stream && isResponseAnswerText;
+
   // temperature adapt
   const modelConstantsData = getChatModel(model);

@@ -110,18 +113,18 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
       model,
       temperature,
       max_tokens,
-      stream,
       messages: [
-        ...(modelConstantsData.defaultSystem
+        ...(modelConstantsData.defaultSystemChatPrompt
           ? [
               {
                 role: ChatCompletionRequestMessageRoleEnum.System,
-                content: modelConstantsData.defaultSystem
+                content: modelConstantsData.defaultSystemChatPrompt
               }
             ]
           : []),
         ...messages
-      ]
+      ],
+      stream
     });

     const { answerText, totalTokens, completeMessages } = await (async () => {
@@ -172,7 +175,9 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
     [TaskResponseKeyEnum.responseData]: {
       moduleType: FlowModuleTypeEnum.chatNode,
       moduleName,
-      price: user.openaiAccount?.key ? 0 : countModelPrice({ model, tokens: totalTokens }),
+      price: user.openaiAccount?.key
+        ? 0
+        : countModelPrice({ model, tokens: totalTokens, type: ModelTypeEnum.chat }),
       model: modelConstantsData.name,
       tokens: totalTokens,
       question: userChatInput,
@@ -198,7 +203,7 @@ function filterQuote({
     maxTokens: model.quoteMaxToken,
     messages: quoteQA.map((item, index) => ({
       obj: ChatRoleEnum.System,
-      value: replaceVariable(quoteTemplate || defaultQuoteTemplate, {
+      value: replaceVariable(quoteTemplate || Prompt_QuoteTemplateList[0].value, {
         ...item,
         index: index + 1
       })
@@ -212,7 +217,7 @@ function filterQuote({
     filterQuoteQA.length > 0
       ? `${filterQuoteQA
           .map((item, index) =>
-            replaceVariable(quoteTemplate || defaultQuoteTemplate, {
+            replaceVariable(quoteTemplate || Prompt_QuoteTemplateList[0].value, {
               ...item,
               index: `${index + 1}`
             })
@@ -243,7 +248,7 @@ function getChatMessages({
   model: ChatModelItemType;
 }) {
   const question = quoteText
-    ? replaceVariable(quotePrompt || defaultQuotePrompt, {
+    ? replaceVariable(quotePrompt || Prompt_QuotePromptList[0].value, {
         quote: quoteText,
         question: userChatInput
       })
@@ -275,7 +280,7 @@ function getChatMessages({

   const filterMessages = ChatContextFilter({
     messages,
-    maxTokens: Math.ceil(model.contextMaxToken - 300) // filter token. not response maxToken
+    maxTokens: Math.ceil(model.maxToken - 300) // filter token. not response maxToken
   });

   const adaptMessages = adaptChat2GptMessages({ messages: filterMessages, reserveId: false });
@@ -294,7 +299,7 @@ function getMaxTokens({
   model: ChatModelItemType;
   filterMessages: ChatProps['inputs']['history'];
 }) {
-  const tokensLimit = model.contextMaxToken;
+  const tokensLimit = model.maxToken;
   /* count response max token */

   const promptsToken = countMessagesTokens({
@@ -349,7 +354,7 @@ async function streamResponse({
       stream.controller?.abort();
       break;
     }
-    const content = part.choices[0]?.delta?.content || '';
+    const content = part.choices?.[0]?.delta?.content || '';
     answer += content;

     responseWrite({
@@ -8,6 +8,7 @@ import type { QuoteItemType } from '@/types/chat';
 import { PgDatasetTableName } from '@/constants/plugin';
 import { FlowModuleTypeEnum } from '@/constants/flow';
 import type { ModuleDispatchProps } from '@/types/core/chat/type';
+import { ModelTypeEnum } from '@/service/core/ai/model';
 type KBSearchProps = ModuleDispatchProps<{
   kbList: SelectedDatasetType;
   similarity: number;
@@ -66,7 +67,11 @@ export async function dispatchKBSearch(props: Record<string, any>): Promise<KBSe
     responseData: {
       moduleType: FlowModuleTypeEnum.kbSearchNode,
       moduleName,
-      price: countModelPrice({ model: vectorModel.model, tokens: tokenLen }),
+      price: countModelPrice({
+        model: vectorModel.model,
+        tokens: tokenLen,
+        type: ModelTypeEnum.vector
+      }),
       model: vectorModel.name,
       tokens: tokenLen,
       similarity,
@@ -1,5 +1,5 @@
 import { sseResponseEventEnum, TaskResponseKeyEnum } from '@/constants/chat';
-import { sseResponse } from '@/service/utils/tools';
+import { responseWrite } from '@fastgpt/common/tools/stream';
 import { textAdaptGptResponse } from '@/utils/adapt';
 import type { ModuleDispatchProps } from '@/types/core/chat/type';
 export type AnswerProps = ModuleDispatchProps<{
@@ -21,7 +21,7 @@ export const dispatchAnswer = (props: Record<string, any>): AnswerResponse => {
   const formatText = typeof text === 'string' ? text : JSON.stringify(text, null, 2);

   if (stream) {
-    sseResponse({
+    responseWrite({
       res,
       event: detail ? sseResponseEventEnum.answer : undefined,
       data: textAdaptGptResponse({
@@ -3,7 +3,7 @@ import type { ModuleDispatchProps } from '@/types/core/chat/type';
 import { SelectAppItemType } from '@/types/core/app/flow';
 import { dispatchModules } from '@/pages/api/v1/chat/completions';
 import { App } from '@/service/mongo';
-import { responseWrite } from '@/service/common/stream';
+import { responseWrite } from '@fastgpt/common/tools/stream';
 import { ChatRoleEnum, TaskResponseKeyEnum, sseResponseEventEnum } from '@/constants/chat';
 import { textAdaptGptResponse } from '@/utils/adapt';

@@ -232,6 +232,6 @@ export async function initPg() {
     `);
     console.log('init pg successful');
   } catch (error) {
-    addLog.error('init pg error', error);
+    console.log('init pg error', error);
   }
 }
@@ -1,7 +1,9 @@
 import { sseResponseEventEnum } from '@/constants/chat';
 import { NextApiResponse } from 'next';
 import { proxyError, ERROR_RESPONSE, ERROR_ENUM } from '@fastgpt/common/constant/errorCode';
-import { clearCookie, sseResponse, addLog } from './utils/tools';
+import { addLog } from './utils/tools';
+import { clearCookie } from '@fastgpt/support/user/auth';
+import { responseWrite } from '@fastgpt/common/tools/stream';

 export interface ResponseType<T = any> {
   code: number;
@@ -66,7 +68,7 @@ export const sseErrRes = (res: NextApiResponse, error: any) => {
     clearCookie(res);
   }

-  return sseResponse({
+  return responseWrite({
     res,
     event: sseResponseEventEnum.error,
     data: JSON.stringify(ERROR_RESPONSE[errResponseKey])
@@ -86,7 +88,7 @@ export const sseErrRes = (res: NextApiResponse, error: any) => {

   addLog.error(`sse error: ${msg}`, error);

-  sseResponse({
+  responseWrite({
     res,
     event: sseResponseEventEnum.error,
     data: JSON.stringify({ message: msg })
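For context on the sseResponse → responseWrite swap, a minimal sketch of emitting one SSE error frame through the shared helper (the wrapper function itself is illustrative, not part of the commit):

```ts
import type { NextApiResponse } from 'next';
import { responseWrite } from '@fastgpt/common/tools/stream';
import { sseResponseEventEnum } from '@/constants/chat';

// Sketch: emit a single SSE error frame. responseWrite writes
// "event: <event>\n" (when an event is given) followed by "data: <data>\n\n",
// matching the format the removed sseResponse helper produced.
export function sendSseError(res: NextApiResponse, message: string) {
  responseWrite({
    res,
    event: sseResponseEventEnum.error,
    data: JSON.stringify({ message })
  });
}
```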
@@ -1,24 +0,0 @@
-export const getChatModel = (model?: string) => {
-  return global.chatModels.find((item) => item.model === model);
-};
-export const getVectorModel = (model?: string) => {
-  return (
-    global.vectorModels.find((item) => item.model === model) || {
-      model: 'UnKnow',
-      name: 'UnKnow',
-      defaultToken: 500,
-      price: 0,
-      maxToken: 3000
-    }
-  );
-};
-
-export const getModel = (model?: string) => {
-  return [
-    ...global.chatModels,
-    ...global.vectorModels,
-    global.qaModel,
-    global.extractModel,
-    global.cqModel
-  ].find((item) => item.model === model);
-};
@@ -1,37 +1,7 @@
-import type { NextApiResponse, NextApiHandler, NextApiRequest } from 'next';
-import NextCors from 'nextjs-cors';
+import type { NextApiResponse } from 'next';
 import { generateQA } from '../events/generateQA';
 import { generateVector } from '../events/generateVector';

-/* set cookie */
-export const setCookie = (res: NextApiResponse, token: string) => {
-  res.setHeader(
-    'Set-Cookie',
-    `token=${token}; Path=/; HttpOnly; Max-Age=604800; Samesite=None; Secure;`
-  );
-};
-/* clear cookie */
-export const clearCookie = (res: NextApiResponse) => {
-  res.setHeader('Set-Cookie', 'token=; Path=/; Max-Age=0');
-};
-
-export function withNextCors(handler: NextApiHandler): NextApiHandler {
-  return async function nextApiHandlerWrappedWithNextCors(
-    req: NextApiRequest,
-    res: NextApiResponse
-  ) {
-    const methods = ['GET', 'eHEAD', 'PUT', 'PATCH', 'POST', 'DELETE'];
-    const origin = req.headers.origin;
-    await NextCors(req, res, {
-      methods,
-      origin: origin,
-      optionsSuccessStatus: 200
-    });
-
-    return handler(req, res);
-  };
-}
-
 /* start task */
 export const startQueue = () => {
   if (!global.systemEnv) return;
@@ -43,20 +13,6 @@
   }
 };

-export const sseResponse = ({
-  res,
-  event,
-  data
-}: {
-  res: NextApiResponse;
-  event?: string;
-  data: string;
-}) => {
-  if (res.closed) return;
-  event && res.write(`event: ${event}\n`);
-  res.write(`data: ${data}\n\n`);
-};
-
 /* add logger */
 export const addLog = {
   info: (msg: string, obj?: Record<string, any>) => {