Archer
2023-10-11 17:18:43 +08:00
committed by GitHub
parent d0041a98b4
commit bcf9491999
51 changed files with 852 additions and 460 deletions

View File

@@ -2,7 +2,7 @@ import { adaptChat2GptMessages } from '@/utils/common/adapt/message';
 import { ChatContextFilter } from '@/service/common/tiktoken';
 import type { ChatHistoryItemResType, ChatItemType } from '@/types/chat';
 import { ChatRoleEnum, TaskResponseKeyEnum } from '@/constants/chat';
-import { getAIChatApi, axiosConfig } from '@fastgpt/core/ai/config';
+import { getAIApi } from '@fastgpt/core/ai/config';
 import type { ClassifyQuestionAgentItemType } from '@/types/app';
 import { SystemInputEnum } from '@/constants/app';
 import { SpecialInputKeyEnum } from '@/constants/flow';
@@ -105,27 +105,22 @@ async function functionCall({
       required: ['type']
     }
   };
-  const chatAPI = getAIChatApi(user.openaiAccount);
+  const ai = getAIApi(user.openaiAccount);
-  const response = await chatAPI.createChatCompletion(
-    {
-      model: cqModel.model,
-      temperature: 0,
-      messages: [...adaptMessages],
-      function_call: { name: agentFunName },
-      functions: [agentFunction]
-    },
-    {
-      ...axiosConfig(user.openaiAccount)
-    }
-  );
+  const response = await ai.chat.completions.create({
+    model: cqModel.model,
+    temperature: 0,
+    messages: [...adaptMessages],
+    function_call: { name: agentFunName },
+    functions: [agentFunction]
+  });
   try {
-    const arg = JSON.parse(response.data.choices?.[0]?.message?.function_call?.arguments || '');
+    const arg = JSON.parse(response.choices?.[0]?.message?.function_call?.arguments || '');
     return {
       arg,
-      tokens: response.data.usage?.total_tokens || 0
+      tokens: response.usage?.total_tokens || 0
     };
   } catch (error) {
     console.log('Your model may not support function_call');
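The hunk above swaps the axios-based client for the OpenAI v4-style SDK: the body is no longer wrapped in response.data, so choices and usage are read directly. A minimal standalone sketch of the defensive argument-parsing pattern used here (the helper name and the structural types are hypothetical; the response shape mirrors the v4 SDK):

// Sketch of the parse-with-fallback pattern from the diff. The model is
// forced to call one function, and its JSON arguments are parsed
// defensively, since some models emit malformed JSON or no function_call.
type AgentCallResult = { arg: Record<string, any>; tokens: number };

function parseAgentCall(response: {
  choices?: { message?: { function_call?: { arguments?: string } } }[];
  usage?: { total_tokens?: number };
}): AgentCallResult {
  const tokens = response.usage?.total_tokens || 0;
  try {
    const arg = JSON.parse(response.choices?.[0]?.message?.function_call?.arguments || '{}');
    return { arg, tokens };
  } catch (error) {
    // Mirrors the diff's fallback: models without function_call support land here.
    return { arg: {}, tokens };
  }
}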
@@ -155,20 +150,14 @@ Human:${userChatInput}`
     }
   ];
-  const chatAPI = getAIChatApi(user.openaiAccount);
+  const ai = getAIApi(user.openaiAccount, 480000);
-  const { data } = await chatAPI.createChatCompletion(
-    {
-      model: extractModel.model,
-      temperature: 0.01,
-      messages: adaptChat2GptMessages({ messages, reserveId: false }),
-      stream: false
-    },
-    {
-      timeout: 480000,
-      ...axiosConfig(user.openaiAccount)
-    }
-  );
+  const data = await ai.chat.completions.create({
+    model: extractModel.model,
+    temperature: 0.01,
+    messages: adaptChat2GptMessages({ messages, reserveId: false }),
+    stream: false
+  });
   const answer = data.choices?.[0].message?.content || '';
   const totalTokens = data.usage?.total_tokens || 0;
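Note the new second argument: getAIApi(user.openaiAccount, 480000) presumably threads a per-request timeout into the client, replacing the old axios timeout option. A hypothetical sketch of what such a wrapper could look like, assuming the OpenAI v4 Node SDK (the real implementation lives in @fastgpt/core/ai/config and is not shown in this diff):

import OpenAI from 'openai';

// Hypothetical wrapper shape. The second parameter matches the diff's
// getAIApi(user.openaiAccount, 480000) calls: a client-level timeout in
// milliseconds for long-running extraction/chat requests.
type OpenaiAccount = { key?: string; baseUrl?: string };

export function getAIApiSketch(account?: OpenaiAccount, timeout = 60000) {
  return new OpenAI({
    apiKey: account?.key || process.env.OPENAI_API_KEY,
    baseURL: account?.baseUrl || process.env.OPENAI_BASE_URL,
    timeout // e.g. 480000 for the long-running calls above
  });
}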

View File

@@ -2,7 +2,7 @@ import { adaptChat2GptMessages } from '@/utils/common/adapt/message';
 import { ChatContextFilter } from '@/service/common/tiktoken';
 import type { ChatHistoryItemResType, ChatItemType } from '@/types/chat';
 import { ChatRoleEnum, TaskResponseKeyEnum } from '@/constants/chat';
-import { getAIChatApi, axiosConfig } from '@fastgpt/core/ai/config';
+import { getAIApi } from '@fastgpt/core/ai/config';
 import type { ContextExtractAgentItemType } from '@/types/app';
 import { ContextExtractEnum } from '@/constants/flow/flowField';
 import { FlowModuleTypeEnum } from '@/constants/flow';
@@ -126,30 +126,25 @@ async function functionCall({
     }
   };
-  const chatAPI = getAIChatApi(user.openaiAccount);
+  const ai = getAIApi(user.openaiAccount);
-  const response = await chatAPI.createChatCompletion(
-    {
-      model: extractModel.model,
-      temperature: 0,
-      messages: [...adaptMessages],
-      function_call: { name: agentFunName },
-      functions: [agentFunction]
-    },
-    {
-      ...axiosConfig(user.openaiAccount)
-    }
-  );
+  const response = await ai.chat.completions.create({
+    model: extractModel.model,
+    temperature: 0,
+    messages: [...adaptMessages],
+    function_call: { name: agentFunName },
+    functions: [agentFunction]
+  });
   const arg: Record<string, any> = (() => {
     try {
-      return JSON.parse(response.data.choices?.[0]?.message?.function_call?.arguments || '{}');
+      return JSON.parse(response.choices?.[0]?.message?.function_call?.arguments || '{}');
     } catch (error) {
       return {};
     }
   })();
-  const tokens = response.data.usage?.total_tokens || 0;
+  const tokens = response.usage?.total_tokens || 0;
   return {
     tokens,
     arg
@@ -181,20 +176,14 @@ Human: ${content}`
     }
   ];
-  const chatAPI = getAIChatApi(user.openaiAccount);
+  const ai = getAIApi(user.openaiAccount, 480000);
-  const { data } = await chatAPI.createChatCompletion(
-    {
-      model: extractModel.model,
-      temperature: 0.01,
-      messages: adaptChat2GptMessages({ messages, reserveId: false }),
-      stream: false
-    },
-    {
-      timeout: 480000,
-      ...axiosConfig(user.openaiAccount)
-    }
-  );
+  const data = await ai.chat.completions.create({
+    model: extractModel.model,
+    temperature: 0.01,
+    messages: adaptChat2GptMessages({ messages, reserveId: false }),
+    stream: false
+  });
   const answer = data.choices?.[0].message?.content || '';
   const totalTokens = data.usage?.total_tokens || 0;

View File

@@ -3,9 +3,9 @@ import { ChatContextFilter } from '@/service/common/tiktoken';
 import type { ChatItemType, QuoteItemType } from '@/types/chat';
 import type { ChatHistoryItemResType } from '@/types/chat';
 import { ChatRoleEnum, sseResponseEventEnum } from '@/constants/chat';
-import { SSEParseData, parseStreamChunk } from '@/utils/sse';
 import { textAdaptGptResponse } from '@/utils/adapt';
-import { getAIChatApi, axiosConfig } from '@fastgpt/core/ai/config';
+import { getAIApi } from '@fastgpt/core/ai/config';
+import type { ChatCompletion, StreamChatType } from '@fastgpt/core/ai/type';
 import { TaskResponseKeyEnum } from '@/constants/chat';
 import { getChatModel } from '@/service/utils/data';
 import { countModelPrice } from '@/service/common/bill/push';
@@ -20,9 +20,7 @@ import type { AIChatProps } from '@/types/core/aiChat';
 import { replaceVariable } from '@/utils/common/tools/text';
 import { FlowModuleTypeEnum } from '@/constants/flow';
 import type { ModuleDispatchProps } from '@/types/core/chat/type';
-import { Readable } from 'stream';
 import { responseWrite, responseWriteController } from '@/service/common/stream';
-import { addLog } from '@/service/utils/tools';

 export type ChatProps = ModuleDispatchProps<
   AIChatProps & {
@@ -106,32 +104,25 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
   // FastGPT temperature range: 1~10
   temperature = +(modelConstantsData.maxTemperature * (temperature / 10)).toFixed(2);
   temperature = Math.max(temperature, 0.01);
-  const chatAPI = getAIChatApi(user.openaiAccount);
+  const ai = getAIApi(user.openaiAccount, 480000);
-  const response = await chatAPI.createChatCompletion(
-    {
-      model,
-      temperature,
-      max_tokens,
-      messages: [
-        ...(modelConstantsData.defaultSystem
-          ? [
-              {
-                role: ChatCompletionRequestMessageRoleEnum.System,
-                content: modelConstantsData.defaultSystem
-              }
-            ]
-          : []),
-        ...messages
-      ],
-      stream
-    },
-    {
-      timeout: 480000,
-      responseType: stream ? 'stream' : 'json',
-      ...axiosConfig(user.openaiAccount)
-    }
-  );
+  const response = await ai.chat.completions.create({
+    model,
+    temperature,
+    max_tokens,
+    messages: [
+      ...(modelConstantsData.defaultSystem
+        ? [
+            {
+              role: ChatCompletionRequestMessageRoleEnum.System,
+              content: modelConstantsData.defaultSystem
+            }
+          ]
+        : []),
+      ...messages
+    ],
+    stream
+  });
   const { answerText, totalTokens, completeMessages } = await (async () => {
     if (stream) {
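With the v4 SDK, one create() call serves both modes: stream: true resolves to an async-iterable stream of chunks, stream: false to a plain ChatCompletion, which is why the non-stream branch below narrows the union with `as ChatCompletion`. A small self-contained sketch of that pattern (model name and message are placeholders):

import OpenAI from 'openai';

// Sketch of the dual-mode call above. With a boolean `stream`, create()
// resolves to a union type, so the non-stream branch must narrow it
// explicitly, exactly as the diff does.
async function completeOrStream(ai: OpenAI, stream: boolean) {
  const response = await ai.chat.completions.create({
    model: 'gpt-3.5-turbo', // placeholder model
    messages: [{ role: 'user', content: 'hello' }],
    stream
  });
  if (!stream) {
    const unStream = response as OpenAI.Chat.ChatCompletion;
    return unStream.choices?.[0]?.message?.content || '';
  }
  return response; // async-iterable stream, consumed as in streamResponse below
}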
@@ -139,7 +130,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
       const { answer } = await streamResponse({
         res,
         detail,
-        response
+        stream: response
       });
       // count tokens
       const completeMessages = filterMessages.concat({
@@ -159,8 +150,9 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
         completeMessages
       };
     } else {
-      const answer = response.data.choices?.[0].message?.content || '';
-      const totalTokens = response.data.usage?.total_tokens || 0;
+      const unStreamResponse = response as ChatCompletion;
+      const answer = unStreamResponse.choices?.[0].message?.content || '';
+      const totalTokens = unStreamResponse.usage?.total_tokens || 0;
       const completeMessages = filterMessages.concat({
         obj: ChatRoleEnum.AI,
@@ -208,7 +200,7 @@ function filterQuote({
       obj: ChatRoleEnum.System,
       value: replaceVariable(quoteTemplate || defaultQuoteTemplate, {
         ...item,
-        index: `${index + 1}`
+        index: index + 1
       })
     }))
   });
@@ -340,59 +332,40 @@ function targetResponse({
 async function streamResponse({
   res,
   detail,
-  response
+  stream
 }: {
   res: NextApiResponse;
   detail: boolean;
-  response: any;
+  stream: StreamChatType;
 }) {
-  return new Promise<{ answer: string }>((resolve, reject) => {
-    const stream = response.data as Readable;
-    let answer = '';
-    const parseData = new SSEParseData();
-    const write = responseWriteController({
-      res,
-      readStream: stream
-    });
-    stream.on('data', (data) => {
-      if (res.closed) {
-        stream.destroy();
-        return resolve({ answer });
-      }
-      const parse = parseStreamChunk(data);
-      parse.forEach((item) => {
-        const { data } = parseData.parse(item);
-        if (!data || data === '[DONE]') return;
-        const content: string = data?.choices?.[0]?.delta?.content || '';
-        if (data.error) {
-          addLog.error(`SSE response`, data.error);
-        } else {
-          answer += content;
-          responseWrite({
-            write,
-            event: detail ? sseResponseEventEnum.answer : undefined,
-            data: textAdaptGptResponse({
-              text: content
-            })
-          });
-        }
-      });
-    });
-    stream.on('end', () => {
-      resolve({ answer });
-    });
-    stream.on('close', () => {
-      resolve({ answer });
-    });
-    stream.on('error', (err) => {
-      reject(err);
-    });
-  });
+  const write = responseWriteController({
+    res,
+    readStream: stream
+  });
+  let answer = '';
+  for await (const part of stream) {
+    if (res.closed) {
+      stream.controller?.abort();
+      break;
+    }
+    const content = part.choices[0]?.delta?.content || '';
+    answer += content;
+    responseWrite({
+      write,
+      event: detail ? sseResponseEventEnum.answer : undefined,
+      data: textAdaptGptResponse({
+        text: content
+      })
+    });
+  }
+  if (!answer) {
+    return Promise.reject('Chat API is error or undefined');
+  }
+  return { answer };
 }

 function getHistoryPreview(completeMessages: ChatItemType[]) {
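The rewritten streamResponse drops the manual SSE plumbing (SSEParseData, parseStreamChunk, the raw Readable) in favor of the SDK stream's async iterator, and aborts the upstream request when the client disconnects. A condensed, self-contained sketch of that consumption loop (the chunk shape follows the v4 SDK; StreamChatType is approximated with a structural type, and the write-to-response step is elided):

import type { NextApiResponse } from 'next';

// Condensed sketch of the new loop: each chunk carries a delta fragment,
// and the upstream request is aborted if the client closes the connection.
type ChatStream = AsyncIterable<{ choices: { delta?: { content?: string } }[] }> & {
  controller?: AbortController;
};

async function drainStream(res: NextApiResponse, stream: ChatStream) {
  let answer = '';
  for await (const part of stream) {
    if (res.closed) {
      stream.controller?.abort(); // stop generating tokens nobody will read
      break;
    }
    answer += part.choices[0]?.delta?.content || '';
  }
  if (!answer) {
    throw new Error('Chat API is error or undefined');
  }
  return { answer };
}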

View File

@@ -46,7 +46,9 @@ export async function dispatchKBSearch(props: Record<string, any>): Promise<KBSe
   const res: any = await PgClient.query(
     `BEGIN;
     SET LOCAL ivfflat.probes = ${global.systemEnv.pgIvfflatProbe || 10};
-    select kb_id,id,q,a,source,file_id from ${PgDatasetTableName} where kb_id IN (${kbList
+    select id, kb_id, q, a, source, file_id, (vector <#> '[${
+      vectors[0]
+    }]') * -1 AS score from ${PgDatasetTableName} where kb_id IN (${kbList
       .map((item) => `'${item.kbId}'`)
       .join(',')}) AND vector <#> '[${vectors[0]}]' < -${similarity} order by vector <#> '[${
       vectors[0]
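For context on this SQL change (the hunk is truncated above): pgvector's <#> operator returns the negative inner product, so smaller values mean more similar vectors. Multiplying by -1 surfaces it as a positive score column, and the `< -${similarity}` filter keeps only rows whose inner product exceeds the threshold. A sketch of the assembled query with placeholder values (the table name stands in for the PgDatasetTableName constant):

// Sketch of the query the hunk builds. Array values stringify as
// comma-separated lists, so '[${embedding}]' yields a pgvector literal.
const embedding = [0.1, 0.2, 0.3]; // placeholder query vector
const similarity = 0.8;
const kbIds = ["'kb1'", "'kb2'"].join(',');
const sql = `
  select id, kb_id, q, a, source, file_id,
    (vector <#> '[${embedding}]') * -1 AS score
  from dataset_table -- placeholder for PgDatasetTableName
  where kb_id IN (${kbIds})
    AND vector <#> '[${embedding}]' < -${similarity}
  order by vector <#> '[${embedding}]';
`;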