Mirror of https://github.com/labring/FastGPT.git (synced 2025-07-23 21:13:50 +00:00)
fix: variable input and update chat time
@@ -31,7 +31,8 @@
   "Mark Count": "Mark Count",
   "My Apps": "My Apps",
   "Output Field Settings": "Output Field Settings",
-  "Paste Config": "Paste Config"
+  "Paste Config": "Paste Config",
+  "Variable Key Repeat Tip": "Variable Key Repeat"
 },
 "chat": {
   "Admin Mark Content": "Corrected response",
@@ -31,7 +31,8 @@
   "Mark Count": "标注答案数量",
   "My Apps": "我的应用",
   "Output Field Settings": "输出字段编辑",
-  "Paste Config": "粘贴配置"
+  "Paste Config": "粘贴配置",
+  "Variable Key Repeat Tip": "变量 key 重复"
 },
 "chat": {
   "Admin Mark Content": "纠正后的回复",
@@ -198,6 +198,8 @@ const ChatBox = (
       chatHistory[chatHistory.length - 1]?.status !== 'finish',
     [chatHistory]
   );
+  // compute whether variable input is finished
+  const [variableInputFinish, setVariableInputFinish] = useState(false);
   const variableIsFinish = useMemo(() => {
     if (!variableModules || chatHistory.length > 0) return true;
@@ -208,8 +210,8 @@ const ChatBox = (
       }
     }

-    return true;
-  }, [chatHistory.length, variableModules, variables]);
+    return variableInputFinish;
+  }, [chatHistory.length, variableInputFinish, variableModules, variables]);

   const { register, reset, getValues, setValue, handleSubmit } = useForm<Record<string, any>>({
     defaultValues: variables
@@ -408,6 +410,7 @@ const ChatBox = (
     ]
   );

   // output data
   useImperativeHandle(ref, () => ({
+    getChatHistory: () => chatHistory,
     resetVariables(e) {
@@ -420,6 +423,7 @@ const ChatBox = (
       setVariables(e || defaultVal);
     },
     resetHistory(e) {
+      setVariableInputFinish(!!e.length);
       setChatHistory(e);
     },
     scrollToBottom
@@ -554,9 +558,7 @@ const ChatBox = (
                   label: item.value,
                   value: item.value
                 }))}
-                {...register(item.key, {
-                  required: item.required
-                })}
+                value={getValues(item.key)}
                 onchange={(e) => {
                   setValue(item.key, e);
                   setRefresh(!refresh);
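This hunk swaps the uncontrolled `{...register(...)}` spread for a controlled field: the value is read with getValues, written with setValue, and a refresh flip forces a re-render (setValue alone does not re-render here). A minimal sketch of the pattern, with a plain HTML select standing in for the MySelect component and a hypothetical field key:

```tsx
import React, { useState } from 'react';
import { useForm } from 'react-hook-form';

// Sketch of the controlled-select pattern from this hunk (assumed props).
function VariableSelect({ itemKey, options }: { itemKey: string; options: string[] }) {
  const { getValues, setValue } = useForm<Record<string, any>>();
  const [refresh, setRefresh] = useState(false);

  return (
    <select
      value={getValues(itemKey) ?? ''}
      onChange={(e) => {
        setValue(itemKey, e.target.value);
        setRefresh(!refresh); // force a re-render so getValues reflects the change
      }}
    >
      {options.map((v) => (
        <option key={v} value={v}>
          {v}
        </option>
      ))}
    </select>
  );
}
```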
@@ -574,6 +576,7 @@ const ChatBox = (
                 onClick={handleSubmit((data) => {
                   onUpdateVariable?.(data);
                   setVariables(data);
+                  setVariableInputFinish(true);
                 })}
               >
                 {'开始对话'}
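Taken together, the ChatBox hunks replace a purely derived variableIsFinish with one backed by an explicit variableInputFinish flag: it is set when the user submits the variable form, re-armed by resetHistory, and short-circuited when there are no variables or the chat already has history. A minimal sketch of that gating logic, with assumed shapes for the chat types:

```tsx
import { useMemo, useState } from 'react';

// Assumed minimal shapes; the real types live in @/types/chat and @/types/app.
type ChatItem = { status: 'loading' | 'finish' };
type VariableModule = { key: string; required: boolean };

function useVariableGate(chatHistory: ChatItem[], variableModules?: VariableModule[]) {
  // Explicit flag: flipped true on form submit, reset by resetHistory.
  const [variableInputFinish, setVariableInputFinish] = useState(false);

  const variableIsFinish = useMemo(() => {
    // Nothing to gate: no variables configured, or the conversation already exists.
    if (!variableModules || chatHistory.length > 0) return true;
    return variableInputFinish;
  }, [chatHistory.length, variableInputFinish, variableModules]);

  return { variableIsFinish, setVariableInputFinish };
}
```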
@@ -1,13 +1,6 @@
 import type { ShareChatEditType } from '@/types/app';
 import type { AppSchema } from '@/types/mongoSchema';

-export enum OpenAiChatEnum {
-  'GPT35' = 'gpt-3.5-turbo',
-  'GPT3516k' = 'gpt-3.5-turbo-16k',
-  'FastAI-Plus' = 'gpt-4',
-  'FastAI-Plus32k' = 'gpt-4-32k'
-}
-
 export const defaultApp: AppSchema = {
   _id: '',
   userId: 'userId',
@@ -104,7 +104,6 @@ export async function pushDataToKb({

   // count q token
   const token = modelToolMap.countTokens({
-    model: 'gpt-3.5-turbo',
     messages: [{ obj: 'System', value: item.q }]
   });

@@ -69,7 +69,7 @@ export async function getVector({
     .then(async (res) => {
       if (!res.data?.data?.[0]?.embedding) {
         // @ts-ignore
-        return Promise.reject(res.data?.error?.message || 'Embedding Error');
+        return Promise.reject(res.data?.error?.message || 'Embedding API Error');
       }
       return {
         tokenLen: res.data.usage.total_tokens || 0,
@@ -4,13 +4,10 @@ import { jsonRes } from '@/service/response';
 import { authUser } from '@/service/utils/auth';
 import type { ChatItemType } from '@/types/chat';
 import { countOpenAIToken } from '@/utils/plugin/openai';
-import { OpenAiChatEnum } from '@/constants/model';
-
-type ModelType = `${OpenAiChatEnum}`;

 type Props = {
   messages: ChatItemType[];
-  model: ModelType;
+  model: string;
   maxLen: number;
 };
 type Response = ChatItemType[];
@@ -28,7 +25,6 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)
     return jsonRes<Response>(res, {
       data: gpt_chatItemTokenSlice({
         messages,
-        model,
         maxToken: maxLen
       })
     });
@@ -42,11 +38,9 @@ export default async function handler(req: NextApiRequest, res: NextApiResponse)

 export function gpt_chatItemTokenSlice({
   messages,
-  model = 'gpt-3.5-turbo',
   maxToken
 }: {
   messages: ChatItemType[];
-  model?: string;
   maxToken: number;
 }) {
   let result: ChatItemType[] = [];
@@ -54,7 +48,7 @@ export function gpt_chatItemTokenSlice({
   for (let i = 0; i < messages.length; i++) {
     const msgs = [...result, messages[i]];

-    const tokens = countOpenAIToken({ messages: msgs, model });
+    const tokens = countOpenAIToken({ messages: msgs });

     if (tokens < maxToken) {
       result = msgs;
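With the model parameter gone, gpt_chatItemTokenSlice is a plain greedy filter: walk the messages in order and keep each one only while the running token total stays under the budget. A self-contained sketch, with countTokens standing in for countOpenAIToken (see its hunk further down):

```ts
type ChatItem = { obj: string; value: string };

// Stand-in for countOpenAIToken.
declare function countTokens(messages: ChatItem[]): number;

// Greedy slice mirroring the diff: an oversized message is skipped,
// not treated as a hard stop, and the committed prefix is returned.
function chatItemTokenSlice(messages: ChatItem[], maxToken: number): ChatItem[] {
  let result: ChatItem[] = [];
  for (let i = 0; i < messages.length; i++) {
    const msgs = [...result, messages[i]];
    if (countTokens(msgs) < maxToken) {
      result = msgs;
    }
  }
  return result;
}
```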
@@ -35,7 +35,6 @@ export default withNextCors(async function handler(req: NextApiRequest, res: Nex

   // token check
   const token = modelToolMap.countTokens({
-    model: 'gpt-3.5-turbo',
     messages: [{ obj: 'System', value: q }]
   });

@@ -10,7 +10,7 @@ import type { VariableItemType } from '@/types/app';
 import MyIcon from '@/components/Icon';
 import { customAlphabet } from 'nanoid';
 const nanoid = customAlphabet('abcdefghijklmnopqrstuvwxyz1234567890', 6);
-import VariableEditModal from '../../../VariableEditModal';
+import VariableEditModal, { addVariable } from '../../../VariableEditModal';

 export const defaultVariable: VariableItemType = {
   id: nanoid(),
@@ -105,7 +105,7 @@ const NodeUserGuide = ({ data }: NodeProps<FlowModuleItemType>) => {
           variant={'base'}
           leftIcon={<AddIcon fontSize={'10px'} />}
           onClick={() => {
-            const newVariable = { ...defaultVariable, id: nanoid() };
+            const newVariable = addVariable();
             updateVariables(variables.concat(newVariable));
             setEditVariable(newVariable);
           }}
@@ -532,6 +532,13 @@ const Settings = ({ appId }: { appId: string }) => {
               variables.map((item) => (item.id === variable.id ? variable : item))
             );
           } else {
+            // check for a duplicate key
+            if (variables.find((item) => item.key === variable.key)) {
+              return toast({
+                status: 'warning',
+                title: t('app.Variable Key Repeat Tip')
+              });
+            }
             appendVariable(variable);
           }

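The guard itself is a linear scan over the existing variables; extracted as a standalone predicate (with VariableItemType trimmed to the fields used) it amounts to:

```ts
// Duplicate-key check behind the new warning toast in Settings.
type VariableItem = { id: string; key: string };

function hasDuplicateKey(variables: VariableItem[], candidate: VariableItem): boolean {
  return variables.some((item) => item.key === candidate.key);
}
```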
@@ -204,6 +204,6 @@ export const defaultVariable: VariableItemType = {
   enums: [{ value: '' }]
 };
 export const addVariable = () => {
-  const newVariable = { ...defaultVariable, id: nanoid() };
+  const newVariable = { ...defaultVariable, key: nanoid(), id: nanoid() };
   return newVariable;
 };
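addVariable now stamps a fresh key as well as a fresh id, so two quickly added variables no longer share the module-level defaultVariable key and immediately trip the duplicate-key toast above. A sketch with the same nanoid alphabet as the diff (other VariableItemType fields trimmed):

```ts
import { customAlphabet } from 'nanoid';

// Same alphabet and length as in the diff: lowercase letters + digits, 6 chars.
const nanoid = customAlphabet('abcdefghijklmnopqrstuvwxyz1234567890', 6);

type VariableItemType = { id: string; key: string; enums: { value: string }[] };

const defaultVariable: VariableItemType = { id: nanoid(), key: '', enums: [{ value: '' }] };

// Fresh id AND key per call, so consecutive adds never collide on key.
export const addVariable = (): VariableItemType => ({
  ...defaultVariable,
  key: nanoid(),
  id: nanoid()
});
```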
@@ -1,6 +1,5 @@
 import type { NextApiResponse } from 'next';
 import { sseResponse } from '@/service/utils/tools';
-import { OpenAiChatEnum } from '@/constants/model';
 import { adaptChatItem_openAI, countOpenAIToken } from '@/utils/plugin/openai';
 import { modelToolMap } from '@/utils/plugin';
 import { ChatContextFilter } from '@/service/utils/chat/index';
@@ -198,7 +197,6 @@ function filterQuote({
   model: ChatModelItemType;
 }) {
   const sliceResult = modelToolMap.tokenSlice({
-    model: model.model,
     maxToken: model.quoteMaxToken,
     messages: quoteQA.map((item) => ({
       obj: ChatRoleEnum.System,
@@ -312,7 +310,6 @@ function getMaxTokens({
   /* count response max token */

   const promptsToken = modelToolMap.countTokens({
-    model: model.model,
     messages: filterMessages
   });
   maxToken = maxToken + promptsToken > tokensLimit ? tokensLimit - promptsToken : maxToken;
@@ -383,7 +380,6 @@ async function streamResponse({
   }

   if (error) {
-    console.log(error);
     return Promise.reject(error);
   }

@@ -1,7 +1,6 @@
 import { ChatItemType } from '@/types/chat';
 import { modelToolMap } from '@/utils/plugin';
 import { ChatRoleEnum } from '@/constants/chat';
-import { OpenAiChatEnum } from '@/constants/model';
 import type { NextApiResponse } from 'next';

 export type ChatCompletionResponseType = {
@@ -14,7 +13,7 @@ export type StreamResponseType = {
   chatResponse: any;
   prompts: ChatItemType[];
   res: NextApiResponse;
-  model: `${OpenAiChatEnum}`;
+  model: string;
   [key: string]: any;
 };

@@ -45,7 +44,6 @@ export const ChatContextFilter = ({

   // subtract systemPrompt tokens from the budget
   maxTokens -= modelToolMap.countTokens({
-    model,
     messages: systemPrompts
   });

@@ -57,7 +55,6 @@ export const ChatContextFilter = ({
     chats.unshift(chatPrompts[i]);

     const tokens = modelToolMap.countTokens({
-      model,
       messages: chats
     });

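ChatContextFilter reserves budget for the system prompts, then fills from the newest chat message backwards until the budget runs out; dropping the model argument follows from the flat token estimate below. A self-contained sketch under those assumptions:

```ts
type ChatItem = { obj: string; value: string };

declare function countTokens(messages: ChatItem[]): number;

// Keep system prompts whole, then take chat messages newest-first
// until the remaining token budget is exhausted.
function chatContextFilter(
  systemPrompts: ChatItem[],
  chatPrompts: ChatItem[],
  maxTokens: number
): ChatItem[] {
  // subtract the system prompts' cost from the budget
  maxTokens -= countTokens(systemPrompts);

  const chats: ChatItem[] = [];
  for (let i = chatPrompts.length - 1; i >= 0; i--) {
    chats.unshift(chatPrompts[i]);
    if (countTokens(chats) > maxTokens) {
      chats.shift(); // this message overflowed the budget; drop it and stop
      break;
    }
  }
  return [...systemPrompts, ...chats];
}
```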
@@ -44,7 +44,7 @@ export async function saveChat({
   ];

   if (chatHistory) {
-    promise.push([
+    promise.push(
       Chat.updateOne(
         { chatId, userId },
         {
@@ -52,7 +52,7 @@ export async function saveChat({
         updateTime: new Date()
       }
     )
-    ]);
+    );
   } else {
     promise.push(
       Chat.create({
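This is the fix behind "update chat time": the old code pushed a one-element array into the promise list, and since mongoose queries only execute once awaited or then'd, Promise.all resolved the wrapper array without ever running updateOne, so updateTime was never written. Pushing the query itself fixes that. A sketch, with Chat standing in for the mongoose model and the update payload abbreviated:

```ts
// Minimal stand-in for the mongoose Chat model used in saveChat.
declare const Chat: {
  updateOne(filter: object, update: object): Promise<unknown>;
  create(doc: object): Promise<unknown>;
};

async function saveChat(chatId: string, userId: string, hasHistory: boolean) {
  const promise: Promise<unknown>[] = [];

  if (hasHistory) {
    // Push the promise itself -- promise.push([ ... ]) handed Promise.all an
    // array element, which it resolves as-is without awaiting the query inside.
    promise.push(Chat.updateOne({ chatId, userId }, { updateTime: new Date() }));
  } else {
    promise.push(Chat.create({ chatId, userId, updateTime: new Date() }));
  }

  await Promise.all(promise);
}
```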
@@ -47,21 +47,13 @@ export const adaptChatItem_openAI = ({
   }));
 };

-export function countOpenAIToken({
-  messages,
-  model = 'gpt-3.5-turbo'
-}: {
-  messages: ChatItemType[];
-  model?: string;
-}) {
-  const diffVal = model.startsWith('gpt-3.5-turbo') ? 3 : 2;
-
+export function countOpenAIToken({ messages }: { messages: ChatItemType[] }) {
   const adaptMessages = adaptChatItem_openAI({ messages, reserveId: true });
   const token = adaptMessages.reduce((sum, item) => {
     const text = `${item.role}\n${item.content}`;
     const enc = getOpenAiEncMap();
     const encodeText = enc.encode(text);
-    const tokens = encodeText.length + diffVal;
+    const tokens = encodeText.length + 3; // supplementary token estimate
     return sum + tokens;
   }, 0);
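countOpenAIToken drops the per-model overhead (3 tokens per message for gpt-3.5-turbo, 2 otherwise) in favor of a flat +3 estimate, which is what lets every caller above stop passing a model. A runnable sketch of the counting loop, with a character-level encoder standing in for the tiktoken encoder from getOpenAiEncMap:

```ts
type ChatMessage = { role: string; content: string };

// Stand-in encoder; the real code gets a tiktoken encoder from getOpenAiEncMap().
const enc = {
  encode: (text: string): number[] => Array.from(text).map((c) => c.codePointAt(0)!)
};

function countOpenAIToken(messages: ChatMessage[]): number {
  return messages.reduce((sum, item) => {
    const text = `${item.role}\n${item.content}`;
    const encodeText = enc.encode(text);
    return sum + encodeText.length + 3; // flat per-message overhead estimate
  }, 0);
}

// e.g. countOpenAIToken([{ role: 'user', content: 'hello' }])
```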