lock (#2063)

* lock
* perf: init data
* perf: vision model url
* fix: chat index
```diff
@@ -63,6 +63,7 @@ const instance = axios.create({
     'Cache-Control': 'no-cache'
   }
 });
+export const serverRequestBaseUrl = `http://${SERVICE_LOCAL_HOST}`;
 
 /* Request interceptor */
 instance.interceptors.request.use(requestStart, (err) => Promise.reject(err));
@@ -79,7 +80,7 @@ export function request(url: string, data: any, config: ConfigType, method: Meth
 
   return instance
     .request({
-      baseURL: `http://${SERVICE_LOCAL_HOST}`,
+      baseURL: serverRequestBaseUrl,
       url,
       method,
       data: ['POST', 'PUT'].includes(method) ? data : null,
```
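This first file hoists the hard-coded service address into an exported `serverRequestBaseUrl` constant so other server-side modules (notably the image loader further down) can resolve relative URLs against the same host. A minimal sketch of the pattern, with `SERVICE_LOCAL_HOST` stubbed since its definition is outside this diff and the endpoint name purely illustrative:

```ts
import axios from 'axios';

// Stub: in FastGPT this value is defined elsewhere in the service package.
const SERVICE_LOCAL_HOST = process.env.SERVICE_LOCAL_HOST ?? '127.0.0.1:3000';

// Single source of truth for the in-cluster address.
export const serverRequestBaseUrl = `http://${SERVICE_LOCAL_HOST}`;

// Per-request usage, mirroring the second hunk: the literal template string
// is replaced by the shared constant.
export const ping = () =>
  axios.request({
    baseURL: serverRequestBaseUrl,
    url: '/api/health', // illustrative endpoint, not from the diff
    method: 'GET'
  });
```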
```diff
@@ -64,10 +64,13 @@ export const getMongoModel = <T>(name: string, schema: mongoose.Schema) => {
   addCommonMiddleware(schema);
 
   const model = connectionMongo.model<T>(name, schema);
-  try {
-    model.syncIndexes();
-  } catch (error) {
-    addLog.error('Create index error', error);
+  if (process.env.SYNC_INDEX !== '0') {
+    try {
+      model.syncIndexes({ background: true });
+    } catch (error) {
+      addLog.error('Create index error', error);
+    }
   }
 
   return model;
```
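Index syncing is now opt-out via a `SYNC_INDEX` environment variable and runs with `background: true`. One caveat worth noting: `syncIndexes()` returns a promise, so the `try/catch` in the diff only traps synchronous throws. A sketch of the same guard that also catches the async rejection (the `.catch` is an editorial addition, not part of the commit):

```ts
import mongoose from 'mongoose';

// Sketch only: `connectionMongo` and `addLog` from the diff are replaced by
// the default mongoose connection and console logging for self-containment.
export const getMongoModelSketch = <T>(name: string, schema: mongoose.Schema) => {
  const model = mongoose.model<T>(name, schema);

  // SYNC_INDEX=0 lets operators skip index rebuilds at boot (useful on large
  // collections); background builds avoid blocking reads and writes.
  if (process.env.SYNC_INDEX !== '0') {
    model
      .syncIndexes({ background: true })
      // syncIndexes is async; without this, rejections bypass a bare try/catch.
      .catch((error) => console.error('Create index error', error));
  }

  return model;
};
```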
```diff
@@ -1,3 +1,4 @@
+import { exit } from 'process';
 import { addLog } from '../system/log';
 import { connectionMongo } from './index';
 import type { Mongoose } from 'mongoose';
@@ -56,9 +57,13 @@ export async function connectMongo({
   }
 
   try {
-    afterHook && (await afterHook());
+    if (!global.systemInited) {
+      global.systemInited = true;
+      afterHook && (await afterHook());
+    }
   } catch (error) {
-    addLog.error('mongo connect after hook error', error);
+    addLog.error('Mongo connect after hook error', error);
+    exit(1);
   }
 
   return connectionMongo;
```
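This hook is the "lock" of the commit title: a process-global flag ensures the post-connect `afterHook` (data initialization) runs exactly once even if the Mongo connection is re-established, and a failed init now exits instead of leaving a half-initialized server. A self-contained sketch of the pattern (the `systemInited` declaration is assumed; it is not shown in the diff):

```ts
declare global {
  // Assumed declaration; the diff only shows the flag being used.
  // eslint-disable-next-line no-var
  var systemInited: boolean | undefined;
}

export async function runAfterHookOnce(afterHook?: () => Promise<void>) {
  try {
    if (!global.systemInited) {
      // Set the flag before awaiting so a concurrent caller cannot slip in
      // while the hook is still running.
      global.systemInited = true;
      afterHook && (await afterHook());
    }
  } catch (error) {
    console.error('Mongo connect after hook error', error);
    // Fail fast: a server missing its init data is worse than a restart.
    process.exit(1);
  }
}
```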
```diff
@@ -89,7 +89,7 @@ try {
   get chat logs;
   close custom feedback;
   */
-ChatItemSchema.index({ appId: 1, chatId: 1, dataId: 1 }, { background: true, unique: true });
+ChatItemSchema.index({ appId: 1, chatId: 1, dataId: 1 }, { background: true });
 // admin charts
 ChatItemSchema.index({ time: -1, obj: 1 }, { background: true });
 // timer, clear history
```

```diff
@@ -85,7 +85,7 @@ try {
 // get user history
 ChatSchema.index({ tmbId: 1, appId: 1, top: -1, updateTime: -1 }, { background: true });
 // delete by appid; clear history; init chat; update chat; auth chat; get chat;
-ChatSchema.index({ appId: 1, chatId: 1 }, { background: true, unique: true });
+ChatSchema.index({ appId: 1, chatId: 1 }, { background: true });
 
 // get chat logs;
 ChatSchema.index({ teamId: 1, appId: 1, updateTime: -1 }, { background: true });
```
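Both chat collections drop `unique: true` from their compound indexes (the "fix: chat index" item in the commit message), presumably because existing data or retried writes could violate the constraint. Since the model layer above now calls `syncIndexes`, the definition change propagates on boot: Mongoose drops an index whose options no longer match the schema and rebuilds it. A sketch of that interaction, with field types assumed:

```ts
import { Schema, model } from 'mongoose';

// Stand-in schema; the real field types live elsewhere in the repo.
const ChatItemSchema = new Schema({
  appId: Schema.Types.ObjectId,
  chatId: String,
  dataId: String
});

// Non-unique now: duplicate (appId, chatId, dataId) tuples no longer make
// inserts fail; background keeps the collection available during the build.
ChatItemSchema.index({ appId: 1, chatId: 1, dataId: 1 }, { background: true });

const ChatItem = model('chatitems', ChatItemSchema);

// syncIndexes compares schema indexes with what exists in MongoDB, dropping
// the stale unique variant and creating the relaxed one.
ChatItem.syncIndexes({ background: true }).catch(console.error);
```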
````diff
@@ -1,4 +1,3 @@
-import { IMG_BLOCK_KEY } from '@fastgpt/global/core/chat/constants';
 import { countGptMessagesTokens } from '../../common/string/tiktoken/index';
 import type {
   ChatCompletionContentPart,
@@ -7,6 +6,8 @@ import type {
 import axios from 'axios';
 import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
 import { guessBase64ImageType } from '../../common/file/utils';
+import { serverRequestBaseUrl } from '../../common/api/serverRequest';
+import { cloneDeep } from 'lodash';
 
 /* slice chat context by tokens */
 const filterEmptyMessages = (messages: ChatCompletionMessageParam[]) => {
@@ -120,137 +121,64 @@ export const formatGPTMessagesInRequestBefore = (messages: ChatCompletionMessage
     .filter(Boolean) as ChatCompletionMessageParam[];
 };
 
-/**
-  string to vision model. Follow the markdown code block rule for interception:
-
-  @rule:
-  ```img-block
-    {src:""}
-    {src:""}
-  ```
-  ```file-block
-    {name:"",src:""},
-    {name:"",src:""}
-  ```
-  @example:
-    What’s in this image?
-    ```img-block
-      {src:"https://1.png"}
-    ```
-  @return
-    [
-      { type: 'text', text: 'What’s in this image?' },
-      {
-        type: 'image_url',
-        image_url: {
-          url: 'https://1.png'
-        }
-      }
-    ]
-*/
-export async function formatStr2ChatContent(str: string) {
-  const content: ChatCompletionContentPart[] = [];
-  let lastIndex = 0;
-  const regex = new RegExp(`\`\`\`(${IMG_BLOCK_KEY})\\n([\\s\\S]*?)\`\`\``, 'g');
-
-  const imgKey: 'image_url' = 'image_url';
-
-  let match;
-
-  while ((match = regex.exec(str)) !== null) {
-    // add previous text
-    if (match.index > lastIndex) {
-      const text = str.substring(lastIndex, match.index).trim();
-      if (text) {
-        content.push({ type: 'text', text });
-      }
-    }
-
-    const blockType = match[1].trim();
-
-    if (blockType === IMG_BLOCK_KEY) {
-      const blockContentLines = match[2].trim().split('\n');
-      const jsonLines = blockContentLines.map((item) => {
-        try {
-          return JSON.parse(item) as { src: string };
-        } catch (error) {
-          return { src: '' };
-        }
-      });
-
-      for (const item of jsonLines) {
-        if (!item.src) throw new Error("image block's content error");
-      }
-
-      content.push(
-        ...jsonLines.map((item) => ({
-          type: imgKey,
-          image_url: {
-            url: item.src
-          }
-        }))
-      );
-    }
-
-    lastIndex = regex.lastIndex;
-  }
-
-  // add remaining text
-  if (lastIndex < str.length) {
-    const remainingText = str.substring(lastIndex).trim();
-    if (remainingText) {
-      content.push({ type: 'text', text: remainingText });
-    }
-  }
-
-  // Continuous text type content, if type=text, merge them
-  for (let i = 0; i < content.length - 1; i++) {
-    const currentContent = content[i];
-    const nextContent = content[i + 1];
-    if (currentContent.type === 'text' && nextContent.type === 'text') {
-      currentContent.text += nextContent.text;
-      content.splice(i + 1, 1);
-      i--;
-    }
-  }
-
-  if (content.length === 1 && content[0].type === 'text') {
-    return content[0].text;
-  }
-
-  if (!content) return null;
-  // load img to base64
-  for await (const item of content) {
-    if (item.type === imgKey && item[imgKey]?.url) {
-      const response = await axios.get(item[imgKey].url, {
-        responseType: 'arraybuffer'
-      });
-      const base64 = Buffer.from(response.data).toString('base64');
-      item[imgKey].url = `data:${response.headers['content-type']};base64,${base64}`;
-    }
-  }
-
-  return content ? content : null;
-}
-
 /* Load user chat content.
   Img: to base 64
 */
 export const loadChatImgToBase64 = async (content: string | ChatCompletionContentPart[]) => {
   if (typeof content === 'string') {
     return content;
   }
 
   return Promise.all(
     content.map(async (item) => {
       if (item.type === 'text') return item;
-      // load image
-      const response = await axios.get(item.image_url.url, {
-        responseType: 'arraybuffer'
-      });
-      const base64 = Buffer.from(response.data).toString('base64');
-      let imageType = response.headers['content-type'];
-      if (imageType === undefined) {
-        imageType = guessBase64ImageType(base64);
-      }
-      item.image_url.url = `data:${imageType};base64,${base64}`;
+
+      if (!item.image_url.url) return item;
+
+      /*
+        1. From db: Get it from db
+        2. From web: Not update
+      */
+      if (item.image_url.url.startsWith('/')) {
+        const response = await axios.get(item.image_url.url, {
+          baseURL: serverRequestBaseUrl,
+          responseType: 'arraybuffer'
+        });
+        const base64 = Buffer.from(response.data).toString('base64');
+        let imageType = response.headers['content-type'];
+        if (imageType === undefined) {
+          imageType = guessBase64ImageType(base64);
+        }
+        return {
+          ...item,
+          image_url: {
+            ...item.image_url,
+            url: `data:${imageType};base64,${base64}`
+          }
+        };
+      }
 
       return item;
     })
   );
 };
+
+export const loadRequestMessages = async (messages: ChatCompletionMessageParam[]) => {
+  if (messages.length === 0) {
+    return Promise.reject('core.chat.error.Messages empty');
+  }
+
+  const loadMessages = await Promise.all(
+    messages.map(async (item) => {
+      if (item.role === ChatCompletionRequestMessageRoleEnum.User) {
+        return {
+          ...item,
+          content: await loadChatImgToBase64(item.content)
+        };
+      } else {
+        return item;
+      }
+    })
+  );
+
+  return loadMessages;
+};
````
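The net effect of this file: the `img-block` string protocol (`formatStr2ChatContent`) is removed along with its `IMG_BLOCK_KEY` import, `loadChatImgToBase64` now only inlines images served by the application itself (URLs starting with `/`, fetched via `serverRequestBaseUrl`), and the new `loadRequestMessages` applies that to every user message and owns the empty-messages guard. An illustrative before/after of one user message (all values made up; the local path is hypothetical):

```ts
type ContentPart =
  | { type: 'text'; text: string }
  | { type: 'image_url'; image_url: { url: string } };

// Input: one locally served image and one public web image.
const before: ContentPart[] = [
  { type: 'text', text: 'What is in these images?' },
  { type: 'image_url', image_url: { url: '/api/common/file/read/abc123' } }, // hypothetical
  { type: 'image_url', image_url: { url: 'https://example.com/cat.png' } }
];

// Output of loadChatImgToBase64: only the local path is fetched (against
// serverRequestBaseUrl) and inlined as a data URI; web URLs pass through for
// the model provider to fetch itself.
const after: ContentPart[] = [
  { type: 'text', text: 'What is in these images?' },
  { type: 'image_url', image_url: { url: 'data:image/png;base64,iVBORw0KGg...' } }, // truncated
  { type: 'image_url', image_url: { url: 'https://example.com/cat.png' } }
];
```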
```diff
@@ -1,6 +1,6 @@
 import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
 import { getAIApi } from '../../../../ai/config';
-import { filterGPTMessageByMaxTokens } from '../../../../chat/utils';
+import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../../chat/utils';
 import {
   ChatCompletion,
   StreamChatType,
@@ -88,6 +88,7 @@ export const runToolWithFunctionCall = async (
     }
     return item;
   });
+  const requestMessages = await loadRequestMessages(formativeMessages);
 
   /* Run llm */
   const ai = getAIApi({
@@ -99,7 +100,7 @@ export const runToolWithFunctionCall = async (
       model: toolModel.model,
       temperature: 0,
       stream,
-      messages: formativeMessages,
+      messages: requestMessages,
       functions,
       function_call: 'auto'
     },
```
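From here on the commit is mostly call-site plumbing: each LLM runner converts its shaped messages with `loadRequestMessages` immediately before the request and sends `requestMessages` instead. The same two-line change repeats in the prompt-call and tool-choice runners below. A condensed sketch of the pattern, with the FastGPT helpers stubbed and an illustrative model name:

```ts
// Minimal stand-in for the message type used in the diff.
type MessageParam = { role: string; content: unknown };

// Stubs for the helpers the diff imports.
declare function loadRequestMessages(messages: MessageParam[]): Promise<MessageParam[]>;
declare function getAIApi(): {
  chat: { completions: { create: (body: object) => Promise<unknown> } };
};

async function callToolModel(formativeMessages: MessageParam[], functions: object[]) {
  // Last step before the wire: resolve local image URLs to base64 so the
  // provider never sees cluster-internal paths.
  const requestMessages = await loadRequestMessages(formativeMessages);

  const ai = getAIApi();
  return ai.chat.completions.create({
    model: 'gpt-4o', // illustrative, not from the diff
    temperature: 0,
    stream: true,
    messages: requestMessages,
    functions,
    function_call: 'auto'
  });
}
```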
```diff
@@ -12,6 +12,7 @@ import { ChatItemType } from '@fastgpt/global/core/chat/type';
 import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
 import {
   GPTMessages2Chats,
+  chatValue2RuntimePrompt,
   chats2GPTMessages,
   getSystemPrompt,
   runtimePrompt2ChatsValue
@@ -29,10 +30,11 @@ type Response = DispatchNodeResultType<{
 
 export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<Response> => {
   const {
-    node: { nodeId, name, outputs },
+    node: { nodeId, name },
     runtimeNodes,
     runtimeEdges,
     histories,
+    query,
     params: { model, systemPrompt, userChatInput, history = 6 }
   } = props;
 
@@ -65,7 +67,7 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
       obj: ChatRoleEnum.Human,
       value: runtimePrompt2ChatsValue({
         text: userChatInput,
-        files: []
+        files: chatValue2RuntimePrompt(query).files
       })
     }
   ];
```
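The tool dispatcher change is behavioral rather than cosmetic: the human prompt previously sent `files: []`, silently dropping any files or images attached to the current query; now they are recovered from `query` via `chatValue2RuntimePrompt`. A sketch of the data flow, with signatures assumed from how the helpers are used:

```ts
// Signatures assumed from usage in the diff.
type RuntimeFile = { type: 'image' | 'file'; name?: string; url: string };
type ChatValue = unknown;

declare function chatValue2RuntimePrompt(value: ChatValue): {
  text: string;
  files: RuntimeFile[];
};
declare function runtimePrompt2ChatsValue(prompt: {
  text: string;
  files: RuntimeFile[];
}): ChatValue;

function buildHumanItem(query: ChatValue, userChatInput: string) {
  return {
    obj: 'Human',
    value: runtimePrompt2ChatsValue({
      text: userChatInput,
      // Was `files: []`: attachments on the incoming query were discarded.
      files: chatValue2RuntimePrompt(query).files
    })
  };
}
```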
```diff
@@ -1,6 +1,6 @@
 import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
 import { getAIApi } from '../../../../ai/config';
-import { filterGPTMessageByMaxTokens } from '../../../../chat/utils';
+import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../../chat/utils';
 import {
   ChatCompletion,
   StreamChatType,
@@ -87,6 +87,8 @@ export const runToolWithPromptCall = async (
     messages,
     maxTokens: toolModel.maxContext - 500 // filter token. not response maxToken
   });
+  const requestMessages = await loadRequestMessages(filterMessages);
+
   // console.log(JSON.stringify(filterMessages, null, 2));
   /* Run llm */
   const ai = getAIApi({
@@ -98,7 +100,7 @@ export const runToolWithPromptCall = async (
       model: toolModel.model,
       temperature: 0,
       stream,
-      messages: filterMessages
+      messages: requestMessages
     },
     {
       headers: {
```
```diff
@@ -1,6 +1,6 @@
 import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
 import { getAIApi } from '../../../../ai/config';
-import { filterGPTMessageByMaxTokens } from '../../../../chat/utils';
+import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../../chat/utils';
 import {
   ChatCompletion,
   ChatCompletionMessageToolCall,
@@ -99,6 +99,8 @@ export const runToolWithToolChoice = async (
     }
     return item;
   });
+  const requestMessages = await loadRequestMessages(formativeMessages);
+
   // console.log(
   //   JSON.stringify(
   //     {
@@ -106,7 +108,7 @@ export const runToolWithToolChoice = async (
   //       model: toolModel.model,
   //       temperature: 0,
   //       stream,
-  //       messages: formativeMessages,
+  //       messages: requestMessages,
   //       tools,
   //       tool_choice: 'auto'
   //     },
@@ -124,7 +126,7 @@ export const runToolWithToolChoice = async (
       model: toolModel.model,
       temperature: 0,
       stream,
-      messages: formativeMessages,
+      messages: requestMessages,
       tools,
       tool_choice: 'auto'
     },
```
```diff
@@ -2,7 +2,7 @@ import type { NextApiResponse } from 'next';
 import {
   filterGPTMessageByMaxTokens,
   formatGPTMessagesInRequestBefore,
-  loadChatImgToBase64
+  loadRequestMessages
 } from '../../../chat/utils';
 import type { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type.d';
 import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
@@ -151,22 +151,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
     ...formatGPTMessagesInRequestBefore(filterMessages)
   ] as ChatCompletionMessageParam[];
 
-  if (concatMessages.length === 0) {
-    return Promise.reject('core.chat.error.Messages empty');
-  }
-
-  const loadMessages = await Promise.all(
-    concatMessages.map(async (item) => {
-      if (item.role === ChatCompletionRequestMessageRoleEnum.User) {
-        return {
-          ...item,
-          content: await loadChatImgToBase64(item.content)
-        };
-      } else {
-        return item;
-      }
-    })
-  );
+  const requestMessages = await loadRequestMessages(concatMessages);
 
   const requestBody = {
     ...modelConstantsData?.defaultConfig,
@@ -174,7 +159,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
     temperature,
     max_tokens,
     stream,
-    messages: loadMessages
+    messages: requestMessages
   };
   const response = await ai.chat.completions.create(requestBody, {
     headers: {
```
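The dispatcher's fifteen inline lines collapse into a single call, and the empty-messages rejection did not disappear: it moved into `loadRequestMessages`. A condensed view of the call site after the refactor (surrounding dispatcher elided, names from the diff):

```ts
type MessageParam = { role: string; content: unknown };

declare function loadRequestMessages(messages: MessageParam[]): Promise<MessageParam[]>;
declare const concatMessages: MessageParam[];
declare const modelConstantsData: { defaultConfig?: Record<string, unknown> } | undefined;

async function buildRequestBody(temperature: number, max_tokens: number, stream: boolean) {
  // Rejects with 'core.chat.error.Messages empty' when there is nothing to
  // send; this is the guard the dispatcher previously performed inline.
  const requestMessages = await loadRequestMessages(concatMessages);

  return {
    ...modelConstantsData?.defaultConfig,
    temperature,
    max_tokens,
    stream,
    messages: requestMessages
  };
}
```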
```diff
@@ -25,7 +25,7 @@
     "mammoth": "^1.6.0",
     "mongoose": "^7.0.2",
     "multer": "1.4.5-lts.1",
-    "next": "14.2.3",
+    "next": "14.2.5",
     "nextjs-cors": "^2.2.0",
     "node-cron": "^3.0.3",
     "node-xlsx": "^0.23.0",
```