simple mode tool reason (#3984)

* simple mode tool reasoning

* model config cannot be set to empty

* perf: read files code

* perf: mongo gridfs chunks

* perf: doc
This commit is contained in:
Archer
2025-03-05 15:55:02 +08:00
committed by archer
parent 32ce032995
commit a345e56508
10 changed files with 92 additions and 22 deletions

View File

@@ -52,7 +52,9 @@ export async function uploadFile({
const stats = await fsp.stat(path);
if (!stats.isFile()) return Promise.reject(`${path} is not a file`);
const readStream = fs.createReadStream(path);
const readStream = fs.createReadStream(path, {
highWaterMark: 256 * 1024
});
// Add default metadata
metadata.teamId = teamId;
@@ -62,9 +64,27 @@ export async function uploadFile({
// create a gridfs bucket
const bucket = getGridBucket(bucketName);
const fileSize = stats.size;
const chunkSizeBytes = (() => {
// 计算理想块大小:文件大小 ÷ 目标块数(10)
const idealChunkSize = Math.ceil(fileSize / 10);
// 确保块大小至少为512KB
const minChunkSize = 512 * 1024; // 512KB
// 取理想块大小和最小块大小中的较大值
let chunkSize = Math.max(idealChunkSize, minChunkSize);
// 将块大小向上取整到最接近的64KB的倍数使其更整齐
chunkSize = Math.ceil(chunkSize / (64 * 1024)) * (64 * 1024);
return chunkSize;
})();
const stream = bucket.openUploadStream(filename, {
metadata,
contentType
contentType,
chunkSizeBytes
});
// save to gridfs

View File

@@ -3,15 +3,13 @@ import { PassThrough } from 'stream';
export const gridFsStream2Buffer = (stream: NodeJS.ReadableStream) => {
return new Promise<Buffer>((resolve, reject) => {
const chunks: Buffer[] = [];
let totalLength = 0;
const chunks: Uint8Array[] = [];
stream.on('data', (chunk) => {
chunks.push(chunk);
totalLength += chunk.length;
});
stream.on('end', () => {
const resultBuffer = Buffer.concat(chunks, totalLength); // 一次性拼接
const resultBuffer = Buffer.concat(chunks); // 一次性拼接
resolve(resultBuffer);
});
stream.on('error', (err) => {
@@ -21,25 +19,26 @@ export const gridFsStream2Buffer = (stream: NodeJS.ReadableStream) => {
};
export const stream2Encoding = async (stream: NodeJS.ReadableStream) => {
const start = Date.now();
const copyStream = stream.pipe(new PassThrough());
/* get encoding */
const buffer = await (() => {
return new Promise<Buffer>((resolve, reject) => {
let tmpBuffer: Buffer = Buffer.from([]);
const chunks: Uint8Array[] = [];
let totalLength = 0;
stream.on('data', (chunk) => {
if (tmpBuffer.length < 200) {
tmpBuffer = Buffer.concat([tmpBuffer, chunk]);
if (totalLength < 200) {
chunks.push(chunk);
totalLength += chunk.length;
if (tmpBuffer.length >= 200) {
resolve(tmpBuffer);
if (totalLength >= 200) {
resolve(Buffer.concat(chunks));
}
}
});
stream.on('end', () => {
resolve(tmpBuffer);
resolve(Buffer.concat(chunks));
});
stream.on('error', (err) => {
reject(err);

View File

@@ -43,13 +43,13 @@ export async function text2Speech({
const readableStream = response.body as unknown as NodeJS.ReadableStream;
readableStream.pipe(res);
let bufferStore = Buffer.from([]);
const chunks: Uint8Array[] = [];
readableStream.on('data', (chunk) => {
bufferStore = Buffer.concat([bufferStore, chunk]);
chunks.push(chunk);
});
readableStream.on('end', () => {
onSuccess({ model, buffer: bufferStore });
onSuccess({ model, buffer: Buffer.concat(chunks) });
});
readableStream.on('error', (e) => {
onError(e);

View File

@@ -46,8 +46,8 @@
"defaultConfig": {},
"fieldMap": {},
"type": "llm",
"showTopP": true,
"showStopSign": true
"showTopP": false,
"showStopSign": false
}
]
}

View File

@@ -21,6 +21,7 @@
"edit_channel": "Channel configuration",
"enable_channel": "Enable",
"forbid_channel": "Disabled",
"input maxToken_tip": "The model max_tokens parameter, if left blank, means that the model does not support it.",
"key_type": "API key format:",
"log": "Call log",
"log_detail": "Log details",
@@ -28,6 +29,7 @@
"log_status": "Status",
"mapping": "Model Mapping",
"mapping_tip": "A valid Json is required. \nThe model can be mapped when sending a request to the actual address. \nFor example:\n{\n \n \"gpt-4o\": \"gpt-4o-test\"\n\n}\n\nWhen FastGPT requests the gpt-4o model, the gpt-4o-test model is sent to the actual address, instead of gpt-4o.",
"max_temperature_tip": "If the model temperature parameter is not filled in, it means that the model does not support the temperature parameter.",
"model": "Model",
"model_name": "Model name",
"model_test": "Model testing",

View File

@@ -21,6 +21,7 @@
"edit_channel": "渠道配置",
"enable_channel": "启用",
"forbid_channel": "禁用",
"input maxToken_tip": "模型 max_tokens 参数,如果留空,则代表模型不支持该参数。",
"key_type": "API key 格式: ",
"log": "调用日志",
"log_detail": "日志详情",
@@ -28,6 +29,7 @@
"log_status": "状态",
"mapping": "模型映射",
"mapping_tip": "需填写一个有效 Json。可在向实际地址发送请求时对模型进行映射。例如\n{\n \"gpt-4o\": \"gpt-4o-test\"\n}\n当 FastGPT 请求 gpt-4o 模型时,会向实际地址发送 gpt-4o-test 的模型,而不是 gpt-4o。",
"max_temperature_tip": "模型 temperature 参数,不填则代表模型不支持 temperature 参数。",
"model": "模型",
"model_name": "模型名",
"model_test": "模型测试",

View File

@@ -19,6 +19,7 @@
"edit_channel": "渠道配置",
"enable_channel": "啟用",
"forbid_channel": "禁用",
"input maxToken_tip": "模型 max_tokens 參數,如果留空,則代表模型不支持該參數。",
"key_type": "API key 格式:",
"log": "調用日誌",
"log_detail": "日誌詳情",
@@ -26,6 +27,7 @@
"log_status": "狀態",
"mapping": "模型映射",
"mapping_tip": "需填寫一個有效 Json。\n可在向實際地址發送請求時對模型進行映射。\n例如\n{\n \n \"gpt-4o\": \"gpt-4o-test\"\n\n}\n\n當 FastGPT 請求 gpt-4o 模型時,會向實際地址發送 gpt-4o-test 的模型,而不是 gpt-4o。",
"max_temperature_tip": "模型 temperature 參數,不填則代表模型不支持 temperature 參數。",
"model": "模型",
"model_name": "模型名",
"model_test": "模型測試",