Mirror of https://github.com/labring/FastGPT.git (synced 2025-07-23 13:03:50 +00:00)
simple mode tool reason (#3984)
* simple mode tool reason
* model config cannot set empty
* perf: read files code
* perf: mongo gridfs chunks
* perf: doc
@@ -52,7 +52,9 @@ export async function uploadFile({
   const stats = await fsp.stat(path);
   if (!stats.isFile()) return Promise.reject(`${path} is not a file`);

-  const readStream = fs.createReadStream(path);
+  const readStream = fs.createReadStream(path, {
+    highWaterMark: 256 * 1024
+  });

   // Add default metadata
   metadata.teamId = teamId;
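A note on the read-stream change: fs.createReadStream defaults to a 64KB highWaterMark, so raising it to 256KB makes each 'data' event deliver a larger chunk and cuts the number of reads roughly fourfold on large files. A minimal sketch of how to observe this (the './big.bin' path is a placeholder, not part of the commit):

import fs from 'fs';

// Count how many 'data' events a read produces at a given buffer size.
const countChunks = (path: string, highWaterMark: number) =>
  new Promise<number>((resolve, reject) => {
    let chunks = 0;
    fs.createReadStream(path, { highWaterMark })
      .on('data', () => chunks++)
      .on('end', () => resolve(chunks))
      .on('error', reject);
  });

async function main() {
  console.log(await countChunks('./big.bin', 64 * 1024)); // Node's default size
  console.log(await countChunks('./big.bin', 256 * 1024)); // roughly 4x fewer events
}
main().catch(console.error);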
@@ -62,9 +64,27 @@ export async function uploadFile({
   // create a gridfs bucket
   const bucket = getGridBucket(bucketName);

+  const fileSize = stats.size;
+  const chunkSizeBytes = (() => {
+    // Ideal chunk size: file size ÷ target chunk count (10)
+    const idealChunkSize = Math.ceil(fileSize / 10);
+
+    // Ensure the chunk size is at least 512KB
+    const minChunkSize = 512 * 1024; // 512KB
+
+    // Take the larger of the ideal and minimum chunk sizes
+    let chunkSize = Math.max(idealChunkSize, minChunkSize);
+
+    // Round the chunk size up to the nearest multiple of 64KB to keep it tidy
+    chunkSize = Math.ceil(chunkSize / (64 * 1024)) * (64 * 1024);
+
+    return chunkSize;
+  })();
+
   const stream = bucket.openUploadStream(filename, {
     metadata,
-    contentType
+    contentType,
+    chunkSizeBytes
   });

   // save to gridfs
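The chunk-size IIFE above is pure arithmetic, so it is easy to sanity-check in isolation. A sketch with the heuristic extracted into a helper (computeChunkSizeBytes is a hypothetical name, not part of the commit); the expected values follow directly from the formula:

// Hypothetical extraction of the commit's chunk-size heuristic.
function computeChunkSizeBytes(fileSize: number): number {
  const idealChunkSize = Math.ceil(fileSize / 10); // aim for ~10 chunks per file
  const minChunkSize = 512 * 1024; // floor of 512KB
  const chunkSize = Math.max(idealChunkSize, minChunkSize);
  // round up to the next 64KB boundary
  return Math.ceil(chunkSize / (64 * 1024)) * (64 * 1024);
}

console.log(computeChunkSizeBytes(100 * 1024)); // 524288 — small file, the 512KB floor wins
console.log(computeChunkSizeBytes(10 * 1024 * 1024)); // 1048576 — 1MB chunks, 10 of them
console.log(computeChunkSizeBytes(100 * 1024 * 1024)); // 10485760 — 10MB chunks, 10 of them

Compared with the GridFS default of 255KB chunks, this caps large uploads at roughly ten documents in the chunks collection, which is what the "perf: mongo gridfs chunks" line in the commit message refers to.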
@@ -3,15 +3,13 @@ import { PassThrough } from 'stream';

 export const gridFsStream2Buffer = (stream: NodeJS.ReadableStream) => {
   return new Promise<Buffer>((resolve, reject) => {
-    const chunks: Buffer[] = [];
-    let totalLength = 0;
+    const chunks: Uint8Array[] = [];

     stream.on('data', (chunk) => {
       chunks.push(chunk);
-      totalLength += chunk.length;
     });
     stream.on('end', () => {
-      const resultBuffer = Buffer.concat(chunks, totalLength); // concatenate in one pass
+      const resultBuffer = Buffer.concat(chunks); // concatenate in one pass
       resolve(resultBuffer);
     });
     stream.on('error', (err) => {
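The rewrite above settles on the collect-then-concat pattern: push every chunk into an array and call Buffer.concat once on 'end'. Buffer.concat computes the total length itself when the second argument is omitted, so the manual totalLength bookkeeping disappears. A self-contained sketch of the same pattern with a usage example:

import { Readable } from 'stream';

// Collect chunks, concatenate once at the end.
const streamToBuffer = (stream: NodeJS.ReadableStream) =>
  new Promise<Buffer>((resolve, reject) => {
    const chunks: Uint8Array[] = [];
    stream
      .on('data', (chunk) => chunks.push(chunk))
      .on('end', () => resolve(Buffer.concat(chunks)))
      .on('error', reject);
  });

async function main() {
  const buf = await streamToBuffer(Readable.from([Buffer.from('he'), Buffer.from('llo')]));
  console.log(buf.toString()); // "hello"
}
main().catch(console.error);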
@@ -21,25 +19,26 @@ export const gridFsStream2Buffer = (stream: NodeJS.ReadableStream) => {
 };

 export const stream2Encoding = async (stream: NodeJS.ReadableStream) => {
-  const start = Date.now();
   const copyStream = stream.pipe(new PassThrough());

   /* get encoding */
   const buffer = await (() => {
     return new Promise<Buffer>((resolve, reject) => {
-      let tmpBuffer: Buffer = Buffer.from([]);
+      const chunks: Uint8Array[] = [];
+      let totalLength = 0;

       stream.on('data', (chunk) => {
-        if (tmpBuffer.length < 200) {
-          tmpBuffer = Buffer.concat([tmpBuffer, chunk]);
+        if (totalLength < 200) {
+          chunks.push(chunk);
+          totalLength += chunk.length;

-          if (tmpBuffer.length >= 200) {
-            resolve(tmpBuffer);
+          if (totalLength >= 200) {
+            resolve(Buffer.concat(chunks));
           }
         }
       });
       stream.on('end', () => {
-        resolve(tmpBuffer);
+        resolve(Buffer.concat(chunks));
       });
       stream.on('error', (err) => {
         reject(err);
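stream2Encoding applies the same collect-then-concat pattern but with an early exit: it resolves as soon as roughly 200 bytes have arrived, since that prefix is enough to sniff the encoding, while the PassThrough copy keeps the full stream available for the caller. A minimal sketch of that tee-and-peek shape (readPrefix is a hypothetical helper; the actual encoding detector is outside this hunk):

import { PassThrough } from 'stream';

// Tee a stream and resolve early with its first ~`limit` bytes.
// Resolving does not stop the stream; data keeps flowing into `copy`.
function readPrefix(stream: NodeJS.ReadableStream, limit = 200) {
  const copy = stream.pipe(new PassThrough());
  const prefix = new Promise<Buffer>((resolve, reject) => {
    const chunks: Uint8Array[] = [];
    let total = 0;
    stream.on('data', (chunk) => {
      if (total < limit) {
        chunks.push(chunk);
        total += chunk.length;
        if (total >= limit) resolve(Buffer.concat(chunks)); // enough to sniff
      }
    });
    stream.on('end', () => resolve(Buffer.concat(chunks))); // stream shorter than limit
    stream.on('error', reject);
  });
  return { prefix, copy };
}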