mirror of
https://github.com/labring/FastGPT.git
synced 2025-07-23 05:12:39 +00:00
V4.9.4 feature (#4470)
* Training status (#4424)
* dataset data training state (#4311)
* dataset data training state
* fix
* fix ts
* fix
* fix api format
* fix
* fix
* perf: count training
* format
* fix: dataset training state (#4417)
* fix
* add test
* fix
* fix
* fix test
* fix test
* perf: training count
* count
* loading status

---------

Co-authored-by: heheer <heheer@sealos.io>

* doc
* website sync feature (#4429)
* perf: introduce BullMQ for website sync (#4403)
* perf: introduce BullMQ for website sync
* feat: new redis module
* fix: remove graceful shutdown
* perf: improve UI in dataset detail
  - Updated the "change" icon SVG file.
  - Modified i18n strings.
  - Added new i18n string "immediate_sync".
  - Improved UI in dataset detail page, including button icons and background colors.
* refactor: Add chunkSettings to DatasetSchema
* perf: website sync ux
* env template
* fix: clean up website dataset when updating chunk settings (#4420)
* perf: check setting updated
* perf: worker concurrency
* feat: init script for website sync refactor (#4425)
* website feature doc

---------

Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>

* pro migration (#4388) (#4433)
* pro migration
* reuse customPdfParseType

Co-authored-by: gggaaallleee <91131304+gggaaallleee@users.noreply.github.com>

* perf: remove loading ui
* feat: config chat file expired time
* Redis cache (#4436)
* perf: add Redis cache for vector counting (#4432)
* feat: cache
* perf: get cache key

---------

Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>

* perf: mobile voice input (#4437)
* update: Mobile voice interaction (#4362)
* Add files via upload
* Add files via upload
* Update ollama.md
* Update ollama.md
* Add files via upload
* Update useSpeech.ts
* Update ChatInput.tsx
* Update useSpeech.ts
* Update ChatInput.tsx
* Update useSpeech.ts
* Update constants.ts
* Add files via upload
* Update ChatInput.tsx
* Update useSpeech.ts
* Update useSpeech.ts
* Update useSpeech.ts
* Update ChatInput.tsx
* Add files via upload
* Update common.json
* Update VoiceInput.tsx
* Update ChatInput.tsx
* Update VoiceInput.tsx
* Update useSpeech.ts
* Update useSpeech.ts
* Update common.json
* Update common.json
* Update common.json
* Update VoiceInput.tsx
* Update VoiceInput.tsx
* Update ChatInput.tsx
* Update VoiceInput.tsx
* Update ChatInput.tsx
* Update VoiceInput.tsx
* Update ChatInput.tsx
* Update useSpeech.ts
* Update common.json
* Update chat.json
* Update common.json
* Update chat.json
* Update common.json
* Update chat.json
* Update VoiceInput.tsx
* Update ChatInput.tsx
* Update useSpeech.ts
* Update VoiceInput.tsx
* speech ui
* Polish the voice input component: adjust the input box display logic, fix the voice input mask styling, update the canvas background opacity, and improve interaction. (#4435)
* perf: mobile voice input

---------

Co-authored-by: dreamer6680 <1468683855@qq.com>

* Test completion v2 (#4438)
* add v2 completions (#4364)
* add v2 completions
* completion config
* config version
* fix
* frontend
* doc
* fix
* fix: completions v2 api

---------

Co-authored-by: heheer <heheer@sealos.io>

* package
* Test mongo log (#4443)
* feat: mongodb-log (#4426)
* perf: mongo log
* feat: completions stop reasoner
* mongo db log

---------

Co-authored-by: Finley Ge <32237950+FinleyGe@users.noreply.github.com>

* update doc
* Update doc
* fix external var ui (#4444)
* action
* fix: ts (#4458)
* preview doc action
* add docs preview permission
* update preview action
* update action
* update doc (#4460)
* update preview action
* update doc
* remove
* update
* schema
* update mq export; perf: redis cache (#4465)
* perf: redis cache
* update mq export
* perf: website sync error tip
* add error worker
* website sync ui (#4466)
* Updated the dynamic display of the voice input pop-up (#4469)
* Update VoiceInput.tsx
* Update VoiceInput.tsx
* Update VoiceInput.tsx
* fix: voice input

---------

Co-authored-by: heheer <heheer@sealos.io>
Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>
Co-authored-by: gggaaallleee <91131304+gggaaallleee@users.noreply.github.com>
Co-authored-by: dreamer6680 <1468683855@qq.com>
Co-authored-by: Finley Ge <32237950+FinleyGe@users.noreply.github.com>
@@ -1,5 +1,4 @@
-import { connectionMongo, getMongoModel } from '../../mongo';
-const { Schema } = connectionMongo;
+import { getMongoModel, Schema } from '../../mongo';
 import { RawTextBufferSchemaType } from './type';
 
 export const collectionName = 'buffer_rawtexts';
@@ -1,5 +1,4 @@
-import { connectionMongo, getMongoModel, type Model } from '../../../common/mongo';
-const { Schema, model, models } = connectionMongo;
+import { Schema, getMongoModel } from '../../../common/mongo';
 import { TTSBufferSchemaType } from './type.d';
 
 export const collectionName = 'buffer_tts';
packages/service/common/bullmq/index.ts (new file, +79)
@@ -0,0 +1,79 @@
+import { ConnectionOptions, Processor, Queue, QueueOptions, Worker, WorkerOptions } from 'bullmq';
+import { addLog } from '../system/log';
+import { newQueueRedisConnection, newWorkerRedisConnection } from '../redis';
+
+const defaultWorkerOpts: Omit<ConnectionOptions, 'connection'> = {
+  removeOnComplete: {
+    count: 0 // Delete jobs immediately on completion
+  },
+  removeOnFail: {
+    count: 0 // Delete jobs immediately on failure
+  }
+};
+
+export enum QueueNames {
+  websiteSync = 'websiteSync'
+}
+
+export const queues = (() => {
+  if (!global.queues) {
+    global.queues = new Map<QueueNames, Queue>();
+  }
+  return global.queues;
+})();
+export const workers = (() => {
+  if (!global.workers) {
+    global.workers = new Map<QueueNames, Worker>();
+  }
+  return global.workers;
+})();
+
+export function getQueue<DataType, ReturnType = void>(
+  name: QueueNames,
+  opts?: Omit<QueueOptions, 'connection'>
+): Queue<DataType, ReturnType> {
+  // check if global.queues has the queue
+  const queue = queues.get(name);
+  if (queue) {
+    return queue as Queue<DataType, ReturnType>;
+  }
+  const newQueue = new Queue<DataType, ReturnType>(name.toString(), {
+    connection: newQueueRedisConnection(),
+    ...opts
+  });
+
+  // default error handler, to avoid unhandled exceptions
+  newQueue.on('error', (error) => {
+    addLog.error(`MQ Queue [${name}]: ${error.message}`, error);
+  });
+  queues.set(name, newQueue);
+  return newQueue;
+}
+
+export function getWorker<DataType, ReturnType = void>(
+  name: QueueNames,
+  processor: Processor<DataType, ReturnType>,
+  opts?: Omit<WorkerOptions, 'connection'>
+): Worker<DataType, ReturnType> {
+  const worker = workers.get(name);
+  if (worker) {
+    return worker as Worker<DataType, ReturnType>;
+  }
+
+  const newWorker = new Worker<DataType, ReturnType>(name.toString(), processor, {
+    connection: newWorkerRedisConnection(),
+    ...defaultWorkerOpts,
+    ...opts
+  });
+  // default error handler, to avoid unhandled exceptions
+  newWorker.on('error', (error) => {
+    addLog.error(`MQ Worker [${name}]: ${error.message}`, error);
+  });
+  newWorker.on('failed', (jobId, error) => {
+    addLog.error(`MQ Worker [${name}]: ${error.message}`, error);
+  });
+  workers.set(name, newWorker);
+  return newWorker;
+}
+
+export * from 'bullmq';
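The two factories above memoize one Queue and one Worker per QueueNames entry in process-global maps, so repeated imports and dev hot-reloads reuse the same Redis connections. A minimal wiring sketch (illustrative only; the import path and job payload are hypothetical):

import { getQueue, getWorker, QueueNames } from '../../common/bullmq';

type DemoJobData = { datasetId: string };

// The same QueueNames entry always resolves to the same cached instance.
const queue = getQueue<DemoJobData>(QueueNames.websiteSync);

// Register a processor; 'error' and 'failed' are already logged by getWorker.
getWorker<DemoJobData>(QueueNames.websiteSync, async (job) => {
  console.log('processing dataset', job.data.datasetId);
});

export const enqueueDemo = (datasetId: string) => queue.add(datasetId, { datasetId });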
packages/service/common/bullmq/type.d.ts (vendored, new file, +7)
@@ -0,0 +1,7 @@
+import { Queue, Worker } from 'bullmq';
+import { QueueNames } from './index';
+
+declare global {
+  var queues: Map<QueueNames, Queue> | undefined;
+  var workers: Map<QueueNames, Worker> | undefined;
+}
@@ -1,5 +1,4 @@
-import { connectionMongo, getMongoModel, type Model } from '../../mongo';
-const { Schema } = connectionMongo;
+import { Schema, getMongoModel } from '../../mongo';
 
 const DatasetFileSchema = new Schema({});
 const ChatFileSchema = new Schema({});
@@ -1,7 +1,6 @@
 import { TeamCollectionName } from '@fastgpt/global/support/user/team/constant';
-import { connectionMongo, getMongoModel } from '../../mongo';
+import { Schema, getMongoModel } from '../../mongo';
 import { MongoImageSchemaType } from '@fastgpt/global/common/file/image/type.d';
-const { Schema } = connectionMongo;
 
 const ImageSchema = new Schema({
   teamId: {
@@ -1,17 +1,26 @@
 import { addLog } from '../../common/system/log';
-import mongoose, { Model } from 'mongoose';
+import mongoose, { Model, Mongoose } from 'mongoose';
 
 export default mongoose;
 export * from 'mongoose';
 
 export const MONGO_URL = process.env.MONGODB_URI as string;
+export const MONGO_LOG_URL = (process.env.MONGODB_LOG_URI ?? process.env.MONGODB_URI) as string;
+
 export const connectionMongo = (() => {
   if (!global.mongodb) {
-    global.mongodb = mongoose;
+    global.mongodb = new Mongoose();
   }
   return global.mongodb;
 })();
 
+export const connectionLogMongo = (() => {
+  if (!global.mongodbLog) {
+    global.mongodbLog = new Mongoose();
+  }
+  return global.mongodbLog;
+})();
+
 const addCommonMiddleware = (schema: mongoose.Schema) => {
   const operations = [
     /^find/,
@@ -71,6 +80,19 @@ export const getMongoModel = <T>(name: string, schema: mongoose.Schema) => {
   return model;
 };
 
+export const getMongoLogModel = <T>(name: string, schema: mongoose.Schema) => {
+  if (connectionLogMongo.models[name]) return connectionLogMongo.models[name] as Model<T>;
+  console.log('Load model======', name);
+  addCommonMiddleware(schema);
+
+  const model = connectionLogMongo.model<T>(name, schema);
+
+  // Sync index
+  syncMongoIndex(model);
+
+  return model;
+};
+
 const syncMongoIndex = async (model: Model<any>) => {
   if (process.env.SYNC_INDEX !== '0' && process.env.NODE_ENV !== 'test') {
     try {
@@ -1,6 +1,5 @@
 import { delay } from '@fastgpt/global/common/system/utils';
 import { addLog } from '../system/log';
-import { connectionMongo } from './index';
 import type { Mongoose } from 'mongoose';
 
 const maxConnecting = Math.max(30, Number(process.env.DB_MAX_LINK || 20));
@@ -8,41 +7,41 @@ const maxConnecting = Math.max(30, Number(process.env.DB_MAX_LINK || 20));
 /**
  * connect MongoDB and init data
  */
-export async function connectMongo(): Promise<Mongoose> {
+export async function connectMongo(db: Mongoose, url: string): Promise<Mongoose> {
   /* Connecting, connected will return */
-  if (connectionMongo.connection.readyState !== 0) {
-    return connectionMongo;
+  if (db.connection.readyState !== 0) {
+    return db;
   }
 
-  console.log('mongo start connect');
+  console.log('MongoDB start connect');
   try {
     // Remove existing listeners to prevent duplicates
-    connectionMongo.connection.removeAllListeners('error');
-    connectionMongo.connection.removeAllListeners('disconnected');
-    connectionMongo.set('strictQuery', 'throw');
+    db.connection.removeAllListeners('error');
+    db.connection.removeAllListeners('disconnected');
+    db.set('strictQuery', 'throw');
 
-    connectionMongo.connection.on('error', async (error) => {
+    db.connection.on('error', async (error) => {
       console.log('mongo error', error);
       try {
-        if (connectionMongo.connection.readyState !== 0) {
-          await connectionMongo.disconnect();
+        if (db.connection.readyState !== 0) {
+          await db.disconnect();
           await delay(1000);
-          await connectMongo();
+          await connectMongo(db, url);
         }
       } catch (error) {}
     });
-    connectionMongo.connection.on('disconnected', async () => {
+    db.connection.on('disconnected', async () => {
       console.log('mongo disconnected');
       try {
-        if (connectionMongo.connection.readyState !== 0) {
-          await connectionMongo.disconnect();
+        if (db.connection.readyState !== 0) {
+          await db.disconnect();
           await delay(1000);
-          await connectMongo();
+          await connectMongo(db, url);
         }
       } catch (error) {}
     });
 
-    await connectionMongo.connect(process.env.MONGODB_URI as string, {
+    const options = {
       bufferCommands: true,
       maxConnecting: maxConnecting,
       maxPoolSize: maxConnecting,
@@ -53,18 +52,18 @@ export async function connectMongo(): Promise<Mongoose> {
       maxIdleTimeMS: 300000,
       retryWrites: true,
       retryReads: true
+    };
 
-      // readPreference: 'secondaryPreferred',
-      // readConcern: { level: 'local' },
-      // writeConcern: { w: 'majority', j: true }
-    });
+    db.connect(url, options);
 
     console.log('mongo connected');
-    return connectionMongo;
+    return db;
   } catch (error) {
-    addLog.error('mongo connect error', error);
-    await connectionMongo.disconnect();
+    addLog.error('Mongo connect error', error);
+
+    await db.disconnect();
+
     await delay(1000);
-    return connectMongo();
+    return connectMongo(db, url);
   }
 }
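With the refactored signature, the caller supplies both the Mongoose instance and the connection string, which is what allows the primary and log databases declared above to connect independently. A minimal sketch of a call site (illustrative; the actual entrypoints live elsewhere in the repo):

import { connectionMongo, connectionLogMongo, MONGO_URL, MONGO_LOG_URL } from './index';
import { connectMongo } from './init';

export const initMongo = async () => {
  await connectMongo(connectionMongo, MONGO_URL);
  await connectMongo(connectionLogMongo, MONGO_LOG_URL);
};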
packages/service/common/mongo/type.d.ts (vendored, +1)
@@ -3,4 +3,5 @@ import type { Logger } from 'winston';
 
 declare global {
   var mongodb: Mongoose | undefined;
+  var mongodbLog: Mongoose | undefined;
 }
packages/service/common/redis/cache.ts (new file, +38)
@@ -0,0 +1,38 @@
+import { getGlobalRedisCacheConnection } from './index';
+import { addLog } from '../system/log';
+import { retryFn } from '@fastgpt/global/common/system/utils';
+
+export enum CacheKeyEnum {
+  team_vector_count = 'team_vector_count'
+}
+
+export const setRedisCache = async (
+  key: string,
+  data: string | Buffer | number,
+  expireSeconds?: number
+) => {
+  return await retryFn(async () => {
+    try {
+      const redis = getGlobalRedisCacheConnection();
+
+      if (expireSeconds) {
+        await redis.set(key, data, 'EX', expireSeconds);
+      } else {
+        await redis.set(key, data);
+      }
+    } catch (error) {
+      addLog.error('Set cache error:', error);
+      return Promise.reject(error);
+    }
+  });
+};
+
+export const getRedisCache = async (key: string) => {
+  const redis = getGlobalRedisCacheConnection();
+  return await retryFn(() => redis.get(key));
+};
+
+export const delRedisCache = async (key: string) => {
+  const redis = getGlobalRedisCacheConnection();
+  await retryFn(() => redis.del(key));
+};
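These helpers wrap a single shared ioredis connection with retryFn. A typical cache-aside read, the pattern the vector-count code below adopts, looks like this sketch (the loader callback is hypothetical):

import { getRedisCache, setRedisCache, CacheKeyEnum } from './cache';

export const getCachedTeamVectorCount = async (teamId: string, load: () => Promise<number>) => {
  const key = `${CacheKeyEnum.team_vector_count}:${teamId}`;

  const cached = await getRedisCache(key);
  if (cached) return Number(cached);

  const count = await load();
  await setRedisCache(key, count, 30 * 60); // 30-minute TTL
  return count;
};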
packages/service/common/redis/index.ts (new file, +43)
@@ -0,0 +1,43 @@
+import { addLog } from '../system/log';
+import Redis from 'ioredis';
+
+const REDIS_URL = process.env.REDIS_URL ?? 'redis://localhost:6379';
+
+export const newQueueRedisConnection = () => {
+  const redis = new Redis(REDIS_URL);
+  redis.on('connect', () => {
+    console.log('Redis connected');
+  });
+  redis.on('error', (error) => {
+    console.error('Redis connection error', error);
+  });
+  return redis;
+};
+
+export const newWorkerRedisConnection = () => {
+  const redis = new Redis(REDIS_URL, {
+    maxRetriesPerRequest: null
+  });
+  redis.on('connect', () => {
+    console.log('Redis connected');
+  });
+  redis.on('error', (error) => {
+    console.error('Redis connection error', error);
+  });
+  return redis;
+};
+
+export const getGlobalRedisCacheConnection = () => {
+  if (global.redisCache) return global.redisCache;
+
+  global.redisCache = new Redis(REDIS_URL, { keyPrefix: 'fastgpt:cache:' });
+
+  global.redisCache.on('connect', () => {
+    addLog.info('Redis connected');
+  });
+  global.redisCache.on('error', (error) => {
+    addLog.error('Redis connection error', error);
+  });
+
+  return global.redisCache;
+};
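Note the split: queue and worker connections are created per consumer, with maxRetriesPerRequest: null on the worker connection because BullMQ requires that for blocking connections, while the cache connection is a process-wide singleton whose keyPrefix namespaces every key. A small sketch of the prefix behaviour (key name illustrative):

import { getGlobalRedisCacheConnection } from './index';

export const demoPrefix = async () => {
  const redis = getGlobalRedisCacheConnection();
  // ioredis applies keyPrefix to reads and writes alike,
  // so this lands in Redis under 'fastgpt:cache:demo'.
  await redis.set('demo', '1');
  return redis.get('demo'); // '1'
};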
packages/service/common/redis/type.d.ts (vendored, new file, +5)
@@ -0,0 +1,5 @@
+import Redis from 'ioredis';
+
+declare global {
+  var redisCache: Redis | null;
+}
@@ -1,4 +1,4 @@
-import { getMongoModel, Schema } from '../../../common/mongo';
+import { getMongoLogModel as getMongoModel, Schema } from '../../../common/mongo';
 import { SystemLogType } from './type';
 import { LogLevelEnum } from './constant';
 
@@ -1,5 +1,5 @@
 export enum TimerIdEnum {
-  checkInValidDatasetFiles = 'checkInValidDatasetFiles',
+  checkExpiredFiles = 'checkExpiredFiles',
   checkInvalidDatasetData = 'checkInvalidDatasetData',
   checkInvalidVector = 'checkInvalidVector',
   clearExpiredSubPlan = 'clearExpiredSubPlan',
@@ -2,10 +2,12 @@
 import { PgVectorCtrl } from './pg/class';
 import { ObVectorCtrl } from './oceanbase/class';
 import { getVectorsByText } from '../../core/ai/embedding';
-import { InsertVectorProps } from './controller.d';
+import { DelDatasetVectorCtrlProps, InsertVectorProps } from './controller.d';
 import { EmbeddingModelItemType } from '@fastgpt/global/core/ai/model.d';
 import { MILVUS_ADDRESS, PG_ADDRESS, OCEANBASE_ADDRESS } from './constants';
 import { MilvusCtrl } from './milvus/class';
+import { setRedisCache, getRedisCache, delRedisCache, CacheKeyEnum } from '../redis/cache';
+import { throttle } from 'lodash';
 
 const getVectorObj = () => {
   if (PG_ADDRESS) return new PgVectorCtrl();
@@ -15,13 +17,33 @@ const getVectorObj = () => {
   return new PgVectorCtrl();
 };
 
+const getChcheKey = (teamId: string) => `${CacheKeyEnum.team_vector_count}:${teamId}`;
+const onDelCache = throttle((teamId: string) => delRedisCache(getChcheKey(teamId)), 30000, {
+  leading: true,
+  trailing: true
+});
+
 const Vector = getVectorObj();
 
 export const initVectorStore = Vector.init;
-export const deleteDatasetDataVector = Vector.delete;
 export const recallFromVectorStore = Vector.embRecall;
 export const getVectorDataByTime = Vector.getVectorDataByTime;
-export const getVectorCountByTeamId = Vector.getVectorCountByTeamId;
+
+export const getVectorCountByTeamId = async (teamId: string) => {
+  const key = getChcheKey(teamId);
+
+  const countStr = await getRedisCache(key);
+  if (countStr) {
+    return Number(countStr);
+  }
+
+  const count = await Vector.getVectorCountByTeamId(teamId);
+
+  await setRedisCache(key, count, 30 * 60);
+
+  return count;
+};
+
 export const getVectorCountByDatasetId = Vector.getVectorCountByDatasetId;
 export const getVectorCountByCollectionId = Vector.getVectorCountByCollectionId;
 
@@ -43,8 +65,16 @@ export const insertDatasetDataVector = async ({
     vector: vectors[0]
   });
 
+  onDelCache(props.teamId);
+
   return {
     tokens,
     insertId
   };
 };
+
+export const deleteDatasetDataVector = async (props: DelDatasetVectorCtrlProps) => {
+  const result = await Vector.delete(props);
+  onDelCache(props.teamId);
+  return result;
+};
@@ -2,6 +2,7 @@ import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
 import {
   ChatCompletionCreateParamsNonStreaming,
   ChatCompletionCreateParamsStreaming,
+  CompletionFinishReason,
   StreamChatType
 } from '@fastgpt/global/core/ai/type';
 import { getLLMModel } from './model';
@@ -142,26 +143,40 @@ export const parseReasoningStreamContent = () => {
         content?: string;
         reasoning_content?: string;
       };
+      finish_reason?: CompletionFinishReason;
     }[];
   },
   parseThinkTag = false
-): [string, string] => {
+): {
+  reasoningContent: string;
+  content: string;
+  finishReason: CompletionFinishReason;
+} => {
   const content = part.choices?.[0]?.delta?.content || '';
+  const finishReason = part.choices?.[0]?.finish_reason || null;
+
   // @ts-ignore
   const reasoningContent = part.choices?.[0]?.delta?.reasoning_content || '';
   if (reasoningContent || !parseThinkTag) {
     isInThinkTag = false;
-    return [reasoningContent, content];
+    return { reasoningContent, content, finishReason };
   }
 
   if (!content) {
-    return ['', ''];
+    return {
+      reasoningContent: '',
+      content: '',
+      finishReason
+    };
   }
 
   // Not inside a think tag, or reasoningContent is present (already parsed by the API):
   // return reasoningContent and content as-is
   if (isInThinkTag === false) {
-    return ['', content];
+    return {
+      reasoningContent: '',
+      content,
+      finishReason
+    };
   }
 
   // Detect whether this data opens a think tag
@@ -170,17 +185,29 @@ export const parseReasoningStreamContent = () => {
     startTagBuffer += content;
     // Too little content to parse yet; hold off
     if (startTagBuffer.length < startTag.length) {
-      return ['', ''];
+      return {
+        reasoningContent: '',
+        content: '',
+        finishReason
+      };
     }
 
     if (startTagBuffer.startsWith(startTag)) {
       isInThinkTag = true;
-      return [startTagBuffer.slice(startTag.length), ''];
+      return {
+        reasoningContent: startTagBuffer.slice(startTag.length),
+        content: '',
+        finishReason
+      };
     }
 
     // No think tag matched: treat as not in a think tag and return the buffer as content
     isInThinkTag = false;
-    return ['', startTagBuffer];
+    return {
+      reasoningContent: '',
+      content: startTagBuffer,
+      finishReason
+    };
  }
 
   // Confirmed think-tag content: return think content while watching for </think>
@@ -201,19 +228,35 @@ export const parseReasoningStreamContent = () => {
     if (endTagBuffer.includes(endTag)) {
       isInThinkTag = false;
       const answer = endTagBuffer.slice(endTag.length);
-      return ['', answer];
+      return {
+        reasoningContent: '',
+        content: answer,
+        finishReason
+      };
     } else if (endTagBuffer.length >= endTag.length) {
       // The buffer exceeds the end-tag length without matching </think>:
       // the guess failed, still in the think phase.
       const tmp = endTagBuffer;
       endTagBuffer = '';
-      return [tmp, ''];
+      return {
+        reasoningContent: tmp,
+        content: '',
+        finishReason
+      };
     }
-    return ['', ''];
+    return {
+      reasoningContent: '',
+      content: '',
+      finishReason
+    };
   } else if (content.includes(endTag)) {
     // Full </think> hit in the content: end the think phase immediately
     isInThinkTag = false;
     const [think, answer] = content.split(endTag);
-    return [think, answer];
+    return {
+      reasoningContent: think,
+      content: answer,
+      finishReason
+    };
   } else {
     // No buffer and no </think> hit yet: probe for a partial </think>
     for (let i = 1; i < endTag.length; i++) {
@@ -222,13 +265,21 @@ export const parseReasoningStreamContent = () => {
       if (content.endsWith(partialEndTag)) {
         const think = content.slice(0, -partialEndTag.length);
         endTagBuffer += partialEndTag;
-        return [think, ''];
+        return {
+          reasoningContent: think,
+          content: '',
+          finishReason
+        };
       }
     }
   }
 
   // No end tag matched at all: still in the think phase
-  return [content, ''];
+  return {
+    reasoningContent: content,
+    content: '',
+    finishReason
+  };
 };
 
 const getStartTagBuffer = () => startTagBuffer;
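parsePart now returns a named object instead of a [reasoningContent, content] tuple, threading finish_reason through every early return so stream consumers can report why generation stopped. A sketch of a consuming loop, mirroring the streamResponse updates later in this diff (the stream variable is an assumed AsyncIterable of completion chunks):

const { parsePart } = parseReasoningStreamContent();

let answer = '';
let reasoning = '';
let finishReason: CompletionFinishReason = null;

for await (const part of stream) {
  const { reasoningContent, content, finishReason: fr } = parsePart(part, true);
  finishReason = finishReason || fr; // keep the first non-null reason
  answer += content;
  reasoning += reasoningContent;
}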
@@ -1,6 +1,7 @@
 import {
   DatasetCollectionTypeEnum,
-  DatasetCollectionDataProcessModeEnum
+  DatasetCollectionDataProcessModeEnum,
+  DatasetTypeEnum
 } from '@fastgpt/global/core/dataset/constants';
 import type { CreateDatasetCollectionParams } from '@fastgpt/global/core/dataset/api.d';
 import { MongoDatasetCollection } from './schema';
@@ -104,7 +105,8 @@ export const createCollectionAndInsertData = async ({
       hashRawText: hashStr(rawText),
       rawTextLength: rawText.length,
       nextSyncTime: (() => {
-        if (!dataset.autoSync) return undefined;
+        // ignore auto collections sync for website datasets
+        if (!dataset.autoSync && dataset.type === DatasetTypeEnum.websiteDataset) return undefined;
         if (
           [DatasetCollectionTypeEnum.link, DatasetCollectionTypeEnum.apiFile].includes(
             createCollectionParams.type
@@ -1,13 +1,8 @@
 import { connectionMongo, getMongoModel } from '../../../common/mongo';
-const { Schema, model, models } = connectionMongo;
+const { Schema } = connectionMongo;
 import { DatasetCollectionSchemaType } from '@fastgpt/global/core/dataset/type.d';
-import {
-  DatasetCollectionTypeMap,
-  DatasetCollectionDataProcessModeEnum,
-  ChunkSettingModeEnum,
-  DataChunkSplitModeEnum
-} from '@fastgpt/global/core/dataset/constants';
-import { DatasetCollectionName } from '../schema';
+import { DatasetCollectionTypeMap } from '@fastgpt/global/core/dataset/constants';
+import { ChunkSettings, DatasetCollectionName } from '../schema';
 import {
   TeamCollectionName,
   TeamMemberCollectionName
@@ -90,25 +85,7 @@ const DatasetCollectionSchema = new Schema({
   customPdfParse: Boolean,
 
   // Chunk settings
-  imageIndex: Boolean,
-  autoIndexes: Boolean,
-  trainingType: {
-    type: String,
-    enum: Object.values(DatasetCollectionDataProcessModeEnum)
-  },
-  chunkSettingMode: {
-    type: String,
-    enum: Object.values(ChunkSettingModeEnum)
-  },
-  chunkSplitMode: {
-    type: String,
-    enum: Object.values(DataChunkSplitModeEnum)
-  },
-  chunkSize: Number,
-  chunkSplitter: String,
-
-  indexSize: Number,
-  qaPrompt: String
+  ...ChunkSettings
 });
 
 DatasetCollectionSchema.virtual('dataset', {
@@ -9,6 +9,8 @@ import { deleteDatasetDataVector } from '../../common/vectorStore/controller';
 import { MongoDatasetDataText } from './data/dataTextSchema';
 import { DatasetErrEnum } from '@fastgpt/global/common/error/code/dataset';
 import { retryFn } from '@fastgpt/global/common/system/utils';
+import { removeWebsiteSyncJobScheduler } from './websiteSync';
+import { DatasetTypeEnum } from '@fastgpt/global/core/dataset/constants';
 
 /* ============= dataset ========== */
 /* find all datasetId by top datasetId */
@@ -1,7 +1,8 @@
 import { getMongoModel, Schema } from '../../common/mongo';
 import {
-  DatasetStatusEnum,
-  DatasetStatusMap,
+  ChunkSettingModeEnum,
+  DataChunkSplitModeEnum,
+  DatasetCollectionDataProcessModeEnum,
   DatasetTypeEnum,
   DatasetTypeMap
 } from '@fastgpt/global/core/dataset/constants';
@@ -13,6 +14,28 @@ import type { DatasetSchemaType } from '@fastgpt/global/core/dataset/type.d';
 
 export const DatasetCollectionName = 'datasets';
 
+export const ChunkSettings = {
+  imageIndex: Boolean,
+  autoIndexes: Boolean,
+  trainingType: {
+    type: String,
+    enum: Object.values(DatasetCollectionDataProcessModeEnum)
+  },
+  chunkSettingMode: {
+    type: String,
+    enum: Object.values(ChunkSettingModeEnum)
+  },
+  chunkSplitMode: {
+    type: String,
+    enum: Object.values(DataChunkSplitModeEnum)
+  },
+  chunkSize: Number,
+  chunkSplitter: String,
+
+  indexSize: Number,
+  qaPrompt: String
+};
+
 const DatasetSchema = new Schema({
   parentId: {
     type: Schema.Types.ObjectId,
@@ -40,11 +63,6 @@ const DatasetSchema = new Schema({
     required: true,
     default: DatasetTypeEnum.dataset
   },
-  status: {
-    type: String,
-    enum: Object.keys(DatasetStatusMap),
-    default: DatasetStatusEnum.active
-  },
   avatar: {
     type: String,
     default: '/icon/logo.svg'
@@ -84,6 +102,9 @@ const DatasetSchema = new Schema({
       }
     }
   },
+  chunkSettings: {
+    type: ChunkSettings
+  },
   inheritPermission: {
     type: Boolean,
     default: true
@@ -98,9 +119,8 @@ const DatasetSchema = new Schema({
     type: Object
   },
 
-  autoSync: Boolean,
-
   // abandoned
+  autoSync: Boolean,
   externalReadUrl: {
     type: String
   },
@@ -98,7 +98,9 @@ const TrainingDataSchema = new Schema({
       }
     ],
     default: []
-  }
+  },
+
+  errorMsg: String
 });
 
 TrainingDataSchema.virtual('dataset', {
packages/service/core/dataset/websiteSync/index.ts (new file, +101)
@@ -0,0 +1,101 @@
+import { Processor } from 'bullmq';
+import { getQueue, getWorker, QueueNames } from '../../../common/bullmq';
+import { DatasetStatusEnum } from '@fastgpt/global/core/dataset/constants';
+
+export type WebsiteSyncJobData = {
+  datasetId: string;
+};
+
+export const websiteSyncQueue = getQueue<WebsiteSyncJobData>(QueueNames.websiteSync, {
+  defaultJobOptions: {
+    attempts: 3, // retry 3 times
+    backoff: {
+      type: 'exponential',
+      delay: 1000 // delay 1 second between retries
+    }
+  }
+});
+export const getWebsiteSyncWorker = (processor: Processor<WebsiteSyncJobData>) => {
+  return getWorker<WebsiteSyncJobData>(QueueNames.websiteSync, processor, {
+    removeOnFail: {
+      age: 15 * 24 * 60 * 60, // Keep up to 15 days
+      count: 1000 // Keep up to 1000 jobs
+    },
+    concurrency: 1 // Set worker to process only 1 job at a time
+  });
+};
+
+export const addWebsiteSyncJob = (data: WebsiteSyncJobData) => {
+  const datasetId = String(data.datasetId);
+  // deduplication: make sure only 1 job
+  return websiteSyncQueue.add(datasetId, data, { deduplication: { id: datasetId } });
+};
+
+export const getWebsiteSyncDatasetStatus = async (datasetId: string) => {
+  const jobId = await websiteSyncQueue.getDeduplicationJobId(datasetId);
+  if (!jobId) {
+    return {
+      status: DatasetStatusEnum.active,
+      errorMsg: undefined
+    };
+  }
+  const job = await websiteSyncQueue.getJob(jobId);
+  if (!job) {
+    return {
+      status: DatasetStatusEnum.active,
+      errorMsg: undefined
+    };
+  }
+
+  const jobState = await job.getState();
+
+  if (jobState === 'failed' || jobState === 'unknown') {
+    return {
+      status: DatasetStatusEnum.error,
+      errorMsg: job.failedReason
+    };
+  }
+  if (['waiting-children', 'waiting'].includes(jobState)) {
+    return {
+      status: DatasetStatusEnum.waiting,
+      errorMsg: undefined
+    };
+  }
+  if (jobState === 'active') {
+    return {
+      status: DatasetStatusEnum.syncing,
+      errorMsg: undefined
+    };
+  }
+
+  return {
+    status: DatasetStatusEnum.active,
+    errorMsg: undefined
+  };
+};
+
+// Scheduler setting
+const repeatDuration = 24 * 60 * 60 * 1000; // every day
+export const upsertWebsiteSyncJobScheduler = (data: WebsiteSyncJobData, startDate?: number) => {
+  const datasetId = String(data.datasetId);
+
+  return websiteSyncQueue.upsertJobScheduler(
+    datasetId,
+    {
+      every: repeatDuration,
+      startDate: startDate || new Date().getTime() + repeatDuration // First run tomorrow
+    },
+    {
+      name: datasetId,
+      data
+    }
+  );
+};
+
+export const getWebsiteSyncJobScheduler = (datasetId: string) => {
+  return websiteSyncQueue.getJobScheduler(String(datasetId));
+};
+
+export const removeWebsiteSyncJobScheduler = (datasetId: string) => {
+  return websiteSyncQueue.removeJobScheduler(String(datasetId));
+};
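Jobs are deduplicated by datasetId, so repeated "immediate sync" clicks collapse into at most one queued job, and the scheduler re-enqueues a sync every 24 hours. A wiring sketch (the processor body is hypothetical):

import {
  addWebsiteSyncJob,
  getWebsiteSyncWorker,
  upsertWebsiteSyncJobScheduler
} from './index';

// Register the single-concurrency worker once at startup.
getWebsiteSyncWorker(async (job) => {
  // hypothetical: crawl the site and rebuild collections for this dataset
  console.log('syncing dataset', job.data.datasetId);
});

export const syncNowAndDaily = async (datasetId: string) => {
  await addWebsiteSyncJob({ datasetId }); // immediate, deduplicated
  await upsertWebsiteSyncJobScheduler({ datasetId }); // daily, first run tomorrow
};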
@@ -176,7 +176,8 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
     toolNodeOutputTokens,
     completeMessages = [], // The actual message sent to AI(just save text)
     assistantResponses = [], // FastGPT system store assistant.value response
-    runTimes
+    runTimes,
+    finish_reason
   } = await (async () => {
     const adaptMessages = chats2GPTMessages({
       messages,
@@ -276,7 +277,8 @@ export const dispatchRunTools = async (props: DispatchToolModuleProps): Promise<
         useVision
       ),
       toolDetail: childToolResponse,
-      mergeSignId: nodeId
+      mergeSignId: nodeId,
+      finishReason: finish_reason
     },
     [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
       // Points consumed by the tool call itself
@@ -1,6 +1,10 @@
 import { createChatCompletion } from '../../../../ai/config';
 import { filterGPTMessageByMaxContext, loadRequestMessages } from '../../../../chat/utils';
-import { StreamChatType, ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
+import {
+  StreamChatType,
+  ChatCompletionMessageParam,
+  CompletionFinishReason
+} from '@fastgpt/global/core/ai/type';
 import { NextApiResponse } from 'next';
 import { responseWriteController } from '../../../../../common/response';
 import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
@@ -252,9 +256,9 @@ export const runToolWithPromptCall = async (
     }
   });
 
-  const { answer, reasoning } = await (async () => {
+  const { answer, reasoning, finish_reason } = await (async () => {
     if (res && isStreamResponse) {
-      const { answer, reasoning } = await streamResponse({
+      const { answer, reasoning, finish_reason } = await streamResponse({
         res,
         toolNodes,
         stream: aiResponse,
@@ -262,8 +266,9 @@ export const runToolWithPromptCall = async (
         aiChatReasoning
       });
 
-      return { answer, reasoning };
+      return { answer, reasoning, finish_reason };
     } else {
+      const finish_reason = aiResponse.choices?.[0]?.finish_reason as CompletionFinishReason;
       const content = aiResponse.choices?.[0]?.message?.content || '';
       const reasoningContent: string = aiResponse.choices?.[0]?.message?.reasoning_content || '';
 
@@ -271,14 +276,16 @@ export const runToolWithPromptCall = async (
       if (reasoningContent || !aiChatReasoning) {
         return {
           answer: content,
-          reasoning: reasoningContent
+          reasoning: reasoningContent,
+          finish_reason
         };
       }
 
       const [think, answer] = parseReasoningContent(content);
       return {
         answer,
-        reasoning: think
+        reasoning: think,
+        finish_reason
       };
     }
   })();
@@ -525,7 +532,8 @@ ANSWER: `;
       toolNodeInputTokens,
       toolNodeOutputTokens,
       assistantResponses: toolNodeAssistants,
-      runTimes
+      runTimes,
+      finish_reason
     }
   );
 };
@@ -550,15 +558,18 @@ async function streamResponse({
   let startResponseWrite = false;
   let answer = '';
   let reasoning = '';
+  let finish_reason: CompletionFinishReason = null;
   const { parsePart, getStartTagBuffer } = parseReasoningStreamContent();
 
   for await (const part of stream) {
     if (res.closed) {
       stream.controller?.abort();
+      finish_reason = 'close';
       break;
     }
 
-    const [reasoningContent, content] = parsePart(part, aiChatReasoning);
+    const { reasoningContent, content, finishReason } = parsePart(part, aiChatReasoning);
+    finish_reason = finish_reason || finishReason;
     answer += content;
     reasoning += reasoningContent;
@@ -618,7 +629,7 @@ async function streamResponse({
     }
   }
 
-  return { answer, reasoning };
+  return { answer, reasoning, finish_reason };
 }
 
 const parseAnswer = (
@@ -7,7 +7,8 @@ import {
   ChatCompletionToolMessageParam,
   ChatCompletionMessageParam,
   ChatCompletionTool,
-  ChatCompletionAssistantMessageParam
+  ChatCompletionAssistantMessageParam,
+  CompletionFinishReason
 } from '@fastgpt/global/core/ai/type';
 import { NextApiResponse } from 'next';
 import { responseWriteController } from '../../../../../common/response';
@@ -300,7 +301,7 @@ export const runToolWithToolChoice = async (
     }
   });
 
-  const { answer, toolCalls } = await (async () => {
+  const { answer, toolCalls, finish_reason } = await (async () => {
     if (res && isStreamResponse) {
       return streamResponse({
         res,
@@ -310,6 +311,7 @@ export const runToolWithToolChoice = async (
       });
     } else {
       const result = aiResponse as ChatCompletion;
+      const finish_reason = result.choices?.[0]?.finish_reason as CompletionFinishReason;
       const calls = result.choices?.[0]?.message?.tool_calls || [];
       const answer = result.choices?.[0]?.message?.content || '';
 
@@ -350,7 +352,8 @@ export const runToolWithToolChoice = async (
 
       return {
         answer,
-        toolCalls: toolCalls
+        toolCalls: toolCalls,
+        finish_reason
       };
     }
   })();
@@ -549,8 +552,9 @@ export const runToolWithToolChoice = async (
         toolNodeOutputTokens,
         completeMessages,
         assistantResponses: toolNodeAssistants,
+        toolWorkflowInteractiveResponse,
         runTimes,
-        toolWorkflowInteractiveResponse
+        finish_reason
       };
     }
 
@@ -565,7 +569,8 @@ export const runToolWithToolChoice = async (
       toolNodeInputTokens,
       toolNodeOutputTokens,
       assistantResponses: toolNodeAssistants,
-      runTimes
+      runTimes,
+      finish_reason
     }
   );
 } else {
@@ -588,7 +593,8 @@ export const runToolWithToolChoice = async (
 
     completeMessages,
     assistantResponses: [...assistantResponses, ...toolNodeAssistant.value],
-    runTimes: (response?.runTimes || 0) + 1
+    runTimes: (response?.runTimes || 0) + 1,
+    finish_reason
   };
 }
 };
@@ -612,14 +618,18 @@ async function streamResponse({
   let textAnswer = '';
   let callingTool: { name: string; arguments: string } | null = null;
   let toolCalls: ChatCompletionMessageToolCall[] = [];
+  let finishReason: CompletionFinishReason = null;
 
   for await (const part of stream) {
     if (res.closed) {
      stream.controller?.abort();
+      finishReason = 'close';
       break;
     }
 
     const responseChoice = part.choices?.[0]?.delta;
+    const finish_reason = part.choices?.[0]?.finish_reason as CompletionFinishReason;
+    finishReason = finishReason || finish_reason;
 
     if (responseChoice?.content) {
       const content = responseChoice.content || '';
@@ -705,5 +715,5 @@ async function streamResponse({
     }
   }
 
-  return { answer: textAnswer, toolCalls };
+  return { answer: textAnswer, toolCalls, finish_reason: finishReason };
 }
@@ -1,4 +1,4 @@
-import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
+import { ChatCompletionMessageParam, CompletionFinishReason } from '@fastgpt/global/core/ai/type';
 import { NodeInputKeyEnum, NodeOutputKeyEnum } from '@fastgpt/global/core/workflow/constants';
 import type {
   ModuleDispatchProps,
@@ -43,6 +43,7 @@ export type RunToolResponse = {
   assistantResponses?: AIChatItemValueItemType[];
   toolWorkflowInteractiveResponse?: WorkflowInteractiveResponseType;
   [DispatchNodeResponseKeyEnum.runTimes]: number;
+  finish_reason?: CompletionFinishReason;
 };
 export type ToolNodeItemType = RuntimeNodeItemType & {
   toolParams: RuntimeNodeItemType['inputs'];
@@ -6,7 +6,11 @@ import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
 import { textAdaptGptResponse } from '@fastgpt/global/core/workflow/runtime/utils';
 import { parseReasoningContent, parseReasoningStreamContent } from '../../../ai/utils';
 import { createChatCompletion } from '../../../ai/config';
-import type { ChatCompletionMessageParam, StreamChatType } from '@fastgpt/global/core/ai/type.d';
+import type {
+  ChatCompletionMessageParam,
+  CompletionFinishReason,
+  StreamChatType
+} from '@fastgpt/global/core/ai/type.d';
 import { formatModelChars2Points } from '../../../../support/wallet/usage/utils';
 import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
 import { postTextCensor } from '../../../../common/api/requestPlusApi';
@@ -101,7 +105,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
 
   const modelConstantsData = getLLMModel(model);
   if (!modelConstantsData) {
-    return Promise.reject('The chat model is undefined, you need to select a chat model.');
+    return Promise.reject(`Mode ${model} is undefined, you need to select a chat model.`);
   }
 
   aiChatVision = modelConstantsData.vision && aiChatVision;
@@ -195,16 +199,17 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
     }
   });
 
-  const { answerText, reasoningText } = await (async () => {
+  const { answerText, reasoningText, finish_reason } = await (async () => {
     if (isStreamResponse) {
       if (!res) {
         return {
           answerText: '',
-          reasoningText: ''
+          reasoningText: '',
+          finish_reason: 'close' as const
         };
       }
       // sse response
-      const { answer, reasoning } = await streamResponse({
+      const { answer, reasoning, finish_reason } = await streamResponse({
         res,
         stream: response,
         aiChatReasoning,
@@ -215,9 +220,12 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
 
       return {
         answerText: answer,
-        reasoningText: reasoning
+        reasoningText: reasoning,
+        finish_reason
       };
     } else {
+      const finish_reason = response.choices?.[0]?.finish_reason as CompletionFinishReason;
+
       const { content, reasoningContent } = (() => {
         const content = response.choices?.[0]?.message?.content || '';
         // @ts-ignore
@@ -260,7 +268,8 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
 
       return {
         answerText: content,
-        reasoningText: reasoningContent
+        reasoningText: reasoningContent,
+        finish_reason
       };
     }
   })();
@@ -303,7 +312,8 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
       maxToken: max_tokens,
       reasoningText,
       historyPreview: getHistoryPreview(chatCompleteMessages, 10000, aiChatVision),
-      contextTotalLen: completeMessages.length
+      contextTotalLen: completeMessages.length,
+      finishReason: finish_reason
     },
     [DispatchNodeResponseKeyEnum.nodeDispatchUsages]: [
       {
@@ -528,15 +538,18 @@ async function streamResponse({
   });
   let answer = '';
   let reasoning = '';
+  let finish_reason: CompletionFinishReason = null;
   const { parsePart, getStartTagBuffer } = parseReasoningStreamContent();
 
   for await (const part of stream) {
     if (res.closed) {
       stream.controller?.abort();
+      finish_reason = 'close';
       break;
     }
 
-    const [reasoningContent, content] = parsePart(part, parseThinkTag);
+    const { reasoningContent, content, finishReason } = parsePart(part, parseThinkTag);
+    finish_reason = finish_reason || finishReason;
     answer += content;
     reasoning += reasoningContent;
@@ -575,5 +588,5 @@ async function streamResponse({
     }
   }
 
-  return { answer, reasoning };
+  return { answer, reasoning, finish_reason };
 }
@@ -130,6 +130,7 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
     timezone,
     externalProvider,
     stream = false,
+    version = 'v1',
     ...props
   } = data;
 
@@ -626,6 +627,21 @@ export async function dispatchWorkFlow(data: Props): Promise<DispatchFlowRespons
     };
   })();
 
+  // Response node response
+  if (
+    version === 'v2' &&
+    !props.isToolCall &&
+    !props.runningAppInfo.isChildApp &&
+    formatResponseData
+  ) {
+    props.workflowStreamResponse?.({
+      event: SseResponseEventEnum.flowNodeResponse,
+      data: {
+        ...formatResponseData
+      }
+    });
+  }
+
   // Add output default value
   node.outputs.forEach((item) => {
     if (!item.required) return;
@@ -53,7 +53,8 @@ export const getWorkflowResponseWrite = ({
     [SseResponseEventEnum.toolCall]: 1,
     [SseResponseEventEnum.toolParams]: 1,
     [SseResponseEventEnum.toolResponse]: 1,
-    [SseResponseEventEnum.updateVariables]: 1
+    [SseResponseEventEnum.updateVariables]: 1,
+    [SseResponseEventEnum.flowNodeResponse]: 1
   };
   if (!detail && detailEvent[event]) return;
 
@@ -7,6 +7,7 @@
     "@xmldom/xmldom": "^0.8.10",
     "@zilliz/milvus2-sdk-node": "2.4.2",
     "axios": "^1.8.2",
+    "bullmq": "^5.44.0",
     "chalk": "^5.3.0",
     "cheerio": "1.0.0-rc.12",
     "cookie": "^0.7.1",
@@ -18,6 +19,7 @@
     "file-type": "^19.0.0",
     "form-data": "^4.0.0",
     "iconv-lite": "^0.6.3",
+    "ioredis": "^5.6.0",
     "joplin-turndown-plugin-gfm": "^1.0.12",
     "json5": "^2.2.3",
     "jsonpath-plus": "^10.3.0",
@@ -27,7 +29,7 @@
     "mongoose": "^8.10.1",
     "multer": "1.4.5-lts.1",
     "mysql2": "^3.11.3",
-    "next": "14.2.25",
+    "next": "14.2.26",
     "nextjs-cors": "^2.2.0",
     "node-cron": "^3.0.3",
     "node-xlsx": "^0.24.0",
@@ -1,7 +1,7 @@
 {
-  "extends":"../../tsconfig.json",
+  "extends": "../../tsconfig.json",
   "compilerOptions": {
     "baseUrl": "."
   },
-  "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", "**/*.d.ts", "../**/*.d.ts"]
+  "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", "**/*.d.ts", "../../**/*.d.ts"]
 }