sub plan page (#885)

* perf: insert mongo dataset data session

* perf: dataset data index

* remove delay

* rename bill schema

* rename bill record

* perf: bill table

* perf: prompt

* perf: sub plan

* change the usage count

* feat: usage bill

* publish usages

* doc

* Add team chat feature (#20)

* perf: doc

feat: add tag section

feat: team tag configuration

feat: add team sync management

feat: team share page

feat: complete team share page

feat: implement fuzzy search

style: formatting

fix: fuzzy matching

style: style tweaks

fix: team tag feature fixes

* fix: authentication

* merge: merge code

* fix: reference errors

* fix: PR review issues

* fix: TS formatting issues

---------

Co-authored-by: archer <545436317@qq.com>
Co-authored-by: liuxingwan <liuxingwan.lxw@alibaba-inc.com>

* update extra plan

* fix: ts

* format

* perf: bill field

* feat: standard plan

* fix: ts

* feat: personal account page changes (#22)

* feat: add tag section

feat: team tag configuration

feat: add team sync management

feat: team share page

feat: complete team share page

feat: implement fuzzy search

style: formatting

fix: fuzzy matching

style: style tweaks

fix: team tag feature fixes

* fix: authentication

* merge: merge code

* fix: reference errors

* fix: PR review issues

* fix: TS formatting issues

* feat: update personal account page

---------

Co-authored-by: liuxingwan <liuxingwan.lxw@alibaba-inc.com>

* fix chunk index; error page text

* feat: dataset processing points prediction

* feat: standard plan field

* feat: sub plan limit

* perf: index

* query extension

* perf: share link push app name

* perf: plan point unit

* perf: get sub plan

* perf: account page

---------

Co-authored-by: yst <77910600+yu-and-liu@users.noreply.github.com>
Co-authored-by: liuxingwan <liuxingwan.lxw@alibaba-inc.com>
Author: Archer
Date: 2024-02-23 17:47:34 +08:00
Committed by: GitHub
Parent: 7a87f13aa8
Commit: 443ad37b6a
246 changed files with 6277 additions and 4272 deletions

@@ -1,6 +1,6 @@
 import { MongoDatasetTraining } from '@fastgpt/service/core/dataset/training/schema';
-import { pushQABill } from '@/service/support/wallet/bill/push';
-import { DatasetDataIndexTypeEnum, TrainingModeEnum } from '@fastgpt/global/core/dataset/constants';
+import { pushQAUsage } from '@/service/support/wallet/usage/push';
+import { TrainingModeEnum } from '@fastgpt/global/core/dataset/constants';
 import { sendOneInform } from '../support/user/inform/api';
 import { getAIApi } from '@fastgpt/service/core/ai/config';
 import type { ChatMessageItemType } from '@fastgpt/global/core/ai/type.d';
@@ -9,12 +9,12 @@ import { splitText2Chunks } from '@fastgpt/global/common/string/textSplitter';
 import { replaceVariable } from '@fastgpt/global/common/string/tools';
 import { Prompt_AgentQA } from '@/global/core/prompt/agent';
 import { getErrText } from '@fastgpt/global/common/error/utils';
-import { authTeamBalance } from '../support/permission/auth/bill';
 import type { PushDatasetDataChunkProps } from '@fastgpt/global/core/dataset/api.d';
-import { UserErrEnum } from '@fastgpt/global/common/error/code/user';
 import { lockTrainingDataByTeamId } from '@fastgpt/service/core/dataset/training/controller';
 import { pushDataToTrainingQueue } from '@/service/core/dataset/data/controller';
 import { getLLMModel } from '../core/ai/model';
+import { checkTeamAIPoints } from '../support/permission/teamLimit';
+import { TeamErrEnum } from '@fastgpt/global/common/error/code/team';

 const reduceQueue = () => {
   global.qaQueueLen = global.qaQueueLen > 0 ? global.qaQueueLen - 1 : 0;
@@ -89,16 +89,16 @@ export async function generateQA(): Promise<any> {
     // auth balance
     try {
-      await authTeamBalance(data.teamId);
+      await checkTeamAIPoints(data.teamId);
     } catch (error: any) {
-      if (error?.statusText === UserErrEnum.balanceNotEnough) {
+      if (error?.statusText === TeamErrEnum.aiPointsNotEnough) {
        // send inform and lock data
        try {
          sendOneInform({
            type: 'system',
            title: '文本训练任务中止',
            content:
-              '该团队账号余额不足,文本训练任务中止,重新充值后将会继续。暂停的任务将在 7 天后被删除。',
+              '该团队账号的AI积分不足,文本训练任务中止,重新充值后将会继续。暂停的任务将在 7 天后被删除。',
            tmbId: data.tmbId
          });
          console.log('余额不足暂停【QA】生成任务');
@@ -161,7 +161,7 @@ ${replaceVariable(Prompt_AgentQA.fixedText, { text })}`;
      // add bill
      if (insertLen > 0) {
-        pushQABill({
+        pushQAUsage({
          teamId: data.teamId,
          tmbId: data.tmbId,
          charsLength: `${prompt}${answer}`.length,
@@ -230,7 +230,6 @@ function formatSplitText(text: string, rawText: string) {
      indexes: [
        {
          defaultIndex: true,
-          type: DatasetDataIndexTypeEnum.qa,
          text: `${q}\n${a.trim().replace(/\n\s*/g, '\n')}`
        }
      ]
@@ -248,7 +247,6 @@ function formatSplitText(text: string, rawText: string) {
      indexes: [
        {
          defaultIndex: true,
-          type: DatasetDataIndexTypeEnum.chunk,
          text: chunk
        }
      ]
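
Read together, these hunks swap the balance-based gate (authTeamBalance / UserErrEnum.balanceNotEnough, pushQABill) for an AI-points gate (checkTeamAIPoints / TeamErrEnum.aiPointsNotEnough, pushQAUsage) and drop the per-index type field. Below is a minimal sketch of the resulting flow, built only from what is visible in this diff; the wrapper handleQATrainingItem, its parameter shape, and the omission of any pushQAUsage fields not shown above are assumptions, not the repository's actual generateQA implementation.

// Minimal sketch (not the repository's actual generateQA), assembled only from the imports
// and call sites visible in the hunks above. handleQATrainingItem and its parameters are
// hypothetical stand-ins; fields of pushQAUsage not shown in the diff are omitted.
import { checkTeamAIPoints } from '../support/permission/teamLimit';
import { TeamErrEnum } from '@fastgpt/global/common/error/code/team';
import { pushQAUsage } from '@/service/support/wallet/usage/push';
import { sendOneInform } from '../support/user/inform/api';

async function handleQATrainingItem(
  data: { teamId: string; tmbId: string },
  prompt: string,
  answer: string,
  insertLen: number
) {
  try {
    // The old authTeamBalance(data.teamId) balance check becomes an AI-points check.
    await checkTeamAIPoints(data.teamId);
  } catch (error: any) {
    if (error?.statusText === TeamErrEnum.aiPointsNotEnough) {
      // Inform the team that QA training is paused until AI points are topped up.
      sendOneInform({
        type: 'system',
        title: '文本训练任务中止',
        content:
          '该团队账号的AI积分不足,文本训练任务中止,重新充值后将会继续。暂停的任务将在 7 天后被删除。',
        tmbId: data.tmbId
      });
    }
    return;
  }

  // ...QA generation and dataset insertion would happen here...

  // Usage is now recorded via pushQAUsage (renamed from pushQABill).
  if (insertLen > 0) {
    pushQAUsage({
      teamId: data.teamId,
      tmbId: data.tmbId,
      charsLength: `${prompt}${answer}`.length
      // any additional required fields are not visible in this hunk and are omitted here
    });
  }
}

The dataset index objects in the last two hunks likewise drop the type: DatasetDataIndexTypeEnum.* field, leaving only defaultIndex and text.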