4.6.8-alpha (#804)

* perf: redirect request and error log replacement

perf: dataset openapi

feat: session

fix: retry input error

feat: 4.6.8 doc

sub page

feat: standard sub

perf: rerank tip

perf: rerank tip

perf: api sdk

perf: openapi

sub plan

perf: sub ui

fix: ts

* perf: init log

* fix: variable select

* sub page

* icon

* perf: llm model config

* perf: menu ux

* perf: system store

* perf: publish app name

* fix: init data

* perf: flow edit ux

* fix: value type format and ux

* fix prompt editor default value (#13)

* fix prompt editor default value

* fix prompt editor update when not focused

* add key with variable

---------

Co-authored-by: Archer <545436317@qq.com>

* fix: value type

* doc

* i18n

* import path

* home page

* perf: mongo session running

* fix: ts

* perf: use toast

* perf: flow edit

* perf: sse response

* slider ui

* fetch error

* fix prompt editor re-render when not focused, keyed by defaultValue (#14)

* perf: prompt editor

* feat: dataset search concat

* perf: doc

* fix: ts

* perf: doc

* fix JSON editor onBlur value (#15)

* faq

* vector model default config

* ipv6

---------

Co-authored-by: heheer <71265218+newfish-cmyk@users.noreply.github.com>
Author: Archer
Date: 2024-02-01 21:57:41 +08:00
Committed by: GitHub
Parent: fc19c4cf09
Commit: 34602b25df
285 changed files with 10345 additions and 11223 deletions


@@ -14,6 +14,7 @@ import type { PushDatasetDataChunkProps } from '@fastgpt/global/core/dataset/api
 import { UserErrEnum } from '@fastgpt/global/common/error/code/user';
 import { lockTrainingDataByTeamId } from '@fastgpt/service/core/dataset/training/controller';
 import { pushDataToTrainingQueue } from '@/service/core/dataset/data/controller';
+import { getLLMModel } from '../core/ai/model';

 const reduceQueue = () => {
   global.qaQueueLen = global.qaQueueLen > 0 ? global.qaQueueLen - 1 : 0;
@@ -111,7 +112,7 @@ export async function generateQA(): Promise<any> {
   try {
     const startTime = Date.now();
-    const model = data.model ?? global.qaModels[0].model;
+    const model = getLLMModel(data.model)?.model;
     const prompt = `${data.prompt || Prompt_AgentQA.description}
 ${replaceVariable(Prompt_AgentQA.fixedText, { text })}`;
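
The hunk above swaps the direct read of global.qaModels[0] for the new getLLMModel helper imported in the first hunk. The helper's implementation is not part of this diff; below is a minimal sketch of the lookup it likely performs, assuming a global llmModels list populated from the system model config (names and fields here are illustrative, not confirmed by this commit):

// Minimal sketch, not the actual FastGPT source.
// Resolves a configured LLM by identifier, falling back to the first
// configured model when the requested one is missing or unknown.
type LLMModelItemType = {
  model: string; // provider-facing model identifier, e.g. 'gpt-3.5-turbo'
  name: string;  // display name
  maxContext?: number;
};

declare global {
  // assumed to be loaded at startup from the model config file
  var llmModels: LLMModelItemType[];
}

export const getLLMModel = (model?: string): LLMModelItemType | undefined =>
  global.llmModels.find((item) => item.model === model || item.name === model) ??
  global.llmModels[0];

If the helper falls back like this sketch, getLLMModel(data.model)?.model also recovers from an unknown model name, whereas the old data.model ?? global.qaModels[0].model expression only covered the null/undefined case.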
@@ -123,7 +124,9 @@ ${replaceVariable(Prompt_AgentQA.fixedText, { text })}`;
       }
     ];
-    const ai = getAIApi(undefined, 600000);
+    const ai = getAIApi({
+      timeout: 600000
+    });

     const chatResponse = await ai.chat.completions.create({
       model,
       temperature: 0.3,
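
The last hunk changes getAIApi from positional arguments (the old undefined, 600000 call) to a single options object, which makes long-timeout call sites like this QA generator self-describing. A sketch of what the new signature plausibly looks like, assuming the OpenAI Node SDK underneath; the userKey field and environment variable names are illustrative, not confirmed by this commit:

import OpenAI from 'openai';

type GetAIApiProps = {
  // illustrative: optional per-user credentials override
  userKey?: { baseUrl?: string; key: string };
  timeout?: number; // milliseconds; generateQA passes 600000 (10 minutes)
};

// Minimal sketch, not the actual FastGPT source.
export const getAIApi = ({ userKey, timeout = 60000 }: GetAIApiProps = {}) =>
  new OpenAI({
    apiKey: userKey?.key ?? process.env.CHAT_API_KEY ?? '',
    baseURL: userKey?.baseUrl ?? process.env.OPENAI_BASE_URL,
    timeout // the OpenAI SDK accepts a per-client request timeout in ms
  });

The call site then reads ai.chat.completions.create({ model, temperature: 0.3, ... }) exactly as the hunk shows, with the 10-minute timeout applying to the whole QA-generation request.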