4.8.13 feature (#3118)

* chore(ui): login page & workflow page (#3046)

* login page & number input & multirow select & llm select

* workflow

* adjust nodes

* New file upload (#3058)

* feat: toolNode, aiNode, readFileNode adapt to the new version

* update docker-compose

* update tip

* feat: adapt to the new file version

* perf: file input

* fix: ts

* feat: add chat history time label (#3024)

* feat: add chat and logs time

* feat: add chat history time label

* code perf

* code perf

---------

Co-authored-by: 勤劳上班的卑微小张 <jiazhan.zhang@ggimage.com>

* add chatType (#3060)

* perf: slow query of full text search (#3044)

* Adapt findLast api; perf: markdown zh format (#3066)

* perf: context code

* fix: adapt findLast api

* perf: commercial plugin run error

* perf: markdown zh format

* perf: dockerfile proxy (#3067)

* fix ui (#3065)

* fix ui

* fix

* feat: support array reference multi-select (#3041)

* feat: support array reference multi-select

* fix build

* fix

* fix loop multi-select

* adjust condition

* fix get value

* array and non-array conversion

* fix plugin input

* merge func

* feat: iframe code block; perf: workflow selector type (#3076)

* feat: iframe code block

* perf: workflow selector type

* node pluginoutput check (#3074)

* feat: move the view when the workflow check finds an error; fix: ui refresh error during continuous file upload (#3077)

* fix: plugin output check

* fix: ui refresh error during continuous file upload

* feat: move the view when the workflow check finds an error

* add dispatch try catch (#3075)

* perf: workflow context split (#3083)

* perf: workflow context split

* perf: context

* 4.8.13 test (#3085)

* perf: workflow node ui

* chat iframe url

* feat: support sub route config (#3071)

* feat: support sub route config

* dockerfile

* fix upload

* delete unused code

* 4.8.13 test (#3087)

* fix: image expired

* fix: datacard navbar ui

* perf: build action

* fix: workflow file upload refresh (#3088)

* fix: http tool response (#3097)

* loop node dynamic height (#3092)

* loop node dynamic height

* fix

* fix

* feat: support push chat log (#3093)

* feat: custom uid/metadata

* to: custom info

* fix: chat push latest

* feat: add chat log envs

* refactor: move timer to pushChatLog

* fix: using precise log

---------

Co-authored-by: Finley Ge <m13203533462@163.com>

* 4.8.13 test (#3098)

* perf: loop node refresh

* rename context

* comment

* fix: ts

* perf: push chat log

* array reference check & node ui (#3100)

* feat: loop start add index (#3101)

* feat: loop start add index

* update doc

* 4.8.13 test (#3102)

* fix: loop index;edge parent check

* perf: invalid reference check

* fix: ts

* fix: plugin select files and ai response check (#3104)

* fix: plugin select files and ai response check

* perf: text editor selector; tool call tip; remove invalid image url

* perf: select file

* perf: drop files

* feat: source id prefix env (#3103)

* 4.8.13 test (#3106)

* perf: select file

* perf: drop files

* perf: env template

* 4.8.13 test (#3107)

* perf: select file

* perf: drop files

* fix: simple mode adapt files

* perf: push chat log (#3109)

* fix: share page title loading error (#3111)

* 4.8.13 perf (#3112)

* fix: share page title loading error

* update file input doc

* perf: auto add file urls

* perf: auto set loop node offset height

* 4.8.13 test (#3117)

* perf: plugin

* update action

* feat: add more share config (#3120)

* feat: add more share config

* add i18n en

* fix: missing subroute (#3121)

* perf: outlink config (#3128)

* update action

* perf: outlink config

* fix: ts (#3129)

* Update docSite documentation content (#3131)

* fix: null pointer (#3130)

* fix: null pointer

* perf: not input text

* update doc url

* perf: outlink default value (#3134)

* update doc (#3136)

* 4.8.13 test (#3137)

* update doc

* perf: completions chat api

* Restore docSite content based on upstream/4.8.13-dev (#3138)

* Restore docSite content based on upstream/4.8.13-dev

* Fix omissions in 4813.md

* update doc (#3141)

---------

Co-authored-by: heheer <heheer@sealos.io>
Co-authored-by: papapatrick <109422393+Patrickill@users.noreply.github.com>
Co-authored-by: 勤劳上班的卑微小张 <jiazhan.zhang@ggimage.com>
Co-authored-by: Finley Ge <32237950+FinleyGe@users.noreply.github.com>
Co-authored-by: a.e. <49438478+I-Info@users.noreply.github.com>
Co-authored-by: Finley Ge <m13203533462@163.com>
Co-authored-by: Jiangween <145003935+Jiangween@users.noreply.github.com>
Authored by Archer on 2024-11-13 11:29:53 +08:00, committed by GitHub.
parent e1f5483432
commit e9d52ada73
449 changed files with 7626 additions and 4180 deletions

View File

@@ -1,5 +1,11 @@
 import type { UserModelSchema } from '@fastgpt/global/support/user/type';
 import OpenAI from '@fastgpt/global/core/ai';
+import {
+  ChatCompletionCreateParamsNonStreaming,
+  ChatCompletionCreateParamsStreaming
+} from '@fastgpt/global/core/ai/type';
+import { getErrText } from '@fastgpt/global/common/error/utils';
+import { addLog } from '../../common/system/log';

 export const openaiBaseUrl = process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1';

@@ -34,3 +40,55 @@ export const getAxiosConfig = (props?: { userKey?: UserModelSchema['openaiAccoun
     authorization: `Bearer ${apiKey}`
   };
 };
+
+type CompletionsBodyType =
+  | ChatCompletionCreateParamsNonStreaming
+  | ChatCompletionCreateParamsStreaming;
+type InferResponseType<T extends CompletionsBodyType> =
+  T extends ChatCompletionCreateParamsStreaming
+    ? OpenAI.Chat.Completions.ChatCompletionChunk
+    : OpenAI.Chat.Completions.ChatCompletion;
+
+export const createChatCompletion = async <T extends CompletionsBodyType>({
+  body,
+  userKey,
+  timeout,
+  options
+}: {
+  body: T;
+  userKey?: UserModelSchema['openaiAccount'];
+  timeout?: number;
+  options?: OpenAI.RequestOptions;
+}): Promise<{
+  response: InferResponseType<T>;
+  isStreamResponse: boolean;
+}> => {
+  try {
+    const formatTimeout = timeout ? timeout : body.stream ? 60000 : 600000;
+    const ai = getAIApi({
+      userKey,
+      timeout: formatTimeout
+    });
+    const response = await ai.chat.completions.create(body, options);
+
+    const isStreamResponse =
+      typeof response === 'object' &&
+      response !== null &&
+      ('iterator' in response || 'controller' in response);
+
+    return {
+      response: response as InferResponseType<T>,
+      isStreamResponse
+    };
+  } catch (error) {
+    addLog.error(`LLM response error`, error);
+    addLog.warn(`LLM response error`, {
+      baseUrl: userKey?.baseUrl,
+      requestBody: body
+    });
+
+    if (userKey?.baseUrl) {
+      return Promise.reject(`您的 OpenAI key 出错了: ${getErrText(error)}`);
+    }
+    return Promise.reject(error);
+  }
+};
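For orientation, the later hunks in this commit migrate call sites to this helper. A minimal usage sketch (illustrative only; the model name, message, and import paths are placeholders, not from the diff):

import { createChatCompletion } from './config';
import { llmCompletionsBodyFormat } from './utils';

const { response, isStreamResponse } = await createChatCompletion({
  body: llmCompletionsBodyFormat(
    {
      model: 'gpt-4o-mini', // placeholder model name
      stream: false,
      messages: [{ role: 'user', content: 'Hello' }]
    },
    'gpt-4o-mini'
  )
});

// With stream: false the response is inferred as a ChatCompletion,
// so choices can be read without a cast at the call site.
const answer = response.choices?.[0]?.message?.content || '';

isStreamResponse is false here; a stream: true body would infer chunk-typed output and the caller would iterate the stream instead.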

View File

@@ -55,7 +55,7 @@ export async function getVectorsByText({ model, input, type }: GetVectorProps) {
     return result;
   } catch (error) {
-    console.log(`Embedding Error`, error);
+    addLog.error(`Embedding Error`, error);

     return Promise.reject(error);
   }

View File

@@ -1,5 +1,5 @@
 import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type.d';
-import { getAIApi } from '../config';
+import { createChatCompletion } from '../config';
 import { countGptMessagesTokens } from '../../../common/string/tiktoken/index';
 import { loadRequestMessages } from '../../chat/utils';
 import { llmCompletionsBodyFormat } from '../utils';
@@ -29,11 +29,8 @@ export async function createQuestionGuide({
     }
   ];

-  const ai = getAIApi({
-    timeout: 480000
-  });
-  const data = await ai.chat.completions.create(
-    llmCompletionsBodyFormat(
+  const { response: data } = await createChatCompletion({
+    body: llmCompletionsBodyFormat(
       {
         model,
         temperature: 0.1,
@@ -46,7 +43,7 @@ export async function createQuestionGuide({
       },
       model
     )
-  );
+  });

   const answer = data.choices?.[0]?.message?.content || '';

View File

@@ -1,8 +1,7 @@
 import { replaceVariable } from '@fastgpt/global/common/string/tools';
-import { getAIApi } from '../config';
+import { createChatCompletion } from '../config';
 import { ChatItemType } from '@fastgpt/global/core/chat/type';
 import { countGptMessagesTokens } from '../../../common/string/tiktoken/index';
-import { ChatCompletion, ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
 import { chatValue2RuntimePrompt } from '@fastgpt/global/core/chat/adapt';
 import { getLLMModel } from '../model';
 import { llmCompletionsBodyFormat } from '../utils';
@@ -138,10 +137,6 @@ A: ${chatBg}
   const modelData = getLLMModel(model);

-  const ai = getAIApi({
-    timeout: 480000
-  });
-
   const messages = [
     {
       role: 'user',
@@ -150,20 +145,19 @@ A: ${chatBg}
         histories: concatFewShot
       })
     }
-  ] as ChatCompletionMessageParam[];
+  ] as any;

-  const result = (await ai.chat.completions.create(
-    llmCompletionsBodyFormat(
+  const { response: result } = await createChatCompletion({
+    body: llmCompletionsBodyFormat(
       {
         stream: false,
         model: modelData.model,
         temperature: 0.01,
         // @ts-ignore
         messages
       },
       modelData
     )
-  )) as ChatCompletion;
+  });

   let answer = result.choices?.[0]?.message?.content || '';
   if (!answer) {

View File

@@ -48,14 +48,17 @@ export const computedTemperature = ({
 type CompletionsBodyType =
   | ChatCompletionCreateParamsNonStreaming
   | ChatCompletionCreateParamsStreaming;
+type InferCompletionsBody<T> = T extends { stream: true }
+  ? ChatCompletionCreateParamsStreaming
+  : ChatCompletionCreateParamsNonStreaming;
+
 export const llmCompletionsBodyFormat = <T extends CompletionsBodyType>(
   body: T,
   model: string | LLMModelItemType
-) => {
+): InferCompletionsBody<T> => {
   const modelData = typeof model === 'string' ? getLLMModel(model) : model;

   if (!modelData) {
-    return body;
+    return body as InferCompletionsBody<T>;
   }

   const requestBody: T = {
@@ -81,5 +84,5 @@ export const llmCompletionsBodyFormat = <T extends CompletionsBodyType>(
   // console.log(requestBody);

-  return requestBody;
+  return requestBody as InferCompletionsBody<T>;
 };
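Taken together with the config.ts change above, InferCompletionsBody makes the body's stream flag flow into the type system, so ai.chat.completions.create resolves to the right overload and the old `(await ai.chat.completions.create(...)) as ChatCompletion` cast (removed in the queryExtension hunk) becomes unnecessary. A small illustrative sketch (the model name is a placeholder, not from the diff):

// The inferred return type follows the literal `stream` flag in the body.
const streamingBody = llmCompletionsBodyFormat(
  { model: 'gpt-4o-mini', stream: true, messages: [] },
  'gpt-4o-mini'
); // inferred as ChatCompletionCreateParamsStreaming

const blockingBody = llmCompletionsBodyFormat(
  { model: 'gpt-4o-mini', stream: false, messages: [] },
  'gpt-4o-mini'
); // inferred as ChatCompletionCreateParamsNonStreaming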