File input (#2270)

* doc

* feat: file upload config

* perf: chat box file params

* feat: markdown show file

* feat: chat file store and clear

* perf: read file contentType

* feat: llm vision config

* feat: file url output

* perf: plugin error text

* perf: image load

* feat: ai chat document

* perf: file block ui

* feat: read file node

* feat: file read response field

* feat: simple mode support read files

* feat: tool call

* feat: read file histories

* perf: select file

* perf: select file config

* i18n

* i18n

* fix: ts; feat: tool response preview result
Archer
2024-08-06 10:00:22 +08:00
committed by GitHub
parent 10dcdb5491
commit e36d9d794f
121 changed files with 2600 additions and 1142 deletions

View File

@@ -1,6 +1,7 @@
 import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type.d';
 import { getAIApi } from '../config';
 import { countGptMessagesTokens } from '../../../common/string/tiktoken/index';
+import { loadRequestMessages } from '../../chat/utils';

 export const Prompt_QuestionGuide = `你是一个AI智能助手可以回答和解决我的问题。请结合前面的对话记录帮我生成 3 个问题引导我继续提问。问题的长度应小于20个字符按 JSON 格式返回: ["问题1", "问题2", "问题3"]`;
@@ -25,7 +26,10 @@ export async function createQuestionGuide({
     model: model,
     temperature: 0.1,
     max_tokens: 200,
-    messages: concatMessages,
+    messages: await loadRequestMessages({
+      messages: concatMessages,
+      useVision: false
+    }),
     stream: false
   });
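Judging only from this hunk, loadRequestMessages takes the accumulated ChatCompletionMessageParam[] and returns the form actually sent to the model, with useVision deciding whether image content is kept for vision-capable models. A minimal sketch of the call shape (the option names come from the diff above; the useVision semantics are inferred from the commit's "llm vision config" item, not from the helper's source):

// Sketch only: the helper's real behaviour lives in ../../chat/utils and is not shown here.
const requestMessages = await loadRequestMessages({
  messages: concatMessages, // ChatCompletionMessageParam[] accumulated for this chat
  useVision: false          // question-guide generation is text-only, so image parts need not survive
});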

View File

@@ -0,0 +1,39 @@
+import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
+import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
+import { countGptMessagesTokens } from '../../common/string/tiktoken';
+
+export const computedMaxToken = async ({
+  maxToken,
+  model,
+  filterMessages = []
+}: {
+  maxToken: number;
+  model: LLMModelItemType;
+  filterMessages: ChatCompletionMessageParam[];
+}) => {
+  maxToken = Math.min(maxToken, model.maxResponse);
+  const tokensLimit = model.maxContext;
+
+  /* count response max token */
+  const promptsToken = await countGptMessagesTokens(filterMessages);
+  maxToken = promptsToken + maxToken > tokensLimit ? tokensLimit - promptsToken : maxToken;
+
+  if (maxToken <= 0) {
+    maxToken = 200;
+  }
+  return maxToken;
+};
+
+// FastGPT temperature range: [0,10]; model temperature ranges vary by provider, e.g. [0,2] or (0,1]
+export const computedTemperature = ({
+  model,
+  temperature
+}: {
+  model: LLMModelItemType;
+  temperature: number;
+}) => {
+  temperature = +(model.maxTemperature * (temperature / 10)).toFixed(2);
+  temperature = Math.max(temperature, 0.01);
+
+  return temperature;
+};
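Both helpers are small enough to check by hand: computedMaxToken clamps the requested completion budget to the model's maxResponse, then shrinks it so prompt tokens plus completion tokens fit inside maxContext, falling back to 200 when the prompt alone already fills the window; computedTemperature rescales FastGPT's 0-10 UI value onto the model's own maximum and floors the result at 0.01. A usage sketch, with a made-up model config and messages (illustrative numbers only, not from the repo):

// Hypothetical model config for illustration; real values come from the FastGPT model list.
const model = {
  maxContext: 16000,  // total context window
  maxResponse: 4000,  // largest completion the model accepts
  maxTemperature: 2   // upper bound of its native temperature range
} as unknown as LLMModelItemType;

const messages: ChatCompletionMessageParam[] = [
  { role: 'user', content: 'Summarise the attached file.' }
];

// If countGptMessagesTokens(messages) came to ~15500, the requested 4000-token completion
// would be cut to 500 so prompt + completion still fit inside the 16000-token window.
const maxToken = await computedMaxToken({ maxToken: 4000, model, filterMessages: messages });

// UI temperature 7 on the [0,10] scale -> 2 * (7 / 10) = 1.4 sent to the API;
// a UI value of 0 is floored to 0.01 so the request never carries exactly 0.
const apiTemperature = computedTemperature({ model, temperature: 7 });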