mirror of
https://github.com/labring/FastGPT.git
synced 2025-10-17 16:45:02 +00:00

* feat: favorite apps & quick apps with their own configuration (#5515) * chore: extract chat history and drawer; fix model selector * feat: display favourite apps and make it configurable * feat: favorite apps & quick apps with their own configuration * fix: fix tab title and add loading state for searching * fix: cascade delete favorite app and quick app while deleting relative app * chore: make improvements * fix: favourite apps ui * fix: add permission for quick apps * chore: fix permission & clear redundant code * perf: chat home page code * chatbox ui * fix: 4.12.2-dev (#5520) * fix: add empty placeholder; fix app quick status; fix tag and layout * chore: add tab query for the setting tabs * chore: use `useConfirm` hook instead of `MyModal` * remove log * fix: fix modal padding (#5521) * perf: manage app * feat: enhance model provider handling and update icon references (#5493) * perf: model provider * sdk package * refactor: create llm response (#5499) * feat: add LLM response processing functions, including the creation of stream-based and complete responses * feat: add volta configuration for node and pnpm versions * refactor: update LLM response handling and event structure in tool choice logic * feat: update LLM response structure and integrate with tool choice logic * refactor: clean up imports and remove unused streamResponse function in chat and toolChoice modules * refactor: rename answer variable to answerBuffer for clarity in LLM response handling * feat: enhance LLM response handling with tool options and integrate tools into chat and tool choice logic * refactor: remove volta configuration from package.json * refactor: reorganize LLM response types and ensure default values for token counts * refactor: streamline LLM response handling by consolidating response structure and removing redundant checks * refactor: enhance LLM response handling by consolidating tool options and streamlining event callbacks * fix: build error * refactor: update tool 
type definitions for consistency in tool handling * feat: llm request function * fix: ts * fix: ts * fix: ahook ts * fix: variable name * update lock * ts version * doc * remove log * fix: translation type * perf: workflow status check * fix: ts * fix: prompt tool call * fix: fix missing plugin interact window & make tag draggable (#5527) * fix: incorrect select quick apps state; filter apps type (#5528) * fix: usesafe translation * perf: add quickapp modal --------- Co-authored-by: 伍闲犬 <whoeverimf5@gmail.com> Co-authored-by: Ctrlz <143257420+ctrlz526@users.noreply.github.com> Co-authored-by: francis <zhichengfan18@gmail.com>
87 lines
2.1 KiB
TypeScript
87 lines
2.1 KiB
TypeScript
import { i18nT } from '../../../web/i18n/utils';
|
|
import type { LLMModelItemType, STTModelType, EmbeddingModelItemType } from './model.d';
|
|
import { getModelProvider, type ModelProviderIdType } from './provider';
|
|
|
|
/**
 * Categories of AI models supported by the platform.
 * The string values double as the serialized `type` field on model
 * config objects (see the default model lists below) and as the
 * `value` used by `modelTypeList` for UI filtering.
 */
export enum ModelTypeEnum {
  llm = 'llm', // chat / completion models
  embedding = 'embedding', // vector embedding models
  tts = 'tts', // text-to-speech
  stt = 'stt', // speech-to-text
  rerank = 'rerank' // search-result re-ranking models
}
|
/**
 * Default LLM (chat) model list, used as a fallback configuration
 * when no LLM model has been explicitly configured.
 */
export const defaultQAModels: LLMModelItemType[] = [
  {
    type: ModelTypeEnum.llm,
    provider: 'OpenAI',
    model: 'gpt-5', // model id sent to the provider
    name: 'gpt-5', // display name shown in the UI
    // NOTE(review): presumably token limits — 16k context looks low for this
    // model id; confirm against the provider's published specs.
    maxContext: 16000,
    maxResponse: 16000,
    quoteMaxToken: 13000, // presumably the token budget for quoted dataset content — verify
    maxTemperature: 1.2,
    charsPointsPrice: 0, // 0 presumably means no per-character billing — verify
    censor: false,
    vision: true,
    datasetProcess: true,
    toolChoice: true, // native tool-choice enabled; legacy functionCall disabled below
    functionCall: false,
    defaultSystemChatPrompt: '',
    defaultConfig: {}
  }
];
|
/**
 * Default embedding (vector) model list, used as a fallback configuration
 * when no embedding model has been explicitly configured.
 */
export const defaultVectorModels: EmbeddingModelItemType[] = [
  {
    type: ModelTypeEnum.embedding,
    provider: 'OpenAI',
    model: 'text-embedding-3-small',
    // NOTE(review): display name 'Embedding-2' does not match the model id
    // above (the other defaults in this file use name === model) — confirm
    // whether this is a deliberate display label or a stale leftover.
    name: 'Embedding-2',
    charsPointsPrice: 0, // 0 presumably means no per-character billing — verify
    defaultToken: 500,
    maxToken: 3000,
    weight: 100
  }
];
|
/**
 * Default speech-to-text model list, used as a fallback configuration
 * when no STT model has been explicitly configured.
 */
export const defaultSTTModels: STTModelType[] = [
  {
    type: ModelTypeEnum.stt,
    provider: 'OpenAI',
    model: 'whisper-1', // model id sent to the provider
    name: 'whisper-1', // display name shown in the UI
    charsPointsPrice: 0 // 0 presumably means no per-character billing — verify
  }
];
|
export const getModelFromList = (
|
|
modelList: { provider: ModelProviderIdType; name: string; model: string }[],
|
|
model: string,
|
|
language: string
|
|
):
|
|
| {
|
|
avatar: string;
|
|
provider: ModelProviderIdType;
|
|
name: string;
|
|
model: string;
|
|
}
|
|
| undefined => {
|
|
const modelData = modelList.find((item) => item.model === model) ?? modelList[0];
|
|
if (!modelData) {
|
|
return;
|
|
}
|
|
const provider = getModelProvider(modelData.provider, language);
|
|
return {
|
|
...modelData,
|
|
avatar: provider.avatar
|
|
};
|
|
};
|
|
|
|
/**
 * Model-type options for UI selectors/filters: each entry pairs an
 * i18n label key with the corresponding `ModelTypeEnum` value.
 */
export const modelTypeList = [
  { label: i18nT('common:model.type.chat'), value: ModelTypeEnum.llm },
  { label: i18nT('common:model.type.embedding'), value: ModelTypeEnum.embedding },
  { label: i18nT('common:model.type.tts'), value: ModelTypeEnum.tts },
  { label: i18nT('common:model.type.stt'), value: ModelTypeEnum.stt },
  { label: i18nT('common:model.type.reRank'), value: ModelTypeEnum.rerank }
];