
* update: Add type
* fix: update import statement for NextApiRequest type
* fix: update imports to use type for LexicalEditor and EditorState
* Refactor imports to use 'import type' for type-only imports across multiple files
  - Updated imports in various components and API files to use 'import type' for better clarity and to optimize TypeScript's type checking.
  - Ensured consistent usage of type imports in files related to chat, dataset, workflow, and user management.
  - Improved code readability and maintainability by distinguishing between value and type imports.
* refactor: remove old ESLint configuration and add new rules
  - Deleted the old ESLint configuration file from the app project.
  - Added a new ESLint configuration file with updated rules and settings.
  - Changed imports to use type-only imports in various files for better clarity and performance.
  - Updated TypeScript configuration to remove unnecessary options.
  - Added an ESLint ignore file to exclude build and dependency directories from linting.
* fix: update imports to use 'import type' for type-only imports in schema files
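
A small sketch of the 'import type' pattern the commit describes. The names and surrounding code here are illustrative, not taken from this commit; the ESLint rule mentioned in the comments is the standard typescript-eslint rule for this style, not necessarily the exact configuration the commit added.

// Hypothetical sketch: a type-only import is erased from the emitted
// JavaScript, so it adds no runtime dependency and cannot create a
// runtime import cycle; a value import is kept in the output.
import type { NextApiRequest } from 'next'; // type-only: erased at build time
import { ModelTypeEnum } from 'packages/global/core/ai/model'; // value: the enum exists at runtime

// ESLint can enforce the split with the typescript-eslint rule
// '@typescript-eslint/consistent-type-imports', e.g.
//   '@typescript-eslint/consistent-type-imports': ['error', { prefer: 'type-imports' }]

export function describeRequest(req: NextApiRequest): string {
  // NextApiRequest is used only as a type annotation, which is why the
  // type-only import above is sufficient.
  return `${req.method ?? 'GET'} ${req.url ?? '/'} (${ModelTypeEnum.llm})`;
}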
import { ModelTypeEnum } from 'packages/global/core/ai/model';
import type { ModelProviderIdType } from 'packages/global/core/ai/provider';

export default async function setupModels() {
  // Register in-memory model maps on the global object.
  global.llmModelMap = new Map<string, any>();
  global.embeddingModelMap = new Map<string, any>();

  // Minimal LLM entry used as the default chat model.
  global.llmModelMap.set('gpt-4o-mini', {
    type: ModelTypeEnum.llm,
    model: 'gpt-4o-mini',
    name: 'gpt-4o-mini',
    avatar: 'gpt-4o-mini',
    isActive: true,
    isDefault: true,
    isCustom: false,
    requestUrl: undefined,
    requestAuth: undefined,
    customCQPrompt: '',
    customExtractPrompt: '',
    defaultSystemChatPrompt: undefined,
    fieldMap: undefined,
    defaultConfig: undefined,
    provider: 'OpenAI' as ModelProviderIdType,
    functionCall: false,
    toolChoice: false,
    maxContext: 4096,
    maxResponse: 4096,
    quoteMaxToken: 2048
  });

  // System-wide defaults: one LLM and one embedding model.
  global.systemDefaultModel = {
    llm: {
      type: ModelTypeEnum.llm,
      model: 'gpt-4o-mini',
      name: 'gpt-4o-mini',
      avatar: 'gpt-4o-mini',
      isActive: true,
      isDefault: true,
      isCustom: false,
      requestUrl: undefined,
      requestAuth: undefined,
      customCQPrompt: '',
      customExtractPrompt: '',
      defaultSystemChatPrompt: undefined,
      fieldMap: undefined,
      defaultConfig: undefined,
      provider: 'OpenAI' as ModelProviderIdType,
      functionCall: false,
      toolChoice: false,
      maxContext: 4096,
      maxResponse: 4096,
      quoteMaxToken: 2048
    },
    embedding: {
      type: ModelTypeEnum.embedding,
      model: 'text-embedding-ada-002',
      name: 'text-embedding-ada-002',
      avatar: 'text-embedding-ada-002',
      isActive: true,
      isDefault: true,
      isCustom: false,
      requestUrl: undefined,
      requestAuth: undefined,
      defaultConfig: undefined,
      defaultToken: 1,
      maxToken: 100,
      provider: 'OpenAI',
      weight: 1
    }
  };
}
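
A minimal usage sketch, assuming this file is wired in as a test setup module (for example via Vitest's setupFiles). The file names, config path, and the presence of global type declarations for llmModelMap, embeddingModelMap, and systemDefaultModel are assumptions, not taken from the repository.

// test/setup.ts (illustrative): run the model bootstrap before any test file.
import setupModels from './setupModels';

// Top-level await works in an ESM setup module; after this resolves,
// global.llmModelMap, global.embeddingModelMap and global.systemDefaultModel
// are populated for every test in this worker.
await setupModels();

// vitest.config.ts (illustrative): register the setup module.
// import { defineConfig } from 'vitest/config';
// export default defineConfig({
//   test: { setupFiles: ['./test/setup.ts'] }
// });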