Mirror of https://github.com/labring/FastGPT.git, synced 2025-07-23 05:12:39 +00:00
feat: update ESLint config with @typescript-eslint/consistent-type-imports (#4746)
* update: Add type
* fix: update import statement for NextApiRequest type
* fix: update imports to use type for LexicalEditor and EditorState
* Refactor imports to use 'import type' for type-only imports across multiple files
  - Updated imports in various components and API files to use 'import type' for better clarity and to optimize TypeScript's type checking.
  - Ensured consistent usage of type imports in files related to chat, dataset, workflow, and user management.
  - Improved code readability and maintainability by distinguishing between value and type imports.
* refactor: remove old ESLint configuration and add new rules
  - Deleted the old ESLint configuration file from the app project.
  - Added a new ESLint configuration file with updated rules and settings.
  - Changed imports to use type-only imports in various files for better clarity and performance.
  - Updated TypeScript configuration to remove unnecessary options.
  - Added an ESLint ignore file to exclude build and dependency directories from linting.
* fix: update imports to use 'import type' for type-only imports in schema files
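For context: the rule behind every hunk in this commit is @typescript-eslint/consistent-type-imports, which reports value-style imports whose bindings are only used in type positions and can autofix them. The diff shown here does not include the new ESLint config file itself, so the following is only a minimal sketch of what it might look like under flat config; the fixStyle option is inferred from the inline 'type' specifiers visible in the hunks below.

// eslint.config.mjs -- hypothetical sketch, not the repo's actual config
import tseslint from 'typescript-eslint';

export default tseslint.config({
  files: ['**/*.ts', '**/*.tsx'],
  rules: {
    // Flag value imports that are only used as types; running with --fix
    // rewrites them to 'import type' or inline 'type' specifiers.
    '@typescript-eslint/consistent-type-imports': [
      'error',
      { prefer: 'type-imports', fixStyle: 'inline-type-imports' }
    ]
  }
});

Running eslint --fix with a rule configured like this produces exactly the kind of mechanical rewrites seen in the hunks that follow.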
@@ -1,8 +1,8 @@
-import fs from 'fs';
+import type fs from 'fs';
 import { getAxiosConfig } from '../config';
 import axios from 'axios';
 import FormData from 'form-data';
-import { STTModelType } from '@fastgpt/global/core/ai/model.d';
+import { type STTModelType } from '@fastgpt/global/core/ai/model.d';

 export const aiTranscriptions = async ({
   model: modelData,
@@ -8,9 +8,9 @@ import type {
 import { getErrText } from '@fastgpt/global/common/error/utils';
 import { addLog } from '../../common/system/log';
 import { i18nT } from '../../../web/i18n/utils';
-import { OpenaiAccountType } from '@fastgpt/global/support/user/team/type';
+import { type OpenaiAccountType } from '@fastgpt/global/support/user/team/type';
 import { getLLMModel } from './model';
-import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
+import { type LLMModelItemType } from '@fastgpt/global/core/ai/model.d';

 const aiProxyBaseUrl = process.env.AIPROXY_API_ENDPOINT
   ? `${process.env.AIPROXY_API_ENDPOINT}/v1`
@@ -1,20 +1,20 @@
 import path from 'path';
 import * as fs from 'fs';
-import { SystemModelItemType } from '../type';
+import { type SystemModelItemType } from '../type';
 import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
 import { MongoSystemModel } from './schema';
 import {
-  LLMModelItemType,
-  EmbeddingModelItemType,
-  TTSModelType,
-  STTModelType,
-  RerankModelItemType
+  type LLMModelItemType,
+  type EmbeddingModelItemType,
+  type TTSModelType,
+  type STTModelType,
+  type RerankModelItemType
 } from '@fastgpt/global/core/ai/model.d';
 import { debounce } from 'lodash';
 import {
   getModelProvider,
-  ModelProviderIdType,
-  ModelProviderType
+  type ModelProviderIdType,
+  type ModelProviderType
 } from '@fastgpt/global/core/ai/provider';
 import { findModelFromAlldata } from '../model';
 import {
@@ -1,4 +1,4 @@
-import { EmbeddingModelItemType } from '@fastgpt/global/core/ai/model.d';
+import { type EmbeddingModelItemType } from '@fastgpt/global/core/ai/model.d';
 import { getAIApi } from '../config';
 import { countPromptTokens } from '../../../common/string/tiktoken/index';
 import { EmbeddingTypeEnm } from '@fastgpt/global/core/ai/constants';
@@ -1,6 +1,6 @@
 import { replaceVariable } from '@fastgpt/global/common/string/tools';
 import { createChatCompletion } from '../config';
-import { ChatItemType } from '@fastgpt/global/core/chat/type';
+import { type ChatItemType } from '@fastgpt/global/core/chat/type';
 import { countGptMessagesTokens, countPromptTokens } from '../../../common/string/tiktoken/index';
 import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
 import { getLLMModel } from '../model';
@@ -1,5 +1,5 @@
 import { cloneDeep } from 'lodash';
-import { SystemModelItemType } from './type';
+import { type SystemModelItemType } from './type';

 export const getDefaultLLMModel = () => global?.systemDefaultModel.llm!;
 export const getLLMModel = (model?: string) => {
@@ -2,7 +2,7 @@ import { addLog } from '../../../common/system/log';
 import { POST } from '../../../common/api/serverRequest';
 import { getDefaultRerankModel } from '../model';
 import { getAxiosConfig } from '../config';
-import { RerankModelItemType } from '@fastgpt/global/core/ai/model.d';
+import { type RerankModelItemType } from '@fastgpt/global/core/ai/model.d';
 import { countPromptTokens } from '../../../common/string/tiktoken';

 type PostReRankResponse = {
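A note on the two shapes of the fix before the next file: the hunks above use the inline form, import { type X } from '...', which lets a single statement mix runtime values and types, while the type.d.ts hunks below convert whole statements to import type { ... }, which marks every specifier as type-only. Both forms are erased from the emitted JavaScript. A small illustration reusing names from this diff (the combined import path is illustrative, not taken from the repo):

// Inline specifier: getLLMModel is a runtime value, the item type is not.
import { getLLMModel, type LLMModelItemType } from '../model';

// Statement form: the entire import disappears at compile time.
import type { STTModelType, TTSModelType } from '@fastgpt/global/core/ai/model.d';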
packages/service/core/ai/type.d.ts (vendored, 4 changed lines)
@@ -1,5 +1,5 @@
-import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
-import {
+import type { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
+import type {
   STTModelType,
   RerankModelItemType,
   TTSModelType,
@@ -1,5 +1,5 @@
-import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
-import {
+import { type LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
+import type {
   ChatCompletionCreateParamsNonStreaming,
   ChatCompletionCreateParamsStreaming,
   CompletionFinishReason,
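Why the churn is worth it: a type-only import is guaranteed to be erased by the compiler, so it can never trigger module side effects or runtime circular-dependency problems, and it removes ambiguity for single-file transpilers (Babel, esbuild, SWC under isolatedModules), which cannot look across files to decide whether an imported name is a type. A hedged before/after sketch, reusing an import from the first hunk:

// Before: the compiler must infer that STTModelType is type-only; some
// toolchains would keep a runtime import of model.d here.
import { STTModelType } from '@fastgpt/global/core/ai/model.d';

// After: the explicit marker makes erasure unconditional.
import { type STTModelType } from '@fastgpt/global/core/ai/model.d';

// The name remains fully usable in type positions.
export function pickSttModel(models: STTModelType[]): STTModelType {
  return models[0];
}

(pickSttModel is a hypothetical helper for illustration, not code from this commit.)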