Mirror of https://github.com/labring/FastGPT.git (synced 2025-08-01 11:58:38 +00:00)
4.8.9 test (#2299)
* perf: read file prompt
* perf: read file prompt
* perf: free plan tip
* feat: cron job usage
* perf: app templates
* perf: get llm model by name
* feat: support outlink upload file
* fix: upload limit
@@ -20,6 +20,7 @@ import { getDocPath } from '@/web/common/system/doc';
 import AIModelSelector from '@/components/Select/AIModelSelector';
 import { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';
 import QuestionTip from '@fastgpt/web/components/common/MyTooltip/QuestionTip';
+import { getWebLLMModel } from '@/web/common/system/utils';
 
 const AIChatSettingsModal = ({
   onClose,
@@ -44,18 +45,18 @@ const AIChatSettingsModal = ({
   const showVisionSwitch = watch(NodeInputKeyEnum.aiChatVision) !== undefined;
   const showMaxHistoriesSlider = watch('maxHistories') !== undefined;
   const useVision = watch('aiChatVision');
-  const selectedModel = llmModelList.find((item) => item.model === model) || llmModelList[0];
+  const selectedModel = getWebLLMModel(model);
   const llmSupportVision = !!selectedModel?.vision;
 
   const tokenLimit = useMemo(() => {
-    return llmModelList.find((item) => item.model === model)?.maxResponse || 4096;
-  }, [llmModelList, model]);
+    return selectedModel?.maxResponse || 4096;
+  }, [selectedModel?.maxResponse]);
 
   const onChangeModel = (e: string) => {
     setValue('model', e);
 
     // update max tokens
-    const modelData = llmModelList.find((item) => item.model === e);
+    const modelData = getWebLLMModel(e);
     if (modelData) {
       setValue('maxToken', modelData.maxResponse / 2);
     }
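The repeated `llmModelList.find(...)` lookups above are replaced by the new `getWebLLMModel` helper ("perf: get llm model by name" in the commit message). The helper's body is not part of this diff; what follows is a minimal hypothetical sketch, assuming it reads the loaded model list from the web-side system store and keeps the old fallback to the first configured model:

// Hypothetical sketch of getWebLLMModel ('@/web/common/system/utils');
// the real implementation is not shown in this commit.
import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';

// Assumed accessor for the globally loaded model list (not in this diff).
declare function getLLMModelList(): LLMModelItemType[];

export const getWebLLMModel = (model?: string): LLMModelItemType => {
  const list = getLLMModelList();
  // Same find-by-name-with-fallback behaviour as the removed call sites.
  return list.find((item) => item.model === model) ?? list[0];
};

Centralizing the lookup means every call site gets the same fallback behaviour, and a future change (e.g. filtering hidden models) only touches one place.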
@@ -137,6 +137,7 @@ const ChatInput = ({
         const { previewUrl } = await uploadFile2DB({
           file: copyFile.rawFile,
           bucketName: 'chat',
+          outLinkAuthData,
           metadata: {
             chatId
           },
@@ -168,7 +169,7 @@ const ChatInput = ({
       {
         manual: false,
         errorToast: t('common:upload_file_error'),
-        refreshDeps: [fileList]
+        refreshDeps: [fileList, outLinkAuthData, chatId]
       }
     );
     const onSelectFile = useCallback(
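These two ChatInput hunks implement "feat: support outlink upload file": `outLinkAuthData` is passed through to `uploadFile2DB` so uploads from share-link (outlink) pages can authenticate, and the `refreshDeps` fix addresses a stale-closure bug — with only `fileList` listed, the auto-run upload request could fire with outdated `outLinkAuthData` and `chatId`. A minimal sketch of the pattern using ahooks' useRequest (FastGPT wraps a similar hook; the surrounding names here are illustrative, not FastGPT's actual code):

import { useRequest } from 'ahooks';

// Illustrative signature; the real uploader takes more options.
declare function uploadFiles(
  files: File[],
  outLinkAuthData: unknown,
  chatId: string
): Promise<void>;

export const useAutoUpload = (
  fileList: File[],
  outLinkAuthData: unknown,
  chatId: string
) =>
  useRequest(() => uploadFiles(fileList, outLinkAuthData, chatId), {
    manual: false,
    // Without outLinkAuthData and chatId listed here, the request only
    // re-runs when fileList changes and can capture stale auth data.
    refreshDeps: [fileList, outLinkAuthData, chatId]
  });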
@@ -7,6 +7,7 @@ import {
 import { useTranslation } from 'next-i18next';
 import React, { useMemo } from 'react';
 import MyIcon from '@fastgpt/web/components/common/Icon';
+import { getWebLLMModel } from '@/web/common/system/utils';
 
 const SearchParamsTip = ({
   searchMode,
@@ -34,11 +35,8 @@ const SearchParamsTip = ({
 
   const extensionModelName = useMemo(
     () =>
-      datasetSearchUsingExtensionQuery
-        ? llmModelList.find((item) => item.model === queryExtensionModel)?.name ??
-          llmModelList[0]?.name
-        : undefined,
-    [datasetSearchUsingExtensionQuery, llmModelList, queryExtensionModel]
+      datasetSearchUsingExtensionQuery ? getWebLLMModel(queryExtensionModel)?.name : undefined,
+    [datasetSearchUsingExtensionQuery, queryExtensionModel, llmModelList]
   );
 
   return (
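One behavioural nuance in this SearchParamsTip hunk: the old expression fell back to `llmModelList[0]?.name` explicitly, while the new one relies on whatever fallback `getWebLLMModel` provides. Assuming the helper defaults to the first configured model (as sketched earlier), the two forms agree; a hypothetical equivalence check using only names from the diff:

import type { LLMModelItemType } from '@fastgpt/global/core/ai/model.d';

// Returns true when the removed expression and the new helper resolve
// to the same display name, assuming the first-model fallback holds.
const sameName = (
  llmModelList: LLMModelItemType[],
  queryExtensionModel: string,
  getWebLLMModel: (m?: string) => LLMModelItemType | undefined
): boolean => {
  const oldName =
    llmModelList.find((item) => item.model === queryExtensionModel)?.name ??
    llmModelList[0]?.name;
  const newName = getWebLLMModel(queryExtensionModel)?.name;
  return oldName === newName;
};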