Mirror of https://github.com/labring/FastGPT.git (synced 2025-07-29 09:44:47 +00:00)
4.8.9 test (#2299)
* perf: read file prompt
* perf: read file prompt
* perf: free plan tip
* feat: cron job usage
* perf: app templates
* perf: get llm model by name
* feat: support outlink upload file
* fix: upload limit
@@ -1,6 +1,5 @@
 import type { NextApiRequest, NextApiResponse } from 'next';
 import { jsonRes } from '@fastgpt/service/common/response';
-import { authCert } from '@fastgpt/service/support/permission/auth/common';
 import { uploadFile } from '@fastgpt/service/common/file/gridfs/controller';
 import { getUploadModel } from '@fastgpt/service/common/file/multer';
 import { removeFilesByPaths } from '@fastgpt/service/common/file/utils';
@@ -10,6 +9,7 @@ import { ReadFileBaseUrl } from '@fastgpt/global/common/file/constants';
 import { addLog } from '@fastgpt/service/common/system/log';
 import { authFrequencyLimit } from '@/service/common/frequencyLimit/api';
 import { addSeconds } from 'date-fns';
+import { authChatCert } from '@/service/support/permission/auth/chat';

 const authUploadLimit = (tmbId: string) => {
   if (!global.feConfigs.uploadFileMaxAmount) return;
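Only the first two lines of authUploadLimit are visible in this hunk. A plausible completion, inferred from the imports in this file (authFrequencyLimit, addSeconds) and the "fix: upload limit" bullet; the event key, the cap, and the 30-second window are assumptions, not confirmed by the diff:

// Assumed shape of the rest of authUploadLimit (sketch, not the actual source).
const authUploadLimit = (tmbId: string) => {
  if (!global.feConfigs.uploadFileMaxAmount) return;
  return authFrequencyLimit({
    eventId: `upload-file-${tmbId}`, // hypothetical event key
    maxAmount: global.feConfigs.uploadFileMaxAmount, // per-window cap (assumed)
    expiredTime: addSeconds(new Date(), 30) // assumed 30s window
  });
};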
@@ -21,20 +21,19 @@ const authUploadLimit = (tmbId: string) => {
 };

 async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
-  const start = Date.now();
-  /* Creates the multer uploader */
-  const upload = getUploadModel({
-    maxSize: (global.feConfigs?.uploadFileMaxSize || 500) * 1024 * 1024
-  });
   const filePaths: string[] = [];

   try {
-    const { teamId, tmbId } = await authCert({ req, authToken: true });
-
-    await authUploadLimit(tmbId);
-
+    const start = Date.now();
+    /* Creates the multer uploader */
+    const upload = getUploadModel({
+      maxSize: (global.feConfigs?.uploadFileMaxSize || 500) * 1024 * 1024
+    });
     const { file, bucketName, metadata } = await upload.doUpload(req, res);
+
+    const { teamId, tmbId, outLinkUid } = await authChatCert({ req, authToken: true });
+    await authUploadLimit(outLinkUid || tmbId);

     addLog.info(`Upload file success ${file.originalname}, cost ${Date.now() - start}ms`);

     if (!bucketName) {
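The switch from authCert to authChatCert is what backs the "feat: support outlink upload file" bullet: share-link (outlink) visitors authenticate through chat credentials, and the upload limit is keyed by outLinkUid rather than a team member id. A hypothetical client-side call in a browser context; the route path, query, and metadata fields are not shown in this diff and are assumptions:

// Hypothetical client call for an outlink visitor (all names assumed).
const uploadViaOutlink = async (file: File, shareId: string, outLinkUid: string) => {
  const form = new FormData();
  form.append('file', file);
  form.append('metadata', JSON.stringify({ shareId, outLinkUid })); // assumed fields
  // Assumed route; the handler's file path is not captured in this mirror.
  return fetch('/api/common/file/upload?bucketName=chat', {
    method: 'POST',
    body: form
  });
};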
@@ -51,15 +50,19 @@ async function handler(req: NextApiRequest, res: NextApiResponse<any>) {
       metadata: metadata
     });

-    return {
-      fileId,
-      previewUrl: `${ReadFileBaseUrl}?filename=${file.originalname}&token=${await createFileToken({
-        bucketName,
-        teamId,
-        tmbId,
-        fileId
-      })}`
-    };
+    jsonRes(res, {
+      data: {
+        fileId,
+        previewUrl: `${ReadFileBaseUrl}?filename=${file.originalname}&token=${await createFileToken(
+          {
+            bucketName,
+            teamId,
+            tmbId,
+            fileId
+          }
+        )}`
+      }
+    });
   } catch (error) {
     jsonRes(res, {
       code: 500,
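The bare return is replaced by an explicit jsonRes call, so the handler now writes the response itself instead of handing an object to a wrapper. Consuming it on the client might look like the sketch below; the { code, data } envelope is jsonRes's usual shape and is an assumption here:

type UploadData = { fileId: string; previewUrl: string };

// Parse the assumed jsonRes envelope; previewUrl already embeds the token
// minted by createFileToken above.
const readUploadResponse = async (res: Response): Promise<UploadData> => {
  const body: { code: number; data: UploadData } = await res.json();
  if (body.code !== 200) throw new Error('upload failed');
  return body.data;
};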
@@ -35,6 +35,7 @@ import { AppContext } from '@/pages/app/detail/components/context';
 import QuestionTip from '@fastgpt/web/components/common/MyTooltip/QuestionTip';
 import FormLabel from '@fastgpt/web/components/common/MyBox/FormLabel';
 import VariableTip from '@/components/common/Textarea/MyTextarea/VariableTip';
+import { getWebLLMModel } from '@/web/common/system/utils';

 const DatasetSelectModal = dynamic(() => import('@/components/core/app/DatasetSelectModal'));
 const DatasetParamsModal = dynamic(() => import('@/components/core/app/DatasetParamsModal'));
@@ -121,8 +122,7 @@ const EditForm = ({
     [appForm.chatConfig.variables, t]
   );

-  const selectedModel =
-    llmModelList.find((item) => item.model === appForm.aiSettings.model) ?? llmModelList[0];
+  const selectedModel = getWebLLMModel(appForm.aiSettings.model);
   const tokenLimit = useMemo(() => {
     return selectedModel?.quoteMaxToken || 3000;
   }, [selectedModel.quoteMaxToken]);
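The inline lookup with its `?? llmModelList[0]` fallback moves into a shared helper, matching the "perf: get llm model by name" bullet. A minimal sketch of what getWebLLMModel plausibly does, inferred from the code it replaces; the store access is an assumption, not the actual FastGPT source:

// @/web/common/system/utils, hypothetical reconstruction.
import { useSystemStore } from '@/web/common/system/useSystemStore';

export const getWebLLMModel = (model?: string) => {
  const { llmModelList } = useSystemStore.getState();
  // Fall back to the first configured model when the saved name no longer
  // matches any configured model, mirroring the replaced inline lookup.
  return llmModelList.find((item) => item.model === model) ?? llmModelList[0];
};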
@@ -29,6 +29,7 @@ import FormLabel from '@fastgpt/web/components/common/MyBox/FormLabel';
 import ValueTypeLabel from './render/ValueTypeLabel';
 import MyIcon from '@fastgpt/web/components/common/Icon';
 import { isWorkflowStartOutput } from '@fastgpt/global/core/workflow/template/system/workflowStart';
+import { getWebLLMModel } from '@/web/common/system/utils';

 const NodeDatasetConcat = ({ data, selected }: NodeProps<FlowNodeItemType>) => {
   const { t } = useTranslation();
@@ -46,8 +47,7 @@ const NodeDatasetConcat = ({ data, selected }: NodeProps<FlowNodeItemType>) => {
       if (item.flowNodeType === FlowNodeTypeEnum.chatNode) {
         const model =
           item.inputs.find((item) => item.key === NodeInputKeyEnum.aiModel)?.value || '';
-        const quoteMaxToken =
-          llmModelList.find((item) => item.model === model)?.quoteMaxToken || 3000;
+        const quoteMaxToken = getWebLLMModel(model)?.quoteMaxToken || 3000;

         maxTokens = Math.max(maxTokens, quoteMaxToken);
       }
@@ -11,6 +11,7 @@ import { useSystemStore } from '@/web/common/system/useSystemStore';
 import SearchParamsTip from '@/components/core/dataset/SearchParamsTip';
 import { useContextSelector } from 'use-context-selector';
 import { WorkflowContext } from '@/pages/app/detail/components/WorkflowComponents/context';
+import { getWebLLMModel } from '@/web/common/system/utils';

 const SelectDatasetParam = ({ inputs = [], nodeId }: RenderInputProps) => {
   const onChangeNode = useContextSelector(WorkflowContext, (v) => v.onChangeNode);
@@ -36,8 +37,7 @@ const SelectDatasetParam = ({ inputs = [], nodeId }: RenderInputProps) => {
     if (item.flowNodeType === FlowNodeTypeEnum.chatNode) {
       const model =
         item.inputs.find((item) => item.key === NodeInputKeyEnum.aiModel)?.value || '';
-      const quoteMaxToken =
-        llmModelList.find((item) => item.model === model)?.quoteMaxToken || 3000;
+      const quoteMaxToken = getWebLLMModel(model)?.quoteMaxToken || 3000;

       maxTokens = Math.max(maxTokens, quoteMaxToken);
     }
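This is the same replacement as in NodeDatasetConcat: both workflow nodes now resolve quoteMaxToken through the shared helper instead of duplicating the list scan. The pattern they share condenses to a fold over the chat nodes' models (sketch only; getWebLLMModel as reconstructed above, 3000 is the fallback used in the diff):

// Fold per-node quote limits into one token budget.
const maxQuoteToken = (models: string[]): number =>
  models.reduce((max, m) => Math.max(max, getWebLLMModel(m)?.quoteMaxToken || 3000), 3000);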
@@ -338,7 +338,7 @@ export async function getServerSideProps(context: any) {
     props: {
       appId: context?.query?.appId || '',
       chatId: context?.query?.chatId || '',
-      ...(await serviceSideProps(context, ['file', 'app', 'chat']))
+      ...(await serviceSideProps(context, ['file', 'app', 'chat', 'workflow']))
     }
   };
 }
@@ -394,7 +394,7 @@ export async function getServerSideProps(context: any) {
       appIntro: app?.appId?.intro ?? 'intro',
       shareId: shareId ?? '',
       authToken: authToken ?? '',
-      ...(await serviceSideProps(context, ['file', 'app', 'chat']))
+      ...(await serviceSideProps(context, ['file', 'app', 'chat', 'workflow']))
     }
   };
 }
@@ -337,7 +337,7 @@ export async function getServerSideProps(context: any) {
       chatId: context?.query?.chatId || '',
       teamId: context?.query?.teamId || '',
       teamToken: context?.query?.teamToken || '',
-      ...(await serviceSideProps(context, ['file', 'app', 'chat']))
+      ...(await serviceSideProps(context, ['file', 'app', 'chat', 'workflow']))
     }
   };
 }
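All three chat entry pages (app chat, share/outlink chat, team chat) add the 'workflow' i18n namespace so workflow-related strings resolve on first render. serviceSideProps presumably wraps next-i18next's server-side translation loader; a sketch of the pattern under that assumption, not the actual FastGPT implementation:

import { serverSideTranslations } from 'next-i18next/serverSideTranslations';

// Assumed shape: load 'common' plus the page's extra namespaces so that
// t('workflow:...') keys are available during server-side rendering.
export async function serviceSideProps(context: any, namespaces: string[] = []) {
  return serverSideTranslations(context.locale ?? 'en', ['common', ...namespaces]);
}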