File input (#2270)

* doc

* feat: file upload config

* perf: chat box file params

* feat: markdown file rendering

* feat: chat file store and clear

* perf: read file contentType

* feat: llm vision config

* feat: file url output

* perf: plugin error text

* perf: image load

* feat: ai chat document

* perf: file block ui

* feat: read file node

* feat: file read response field

* feat: simple mode supports reading files

* feat: tool call

* feat: read file histories

* perf: select file

* perf: select file config

* i18n

* i18n

* fix: ts; feat: tool response preview result
Author: Archer
Committed: 2024-08-06 10:00:22 +08:00 (via GitHub)
Parent: 10dcdb5491
Commit: e36d9d794f
121 changed files with 2600 additions and 1142 deletions


@@ -1,10 +1,6 @@
 // @ts-nocheck
 import type { NextApiResponse } from 'next';
-import {
-  filterGPTMessageByMaxTokens,
-  formatGPTMessagesInRequestBefore,
-  loadChatImgToBase64
-} from '../../../chat/utils';
+import { filterGPTMessageByMaxTokens, loadRequestMessages } from '../../../chat/utils';
 import type { ChatItemType, UserChatItemValueItemType } from '@fastgpt/global/core/chat/type.d';
 import { ChatRoleEnum } from '@fastgpt/global/core/chat/constants';
 import { SseResponseEventEnum } from '@fastgpt/global/core/workflow/runtime/constants';
@@ -146,25 +142,17 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
           }
         ]
       : []),
-    ...formatGPTMessagesInRequestBefore(filterMessages)
+    ...filterMessages
   ] as ChatCompletionMessageParam[];

   if (concatMessages.length === 0) {
     return Promise.reject('core.chat.error.Messages empty');
   }

-  const loadMessages = await Promise.all(
-    concatMessages.map(async (item) => {
-      if (item.role === ChatCompletionRequestMessageRoleEnum.User) {
-        return {
-          ...item,
-          content: await loadChatImgToBase64(item.content)
-        };
-      } else {
-        return item;
-      }
-    })
-  );
+  const loadMessages = await loadRequestMessages({
+    messages: concatMessages,
+    useVision: false
+  });

   const response = await ai.chat.completions.create(
     {
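
The diff above swaps the inline `Promise.all` loop, which unconditionally converted user-message images to base64 via `loadChatImgToBase64`, for a single `loadRequestMessages` helper gated by a `useVision` flag. The helper's body is not part of this diff; the sketch below is only a reconstruction from the removed inline logic (the message type and the image loader are stubbed so it stands alone), and the real function in `../../../chat/utils` almost certainly does more, e.g. handling the file inputs this PR adds.

```ts
// Hypothetical sketch of a loadRequestMessages-style helper, reconstructed
// from the inline logic the diff removes. Not the PR's actual implementation.
type ChatCompletionMessageParam = {
  role: 'system' | 'user' | 'assistant' | 'tool';
  content: unknown;
};

// Stub for the previous per-message loader that inlined images as base64.
declare function loadChatImgToBase64(content: unknown): Promise<unknown>;

export const loadRequestMessages = async ({
  messages,
  useVision
}: {
  messages: ChatCompletionMessageParam[];
  useVision: boolean;
}): Promise<ChatCompletionMessageParam[]> =>
  Promise.all(
    messages.map(async (item) => {
      // Only user messages carry image parts, and they are only inlined when
      // the caller enables vision; the call site above passes useVision: false.
      if (item.role !== 'user' || !useVision) return item;
      return { ...item, content: await loadChatImgToBase64(item.content) };
    })
  );
```

Centralizing this behind a flag lets each dispatcher decide whether images are loaded at all, instead of every call site repeating the role check, which lines up with the "feat: llm vision config" commit above.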