mirror of
https://github.com/labring/FastGPT.git
synced 2025-07-28 09:03:53 +00:00
File input (#2270)
* doc * feat: file upload config * perf: chat box file params * feat: markdown show file * feat: chat file store and clear * perf: read file contentType * feat: llm vision config * feat: file url output * perf: plugin error text * perf: image load * feat: ai chat document * perf: file block ui * feat: read file node * feat: file read response field * feat: simple mode support read files * feat: tool call * feat: read file histories * perf: select file * perf: select file config * i18n * i18n * fix: ts; feat: tool response preview result
This commit is contained in:
@@ -14,6 +14,7 @@ import { checkInvalidChunkAndLock } from '@fastgpt/service/core/dataset/training
 import { addMinutes } from 'date-fns';
 import { countGptMessagesTokens } from '@fastgpt/service/common/string/tiktoken/index';
 import { pushDataListToTrainingQueueByCollectionId } from '@fastgpt/service/core/dataset/training/controller';
+import { loadRequestMessages } from '@fastgpt/service/core/chat/utils';

 const reduceQueue = () => {
   global.qaQueueLen = global.qaQueueLen > 0 ? global.qaQueueLen - 1 : 0;
@@ -113,7 +114,7 @@ ${replaceVariable(Prompt_AgentQA.fixedText, { text })}`;
   const chatResponse = await ai.chat.completions.create({
     model,
     temperature: 0.3,
-    messages,
+    messages: await loadRequestMessages({ messages, useVision: false }),
     stream: false
   });
   const answer = chatResponse.choices?.[0].message?.content || '';
|
Reference in New Issue
Block a user