v4.6.4-Outlink (#589)
Mirror of https://github.com/labring/FastGPT.git (synced 2025-08-01 03:48:24 +00:00)
@@ -3,7 +3,7 @@ import { GET } from '@fastgpt/service/common/api/plusRequest';

 export async function getSimpleTemplatesFromPlus(): Promise<AppSimpleEditConfigTemplateType[]> {
   try {
-    if (!global.systemEnv.pluginBaseUrl) return [];
+    if (!global.systemEnv?.pluginBaseUrl) return [];

     return GET<AppSimpleEditConfigTemplateType[]>('/core/app/getSimpleTemplates');
   } catch (error) {
@@ -112,7 +112,7 @@ export async function searchDatasetData(props: SearchProps) {
     limit: maxTokens,
     searchMode = DatasetSearchModeEnum.embedding
   } = props;
-  searchMode = global.systemEnv.pluginBaseUrl ? searchMode : DatasetSearchModeEnum.embedding;
+  searchMode = global.systemEnv?.pluginBaseUrl ? searchMode : DatasetSearchModeEnum.embedding;

   // Compatible with topk limit
   if (maxTokens < 50) {
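
[Editor's note] A recurring change in this commit swaps bare reads of `global.systemEnv.pluginBaseUrl` for optional chaining (`global.systemEnv?.pluginBaseUrl`), so deployments where `systemEnv` has not been initialized skip the plus-only code path instead of throwing a TypeError. A minimal sketch of the pattern; the `fetchFromPlus` helper and the env shape are illustrative, not part of the commit:

// Sketch only: `systemEnv` may be undefined before the config file loads.
declare global {
  // eslint-disable-next-line no-var
  var systemEnv: { pluginBaseUrl?: string } | undefined;
}

export async function fetchFromPlus<T>(path: string, fallback: T): Promise<T> {
  // `?.` yields undefined (falsy) instead of throwing when systemEnv is absent,
  // so the commercial endpoint is skipped and the fallback is returned.
  if (!global.systemEnv?.pluginBaseUrl) return fallback;
  const res = await fetch(`${global.systemEnv.pluginBaseUrl}${path}`);
  return (await res.json()) as T;
}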
@@ -10,11 +10,12 @@ import { replaceVariable } from '@fastgpt/global/common/string/tools';
 import { Prompt_CQJson } from '@/global/core/prompt/agent';
 import { FunctionModelItemType } from '@fastgpt/global/core/ai/model.d';
 import { getCQModel } from '@/service/core/ai/model';
+import { getHistories } from '../utils';

 type Props = ModuleDispatchProps<{
   [ModuleInputKeyEnum.aiModel]: string;
   [ModuleInputKeyEnum.aiSystemPrompt]?: string;
-  [ModuleInputKeyEnum.history]?: ChatItemType[];
+  [ModuleInputKeyEnum.history]?: ChatItemType[] | number;
   [ModuleInputKeyEnum.userChatInput]: string;
   [ModuleInputKeyEnum.agents]: ClassifyQuestionAgentItemType[];
 }>;
@@ -23,13 +24,14 @@ type CQResponse = {
   [key: string]: any;
 };

-const agentFunName = 'agent_user_question';
+const agentFunName = 'classify_question';

 /* request openai chat */
 export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse> => {
   const {
     user,
-    inputs: { model, agents, userChatInput }
+    histories,
+    inputs: { model, history = 6, agents, userChatInput }
   } = props as Props;

   if (!userChatInput) {
@@ -42,11 +44,13 @@ export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse
     if (cqModel.functionCall) {
       return functionCall({
         ...props,
+        histories: getHistories(history, histories),
         cqModel
       });
     }
     return completions({
       ...props,
+      histories: getHistories(history, histories),
       cqModel
     });
   })();
@@ -54,7 +58,7 @@ export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse
   const result = agents.find((item) => item.key === arg?.type) || agents[agents.length - 1];

   return {
-    [result.key]: 1,
+    [result.key]: result.value,
     [ModuleOutputKeyEnum.responseData]: {
       price: user.openaiAccount?.key ? 0 : cqModel.price * tokens,
       model: cqModel.name || '',
@@ -69,18 +73,19 @@ export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse
 async function functionCall({
   user,
   cqModel,
-  inputs: { agents, systemPrompt, history = [], userChatInput }
+  histories,
+  inputs: { agents, systemPrompt, userChatInput }
 }: Props & { cqModel: FunctionModelItemType }) {
   const messages: ChatItemType[] = [
-    ...history,
+    ...histories,
     {
       obj: ChatRoleEnum.Human,
       value: systemPrompt
-        ? `补充的背景知识:
-"""
+        ? `<背景知识>
 ${systemPrompt}
-"""
-我的问题: ${userChatInput}
+</背景知识>
+
+问题: "${userChatInput}"
 `
         : userChatInput
     }
@@ -95,18 +100,19 @@ ${systemPrompt}
   // function body
   const agentFunction = {
     name: agentFunName,
-    description: '请根据对话记录及补充的背景知识,判断用户的问题类型,并返回对应的字段',
+    description: '根据对话记录及补充的背景知识,对问题进行分类,并返回对应的类型字段',
     parameters: {
       type: 'object',
       properties: {
         type: {
           type: 'string',
-          description: `判断用户的问题类型,并返回对应的字段。下面是几种问题类型: ${agents
+          description: `问题类型。下面是几种可选的问题类型: ${agents
             .map((item) => `${item.value},返回:'${item.key}'`)
             .join(';')}`,
           enum: agents.map((item) => item.key)
         }
       },
       required: ['type']
     }
   };
   const ai = getAIApi(user.openaiAccount, 48000);
@@ -115,12 +121,19 @@ ${systemPrompt}
     model: cqModel.model,
     temperature: 0,
     messages: [...adaptMessages],
-    function_call: { name: agentFunName },
-    functions: [agentFunction]
+    tools: [
+      {
+        type: 'function',
+        function: agentFunction
+      }
+    ],
+    tool_choice: { type: 'function', function: { name: agentFunName } }
   });

   try {
-    const arg = JSON.parse(response.choices?.[0]?.message?.function_call?.arguments || '');
+    const arg = JSON.parse(
+      response?.choices?.[0]?.message?.tool_calls?.[0]?.function?.arguments || ''
+    );
+
     return {
       arg,
@@ -130,7 +143,7 @@ ${systemPrompt}
     console.log(agentFunction.parameters);
     console.log(response.choices?.[0]?.message);

-    console.log('Your model may not support function_call', error);
+    console.log('Your model may not support tool_call', error);

     return {
       arg: {},
@@ -142,15 +155,16 @@ ${systemPrompt}
 async function completions({
   cqModel,
   user,
-  inputs: { agents, systemPrompt = '', history = [], userChatInput }
+  histories,
+  inputs: { agents, systemPrompt = '', userChatInput }
 }: Props & { cqModel: FunctionModelItemType }) {
   const messages: ChatItemType[] = [
     {
       obj: ChatRoleEnum.Human,
       value: replaceVariable(cqModel.functionPrompt || Prompt_CQJson, {
         systemPrompt,
-        typeList: agents.map((item) => `ID: "${item.key}", 问题类型:${item.value}`).join('\n'),
-        text: `${history.map((item) => `${item.obj}:${item.value}`).join('\n')}
+        typeList: agents.map((item) => `{"${item.value}": ${item.key}}`).join('\n'),
+        text: `${histories.map((item) => `${item.obj}:${item.value}`).join('\n')}
 Human:${userChatInput}`
       })
     }
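
[Editor's note] Both agent modules (classifyQuestion above, contentExtract below) migrate from OpenAI's deprecated `functions`/`function_call` request fields to `tools`/`tool_choice`, and read the result from `message.tool_calls` instead of `message.function_call`. A hedged sketch of the new call shape against the official `openai` Node SDK; the model name and schema here are placeholders, not values from the commit:

import OpenAI from 'openai';

async function classify(userInput: string) {
  const openai = new OpenAI();
  const response = await openai.chat.completions.create({
    model: 'gpt-3.5-turbo',
    temperature: 0,
    messages: [{ role: 'user', content: userInput }],
    tools: [
      {
        type: 'function',
        function: {
          name: 'classify_question',
          description: 'Classify the question into one of the known types.',
          parameters: {
            type: 'object',
            properties: {
              type: { type: 'string', enum: ['greeting', 'other'] }
            },
            required: ['type']
          }
        }
      }
    ],
    // Force the model to call this specific function, mirroring the hunks above.
    tool_choice: { type: 'function', function: { name: 'classify_question' } }
  });

  // Arguments arrive as a JSON string on the first tool call.
  return JSON.parse(
    response.choices?.[0]?.message?.tool_calls?.[0]?.function?.arguments || '{}'
  );
}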
@@ -9,6 +9,7 @@ import type { ModuleDispatchProps } from '@/types/core/chat/type';
 import { Prompt_ExtractJson } from '@/global/core/prompt/agent';
 import { replaceVariable } from '@fastgpt/global/common/string/tools';
 import { FunctionModelItemType } from '@fastgpt/global/core/ai/model.d';
+import { getHistories } from '../utils';

 type Props = ModuleDispatchProps<{
   [ModuleInputKeyEnum.history]?: ChatItemType[];
@@ -23,12 +24,13 @@ type Response = {
   [ModuleOutputKeyEnum.responseData]: moduleDispatchResType;
 };

-const agentFunName = 'agent_extract_data';
+const agentFunName = 'extract_json_data';

 export async function dispatchContentExtract(props: Props): Promise<Response> {
   const {
     user,
-    inputs: { content, description, extractKeys }
+    histories,
+    inputs: { content, history = 6, description, extractKeys }
   } = props;

   if (!content) {
@@ -41,11 +43,13 @@ export async function dispatchContentExtract(props: Props): Promise<Response> {
     if (extractModel.functionCall) {
       return functionCall({
         ...props,
+        histories: getHistories(history, histories),
         extractModel
       });
     }
     return completions({
       ...props,
+      histories: getHistories(history, histories),
       extractModel
     });
   })();
@@ -88,13 +92,24 @@ export async function dispatchContentExtract(props: Props): Promise<Response> {
 async function functionCall({
   extractModel,
   user,
-  inputs: { history = [], content, extractKeys, description }
+  histories,
+  inputs: { content, extractKeys, description }
 }: Props & { extractModel: FunctionModelItemType }) {
   const messages: ChatItemType[] = [
-    ...history,
+    ...histories,
     {
       obj: ChatRoleEnum.Human,
-      value: content
+      value: `<任务描述>
+${description || '根据用户要求提取适当的 JSON 字符串。'}
+
+- 如果字段为空,你返回空字符串。
+- 不要换行。
+- 结合历史记录和文本进行提取。
+</任务描述>
+
+<文本>
+${content}
+</文本>`
     }
   ];
   const filterMessages = ChatContextFilter({
@@ -120,7 +135,7 @@ async function functionCall({
   // function body
   const agentFunction = {
     name: agentFunName,
-    description: `${description}\n如果内容不存在,返回空字符串。`,
+    description,
     parameters: {
       type: 'object',
       properties,
@@ -134,17 +149,24 @@ async function functionCall({
     model: extractModel.model,
     temperature: 0,
     messages: [...adaptMessages],
-    function_call: { name: agentFunName },
-    functions: [agentFunction]
+    tools: [
+      {
+        type: 'function',
+        function: agentFunction
+      }
+    ],
+    tool_choice: { type: 'function', function: { name: agentFunName } }
   });

   const arg: Record<string, any> = (() => {
     try {
-      return JSON.parse(response.choices?.[0]?.message?.function_call?.arguments || '{}');
+      return JSON.parse(
+        response?.choices?.[0]?.message?.tool_calls?.[0]?.function?.arguments || '{}'
+      );
     } catch (error) {
       console.log(agentFunction.parameters);
       console.log(response.choices?.[0]?.message);
-      console.log('Your model may not support function_call', error);
+      console.log(response.choices?.[0]?.message?.tool_calls?.[0]?.function);
+      console.log('Your model may not support tool_call', error);
       return {};
     }
   })();
@@ -159,7 +181,8 @@ async function functionCall({
 async function completions({
   extractModel,
   user,
-  inputs: { history = [], content, extractKeys, description }
+  histories,
+  inputs: { content, extractKeys, description }
 }: Props & { extractModel: FunctionModelItemType }) {
   const messages: ChatItemType[] = [
     {
@@ -169,12 +192,10 @@ async function completions({
         json: extractKeys
           .map(
             (item) =>
-              `key="${item.key}",描述="${item.desc}",required="${
-                item.required ? 'true' : 'false'
-              }"`
+              `{"key":"${item.key}", "description":"${item.desc}", "required":${item.required}}`
           )
           .join('\n'),
-        text: `${history.map((item) => `${item.obj}:${item.value}`).join('\n')}
+        text: `${histories.map((item) => `${item.obj}:${item.value}`).join('\n')}
 Human: ${content}`
       })
     }
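
[Editor's note] In the completions fallback, each extract field is now serialized as a one-line JSON descriptor instead of a `key="...",描述="..."` string, presumably because a JSON shape is easier for the model to mirror back. A small self-contained sketch of the mapping; the `ExtractKey` shape is assumed from context:

type ExtractKey = { key: string; desc: string; required: boolean };

const extractKeys: ExtractKey[] = [
  { key: 'city', desc: '城市名', required: true },
  { key: 'date', desc: '日期', required: false }
];

// One descriptor per line, as injected into the `json` slot of Prompt_ExtractJson:
const json = extractKeys
  .map(
    (item) => `{"key":"${item.key}", "description":"${item.desc}", "required":${item.required}}`
  )
  .join('\n');
console.log(json);
// {"key":"city", "description":"城市名", "required":true}
// {"key":"date", "description":"日期", "required":false}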
@@ -22,11 +22,12 @@ import { getChatModel, ModelTypeEnum } from '@/service/core/ai/model';
 import type { SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type';
 import { formatStr2ChatContent } from '@fastgpt/service/core/chat/utils';
 import { ModuleInputKeyEnum, ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
+import { getHistories } from '../utils';

 export type ChatProps = ModuleDispatchProps<
   AIChatModuleProps & {
     [ModuleInputKeyEnum.userChatInput]: string;
-    [ModuleInputKeyEnum.history]?: ChatItemType[];
+    [ModuleInputKeyEnum.history]?: ChatItemType[] | number;
     [ModuleInputKeyEnum.aiChatDatasetQuote]?: SearchDataResponseItemType[];
   }
 >;
@@ -43,12 +44,13 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp
   stream = false,
   detail = false,
   user,
+  histories,
   outputs,
   inputs: {
     model,
     temperature = 0,
     maxToken = 4000,
-    history = [],
+    history = 6,
     quoteQA = [],
     userChatInput,
     isResponseAnswerText = true,
@@ -63,6 +65,8 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp

   stream = stream && isResponseAnswerText;

+  const chatHistories = getHistories(history, histories);
+
   // temperature adapt
   const modelConstantsData = getChatModel(model);
@@ -88,7 +92,7 @@ export const dispatchChatCompletion = async (props: ChatProps): Promise<ChatResp

   const { messages, filterMessages } = getChatMessages({
     model: modelConstantsData,
-    history,
+    histories: chatHistories,
     quoteText,
     quotePrompt,
     userChatInput,
@@ -265,14 +269,14 @@ function filterQuote({
 function getChatMessages({
   quotePrompt,
   quoteText,
-  history = [],
+  histories = [],
   systemPrompt,
   userChatInput,
   model
 }: {
   quotePrompt?: string;
   quoteText: string;
-  history: ChatProps['inputs']['history'];
+  histories: ChatItemType[];
   systemPrompt: string;
   userChatInput: string;
   model: ChatModelItemType;
@@ -293,7 +297,7 @@ function getChatMessages({
         }
       ]
     : []),
-    ...history,
+    ...histories,
     {
       obj: ChatRoleEnum.Human,
       value: question
@@ -319,7 +323,7 @@ function getMaxTokens({
 }: {
   maxToken: number;
   model: ChatModelItemType;
-  filterMessages: ChatProps['inputs']['history'];
+  filterMessages: ChatItemType[];
 }) {
   const tokensLimit = model.maxContext;
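
[Editor's note] After these hunks, the chat module's `history` input is `ChatItemType[] | number`: a number is a window size over the full conversation record that dispatchModules now threads through props as `histories`, while an array is still accepted for legacy wiring. The dispatcher resolves it once up front, so every helper below (`getChatMessages`, `getMaxTokens`) works with a concrete `ChatItemType[]`. Illustrative resolution, names as in the hunks:

import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import { getHistories } from '../utils';

declare const histories: ChatItemType[]; // full record, injected by dispatchModules
const chatHistories = getHistories(6, histories); // -> the last 6 messages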
@@ -3,8 +3,8 @@ import { ModuleInputKeyEnum } from '@fastgpt/global/core/module/constants';
 import { ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
 import { RunningModuleItemType } from '@/types/app';
 import { ModuleDispatchProps } from '@/types/core/chat/type';
-import type { ChatHistoryItemResType } from '@fastgpt/global/core/chat/type.d';
-import { FlowNodeTypeEnum } from '@fastgpt/global/core/module/node/constant';
+import type { ChatHistoryItemResType, ChatItemType } from '@fastgpt/global/core/chat/type.d';
+import { FlowNodeInputTypeEnum, FlowNodeTypeEnum } from '@fastgpt/global/core/module/node/constant';
 import { ModuleItemType } from '@fastgpt/global/core/module/type';
 import { UserType } from '@fastgpt/global/support/user/type';
 import { replaceVariable } from '@fastgpt/global/common/string/tools';
@@ -25,7 +25,6 @@ import { dispatchAppRequest } from './tools/runApp';
 import { dispatchRunPlugin } from './plugin/run';
 import { dispatchPluginInput } from './plugin/runInput';
 import { dispatchPluginOutput } from './plugin/runOutput';
-import { AuthUserTypeEnum } from '@fastgpt/global/support/permission/constant';

 /* running */
 export async function dispatchModules({
@@ -36,7 +35,8 @@ export async function dispatchModules({
   appId,
   modules,
   chatId,
-  params = {},
+  histories = [],
+  startParams = {},
   variables = {},
   stream = false,
   detail = false
@@ -48,7 +48,8 @@ export async function dispatchModules({
   appId: string;
   modules: ModuleItemType[];
   chatId?: string;
-  params?: Record<string, any>;
+  histories: ChatItemType[];
+  startParams?: Record<string, any>;
   variables?: Record<string, any>;
   stream?: boolean;
   detail?: boolean;
@@ -185,6 +186,7 @@ export async function dispatchModules({
       stream,
       detail,
       variables,
+      histories,
       outputs: module.outputs,
       inputs: params
     };
@@ -230,7 +232,12 @@ export async function dispatchModules({

   // start process with initInput
   const initModules = runningModules.filter((item) => initRunningModuleType[item.flowType]);
-  initModules.map((module) => moduleInput(module, params));
+  initModules.map((module) =>
+    moduleInput(module, {
+      ...startParams,
+      history: [] // abandon history field. History module will get histories from other fields.
+    })
+  );
   await checkModulesCanRun(initModules);

   // focus try to run pluginOutput
@@ -252,45 +259,54 @@ function loadModules(
   modules: ModuleItemType[],
   variables: Record<string, any>
 ): RunningModuleItemType[] {
-  return modules.map((module) => {
-    return {
-      moduleId: module.moduleId,
-      name: module.name,
-      flowType: module.flowType,
-      showStatus: module.showStatus,
-      inputs: module.inputs
-        .filter((item) => item.connected || item.value !== undefined) // filter unconnected target input
-        .map((item) => {
-          if (typeof item.value !== 'string') {
-            return {
-              key: item.key,
-              value: item.value
-            };
-          }
-
-          // variables replace
-          const replacedVal = replaceVariable(item.value, variables);
-
-          return {
-            key: item.key,
-            value: replacedVal
-          };
-        }),
-      outputs: module.outputs
-        .map((item) => ({
-          key: item.key,
-          answer: item.key === ModuleOutputKeyEnum.answerText,
-          value: undefined,
-          targets: item.targets
-        }))
-        .sort((a, b) => {
-          // finish output always at last
-          if (a.key === ModuleOutputKeyEnum.finish) return 1;
-          if (b.key === ModuleOutputKeyEnum.finish) return -1;
-          return 0;
-        })
-    };
-  });
+  return modules
+    .filter((item) => {
+      return ![FlowNodeTypeEnum.userGuide].includes(item.moduleId as any);
+    })
+    .map((module) => {
+      return {
+        moduleId: module.moduleId,
+        name: module.name,
+        flowType: module.flowType,
+        showStatus: module.showStatus,
+        inputs: module.inputs
+          .filter(
+            (item) =>
+              item.type === FlowNodeInputTypeEnum.systemInput ||
+              item.connected ||
+              item.value !== undefined
+          ) // filter unconnected target input
+          .map((item) => {
+            if (typeof item.value !== 'string') {
+              return {
+                key: item.key,
+                value: item.value
+              };
+            }
+
+            // variables replace
+            const replacedVal = replaceVariable(item.value, variables);
+
+            return {
+              key: item.key,
+              value: replacedVal
+            };
+          }),
+        outputs: module.outputs
+          .map((item) => ({
+            key: item.key,
+            answer: item.key === ModuleOutputKeyEnum.answerText,
+            value: undefined,
+            targets: item.targets
+          }))
+          .sort((a, b) => {
+            // finish output always at last
+            if (a.key === ModuleOutputKeyEnum.finish) return 1;
+            if (b.key === ModuleOutputKeyEnum.finish) return -1;
+            return 0;
+          })
+      };
+    });
 }

 /* sse response modules status */
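
[Editor's note] `dispatchModules` no longer receives a single `params` bag: callers pass `histories` (the full chat record, forwarded to every module through props) separately from `startParams` (values seeded into entry modules only), and the legacy `history` init field is force-emptied. The updated call sites in plugin/run and tools/runApp below follow this split. A hedged sketch of a caller; the surrounding variables (`res`, `app`, `appData`, `chatRecordsFromDb`) are illustrative:

// Inside an API handler, with `res`, `app`, `appData` already in scope:
await dispatchModules({
  res,
  appId: app.id,
  modules: appData.modules,
  histories: chatRecordsFromDb, // full ChatItemType[] conversation record
  startParams: { userChatInput }, // entry-module inputs only
  variables: {},
  stream: false,
  detail: false
});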
@@ -1,17 +1,19 @@
 import { ModuleInputKeyEnum } from '@fastgpt/global/core/module/constants';
 import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
 import type { ModuleDispatchProps } from '@/types/core/chat/type';
+import { getHistories } from '../utils';

 export type HistoryProps = ModuleDispatchProps<{
-  maxContext: number;
+  maxContext?: number;
   [ModuleInputKeyEnum.history]: ChatItemType[];
 }>;

 export const dispatchHistory = (props: Record<string, any>) => {
   const {
-    inputs: { maxContext = 5, history = [] }
+    histories,
+    inputs: { maxContext }
   } = props as HistoryProps;

   return {
-    history: maxContext > 0 ? history.slice(-maxContext) : []
+    history: getHistories(maxContext, histories)
   };
 };
@@ -35,7 +35,7 @@ export const dispatchRunPlugin = async (props: RunPluginProps): Promise<RunPlugi
       ...module,
       showStatus: false
     })),
-    params: data
+    startParams: data
   });

   const output = responseData.find((item) => item.moduleType === FlowNodeTypeEnum.pluginOutput);
@@ -56,8 +56,8 @@ export const dispatchAppRequest = async (props: Props): Promise<Response> => {
     ...props,
     appId: app.id,
     modules: appData.modules,
-    params: {
-      history,
+    histories: history,
+    startParams: {
       userChatInput
     }
   });
projects/app/src/service/moduleDispatch/utils.ts (new file, 9 lines)
@@ -0,0 +1,9 @@
+import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
+
+export const getHistories = (history?: ChatItemType[] | number, histories: ChatItemType[] = []) => {
+  if (!history) return [];
+  if (typeof history === 'number') return histories.slice(-history);
+  if (Array.isArray(history)) return history;
+
+  return [];
+};
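
[Editor's note] This new helper is the pivot of the whole change: every dispatcher now accepts either a numeric window or a concrete message array for `history`. A usage sketch; the import path is assumed via the app's `@/` alias:

import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import { getHistories } from '@/service/moduleDispatch/utils';

declare const all: ChatItemType[]; // conversation, oldest first

getHistories(2, all); // -> all.slice(-2): the two most recent items
getHistories(all, []); // -> all: a legacy array input passes through untouched
getHistories(undefined, all); // -> []
getHistories(0, all); // -> []: 0 is falsy, so the `!history` guard catches it

Worth noting: the falsy check makes a zero window return `[]` rather than reaching `histories.slice(-0)`, which evaluates as `slice(0)` and would have returned the entire array.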
@@ -1,7 +1,7 @@
 import { GET } from '@fastgpt/service/common/api/plusRequest';

 export const authTeamBalance = async (teamId: string) => {
-  if (global.systemEnv.pluginBaseUrl) {
+  if (global.systemEnv?.pluginBaseUrl) {
     return GET('/support/permission/authBalance', { teamId });
   }
   return true;
@@ -11,12 +11,14 @@ import { TeamMemberRoleEnum } from '@fastgpt/global/support/user/team/constant';
 token: team owner and chat owner have all permissions
 */
 export async function autChatCrud({
+  appId,
   chatId,
   shareId,
   outLinkUid,
   per = 'owner',
   ...props
 }: AuthModeType & {
+  appId: string;
   chatId?: string;
   shareId?: string;
   outLinkUid?: string;
@@ -28,7 +30,7 @@ export async function autChatCrud({
   const isOutLink = Boolean(shareId && outLinkUid);
   if (!chatId) return { isOutLink, uid: outLinkUid };

-  const chat = await MongoChat.findOne({ chatId }).lean();
+  const chat = await MongoChat.findOne({ appId, chatId }).lean();

   if (!chat) return { isOutLink, uid: outLinkUid };
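
[Editor's note] Scoping the chat lookup by `appId` appears to be the outlink fix the version title refers to: previously any valid `chatId` matched regardless of the app it belonged to, so a share link for one app could touch another app's chat record. Requiring both keys makes a foreign chatId behave exactly like a missing chat:

// Both identifiers must match; a chatId from another app now falls through
// to the same "not found" branch as a nonexistent chat.
const chat = await MongoChat.findOne({ appId, chatId }).lean();
if (!chat) return { isOutLink, uid: outLinkUid };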
@@ -2,6 +2,6 @@ import { POST } from '@fastgpt/service/common/api/plusRequest';
 import { SendInformProps } from '@fastgpt/global/support/user/inform/type';

 export function sendOneInform(data: SendInformProps) {
-  if (!global.systemEnv.pluginBaseUrl) return;
+  if (!global.systemEnv?.pluginBaseUrl) return;
   return POST('/support/user/inform/create', data);
 }
@@ -8,7 +8,7 @@ import { defaultQGModels } from '@fastgpt/global/core/ai/model';
 import { POST } from '@fastgpt/service/common/api/plusRequest';

 export function createBill(data: CreateBillProps) {
-  if (!global.systemEnv.pluginBaseUrl) return;
+  if (!global.systemEnv?.pluginBaseUrl) return;
   if (data.total === 0) {
     addLog.info('0 Bill', data);
   }
@@ -17,7 +17,7 @@ export function createBill(data: CreateBillProps) {
   } catch (error) {}
 }
 export function concatBill(data: ConcatBillProps) {
-  if (!global.systemEnv.pluginBaseUrl) return;
+  if (!global.systemEnv?.pluginBaseUrl) return;
   if (data.total === 0) {
     addLog.info('0 Bill', data);
   }