4.6.5 - CoreferenceResolution Module (#631)

This commit is contained in:
Archer
2023-12-22 10:47:31 +08:00
committed by GitHub
parent 41115a96c0
commit cd682d4275
112 changed files with 4163 additions and 2700 deletions

View File

@@ -28,7 +28,7 @@ export const simpleMarkdownText = (rawText: string) => {
['####', '###', '##', '#', '```', '~~~'].forEach((item, i) => {
const reg = new RegExp(`\\n\\s*${item}`, 'g');
if (reg.test(rawText)) {
rawText = rawText.replace(new RegExp(`(\\n)\\s*(${item})`, 'g'), '$1$2');
rawText = rawText.replace(new RegExp(`(\\n)( *)(${item})`, 'g'), '$1$3');
}
});

View File

@@ -14,7 +14,7 @@ export type ChatModelItemType = LLMModelItemType & {
};
export type FunctionModelItemType = LLMModelItemType & {
functionCall: boolean;
toolChoice: boolean;
functionPrompt: string;
};

View File

@@ -1,63 +1,5 @@
import type {
LLMModelItemType,
ChatModelItemType,
FunctionModelItemType,
VectorModelItemType,
AudioSpeechModelType,
WhisperModelType,
ReRankModelItemType
} from './model.d';
import type { LLMModelItemType, VectorModelItemType } from './model.d';
// Fallback chat-model catalogue used when the deployment supplies no model
// configuration of its own. All prices are 0 here — presumably real pricing
// is injected from deployment config; TODO confirm against the config loader.
export const defaultChatModels: ChatModelItemType[] = [
// 16k-context GPT-3.5 (1106 snapshot)
{
model: 'gpt-3.5-turbo-1106',
name: 'GPT35-1106',
price: 0,
maxContext: 16000,
maxResponse: 4000,
quoteMaxToken: 2000,
maxTemperature: 1.2,
censor: false,
vision: false,
defaultSystemChatPrompt: ''
},
// 16k-context GPT-3.5
{
model: 'gpt-3.5-turbo-16k',
name: 'GPT35-16k',
maxContext: 16000,
maxResponse: 16000,
price: 0,
quoteMaxToken: 8000,
maxTemperature: 1.2,
censor: false,
vision: false,
defaultSystemChatPrompt: ''
},
// 8k-context GPT-4
{
model: 'gpt-4',
name: 'GPT4-8k',
maxContext: 8000,
maxResponse: 8000,
price: 0,
quoteMaxToken: 4000,
maxTemperature: 1.2,
censor: false,
vision: false,
defaultSystemChatPrompt: ''
},
// 128k-context GPT-4 with image input enabled (vision: true)
{
model: 'gpt-4-vision-preview',
name: 'GPT4-Vision',
maxContext: 128000,
maxResponse: 4000,
price: 0,
quoteMaxToken: 100000,
maxTemperature: 1.2,
censor: false,
vision: true,
defaultSystemChatPrompt: ''
}
];
export const defaultQAModels: LLMModelItemType[] = [
{
model: 'gpt-3.5-turbo-16k',
@@ -67,46 +9,6 @@ export const defaultQAModels: LLMModelItemType[] = [
price: 0
}
];
// Fallback models for the CQ (presumably "classify question") module — TODO
// confirm the acronym. Both entries rely on OpenAI function calling
// (functionCall: true) and carry no extra function prompt.
export const defaultCQModels: FunctionModelItemType[] = [
{
model: 'gpt-3.5-turbo-1106',
name: 'GPT35-1106',
maxContext: 16000,
maxResponse: 4000,
price: 0,
functionCall: true,
functionPrompt: ''
},
{
model: 'gpt-4',
name: 'GPT4-8k',
maxContext: 8000,
maxResponse: 8000,
price: 0,
functionCall: true,
functionPrompt: ''
}
];
// Fallback model list for the content-extraction module; uses OpenAI
// function calling with no additional function prompt.
export const defaultExtractModels: FunctionModelItemType[] = [
{
model: 'gpt-3.5-turbo-1106',
name: 'GPT35-1106',
maxContext: 16000,
maxResponse: 4000,
price: 0,
functionCall: true,
functionPrompt: ''
}
];
// Fallback model list for the QG (question-guide) module, used when the
// deployment configures none.
export const defaultQGModels: LLMModelItemType[] = [
{
model: 'gpt-3.5-turbo-1106',
name: 'GPT35-1106',
// gpt-3.5-turbo-1106 has a 16k context window. The previous value (1600)
// dropped a zero — it was also smaller than maxResponse, which is
// nonsensical — so it is corrected to match the sibling entries above.
maxContext: 16000,
maxResponse: 4000,
price: 0
}
];
export const defaultVectorModels: VectorModelItemType[] = [
{
@@ -117,27 +19,3 @@ export const defaultVectorModels: VectorModelItemType[] = [
maxToken: 3000
}
];
// No rerank models by default — presumably populated from deployment config.
export const defaultReRankModels: ReRankModelItemType[] = [];
// Fallback text-to-speech model list: OpenAI tts-1 with its six stock voices.
// bufferId looks like a provider-scoped cache key for generated audio —
// TODO confirm against the TTS caching code.
export const defaultAudioSpeechModels: AudioSpeechModelType[] = [
{
model: 'tts-1',
name: 'OpenAI TTS1',
price: 0,
voices: [
{ label: 'Alloy', value: 'Alloy', bufferId: 'openai-Alloy' },
{ label: 'Echo', value: 'Echo', bufferId: 'openai-Echo' },
{ label: 'Fable', value: 'Fable', bufferId: 'openai-Fable' },
{ label: 'Onyx', value: 'Onyx', bufferId: 'openai-Onyx' },
{ label: 'Nova', value: 'Nova', bufferId: 'openai-Nova' },
{ label: 'Shimmer', value: 'Shimmer', bufferId: 'openai-Shimmer' }
]
}
];
// Fallback speech-to-text model (OpenAI whisper-1); single object, not a list.
export const defaultWhisperModel: WhisperModelType = {
model: 'whisper-1',
name: 'Whisper1',
price: 0
};

View File

@@ -67,6 +67,9 @@ export type AppSimpleEditFormType = {
searchMode: `${DatasetSearchModeEnum}`;
searchEmptyText: string;
};
cfr: {
background: string;
};
userGuide: {
welcomeText: string;
variables: {
@@ -111,6 +114,9 @@ export type AppSimpleEditConfigTemplateType = {
searchMode: `${DatasetSearchModeEnum}`;
searchEmptyText?: boolean;
};
cfr?: {
background?: boolean;
};
userGuide?: {
welcomeText?: boolean;
variables?: boolean;

View File

@@ -3,23 +3,23 @@ import { FlowNodeTypeEnum } from '../module/node/constant';
import { ModuleOutputKeyEnum, ModuleInputKeyEnum } from '../module/constants';
import type { FlowNodeInputItemType } from '../module/node/type.d';
import { getGuideModule, splitGuideModule } from '../module/utils';
import { defaultChatModels } from '../ai/model';
import { ModuleItemType } from '../module/type.d';
import { DatasetSearchModeEnum } from '../dataset/constant';
export const getDefaultAppForm = (templateId = 'fastgpt-universal'): AppSimpleEditFormType => {
const defaultChatModel = defaultChatModels[0];
return {
templateId,
aiSettings: {
model: defaultChatModel?.model,
model: 'gpt-3.5-turbo',
systemPrompt: '',
temperature: 0,
isResponseAnswerText: true,
quotePrompt: '',
quoteTemplate: '',
maxToken: defaultChatModel ? defaultChatModel.maxResponse / 2 : 4000
maxToken: 4000
},
cfr: {
background: ''
},
dataset: {
datasets: [],
@@ -116,6 +116,11 @@ export const appModules2Form = ({
questionGuide: questionGuide,
tts: ttsConfig
};
} else if (module.flowType === FlowNodeTypeEnum.cfr) {
const value = module.inputs.find((item) => item.key === ModuleInputKeyEnum.aiSystemPrompt);
if (value) {
defaultAppForm.cfr.background = value.value;
}
}
});

View File

@@ -93,6 +93,7 @@ export type moduleDispatchResType = {
model?: string;
query?: string;
contextTotalLen?: number;
textOutput?: string;
// chat
temperature?: number;
@@ -119,9 +120,7 @@ export type moduleDispatchResType = {
// plugin output
pluginOutput?: Record<string, any>;
// text editor
textOutput?: string;
pluginDetail?: ChatHistoryItemResType[];
// tf switch
tfSwitchResult?: boolean;

View File

@@ -38,7 +38,6 @@ export enum FlowNodeOutputTypeEnum {
}
export enum FlowNodeTypeEnum {
empty = 'empty',
userGuide = 'userGuide',
questionInput = 'questionInput',
historyNode = 'historyNode',
@@ -52,10 +51,10 @@ export enum FlowNodeTypeEnum {
pluginModule = 'pluginModule',
pluginInput = 'pluginInput',
pluginOutput = 'pluginOutput',
textEditor = 'textEditor',
cfr = 'cfr',
// abandon
variable = 'variable'
}
export const EDGE_TYPE = 'smoothstep';
export const EDGE_TYPE = 'default';

View File

@@ -141,7 +141,7 @@ export const AiChatModule: FlowModuleTemplateType = {
},
{
key: ModuleOutputKeyEnum.answerText,
label: 'AI回复',
label: 'AI回复内容',
description: '将在 stream 回复完毕后触发',
valueType: ModuleIOValueTypeEnum.string,
type: FlowNodeOutputTypeEnum.source,

View File

@@ -0,0 +1,61 @@
import {
FlowNodeInputTypeEnum,
FlowNodeOutputTypeEnum,
FlowNodeTypeEnum
} from '../../node/constant';
import { FlowModuleTemplateType } from '../../type.d';
import {
ModuleIOValueTypeEnum,
ModuleInputKeyEnum,
ModuleOutputKeyEnum,
ModuleTemplateTypeEnum
} from '../../constants';
import {
Input_Template_History,
Input_Template_Switch,
Input_Template_UserChatInput
} from '../input';
// Coreference-resolution (CFR) module template: given chat history and an
// optional background description, it rewrites the user's question into a
// self-contained query for downstream modules.
export const AiCFR: FlowModuleTemplateType = {
// Fix: the template id previously reused FlowNodeTypeEnum.chatNode (a
// copy-paste from the AI-chat template), colliding with that template's id.
// Every other template keys `id` by its own flowType (cf. EmptyModule), so
// this one is keyed by cfr.
id: FlowNodeTypeEnum.cfr,
templateType: ModuleTemplateTypeEnum.tools,
flowType: FlowNodeTypeEnum.cfr,
avatar: '/imgs/module/cfr.svg',
name: 'core.module.template.cfr',
intro: 'core.module.template.cfr intro',
showStatus: true,
inputs: [
Input_Template_Switch,
// Model selector (reuses the extract-model picker UI).
{
key: ModuleInputKeyEnum.aiModel,
type: FlowNodeInputTypeEnum.selectExtractModel,
label: 'core.module.input.label.aiModel',
required: true,
valueType: ModuleIOValueTypeEnum.string,
showTargetInApp: false,
showTargetInPlugin: false
},
// Optional background text (max 300 chars) guiding the rewrite.
{
key: ModuleInputKeyEnum.aiSystemPrompt,
type: FlowNodeInputTypeEnum.textarea,
label: 'core.module.input.label.cfr background',
max: 300,
valueType: ModuleIOValueTypeEnum.string,
description: 'core.module.input.description.cfr background',
placeholder: 'core.module.input.placeholder.cfr background',
showTargetInApp: true,
showTargetInPlugin: true
},
Input_Template_History,
Input_Template_UserChatInput
],
outputs: [
// The rewritten, self-contained question.
{
key: ModuleOutputKeyEnum.text,
label: 'core.module.output.label.cfr result',
valueType: ModuleIOValueTypeEnum.string,
type: FlowNodeOutputTypeEnum.source,
targets: []
}
]
};

View File

@@ -1,14 +0,0 @@
import { ModuleTemplateTypeEnum } from '../../constants';
import { FlowNodeTypeEnum } from '../../node/constant';
import { FlowModuleTemplateType } from '../../type.d';
// Placeholder template for nodes whose module type no longer exists; empty
// inputs/outputs let old flows containing such nodes still load.
export const EmptyModule: FlowModuleTemplateType = {
id: FlowNodeTypeEnum.empty,
templateType: ModuleTemplateTypeEnum.other,
flowType: FlowNodeTypeEnum.empty,
avatar: '/imgs/module/cq.png',
// Runtime string (zh): "This module has been removed" — do not translate.
name: '该模块已被移除',
intro: '',
inputs: [],
outputs: []
};

View File

@@ -38,7 +38,7 @@ export type ModuleItemType = {
outputs: FlowNodeOutputItemType[];
};
/* function type */
/* --------------- function type -------------------- */
// variable
export type VariableItemType = {
id: string;
@@ -74,3 +74,46 @@ export type ContextExtractAgentItemType = {
required: boolean;
enum?: string;
};
/* -------------- running module -------------- */
// Runtime shape of a flow module while a run executes: static identity fields
// copied from ModuleItemType, plus flattened input values and output wiring.
export type RunningModuleItemType = {
name: ModuleItemType['name'];
moduleId: ModuleItemType['moduleId'];
flowType: ModuleItemType['flowType'];
showStatus?: ModuleItemType['showStatus'];
} & {
// Resolved input values, keyed by input key.
inputs: {
key: string;
value?: any;
}[];
// Each output lists downstream targets (moduleId + input key). The
// `answer`/`response` flags appear to mark special output roles —
// TODO confirm semantics against the dispatcher.
outputs: {
key: string;
answer?: boolean;
response?: boolean;
value?: any;
targets: {
moduleId: string;
key: string;
}[];
}[];
};
// Shared per-request context handed to every module dispatch in one chat run.
export type ChatDispatchProps = {
res: NextApiResponse;
// 'test' presumably means a debug run from the editor, 'chat' a real
// conversation — TODO confirm against callers.
mode: 'test' | 'chat';
teamId: string;
tmbId: string;
user: UserType;
appId: string;
chatId?: string;
responseChatItemId?: string;
histories: ChatItemType[];
variables: Record<string, any>;
stream: boolean;
detail: boolean; // response detail
};
// Props for a single module's dispatch handler: the shared chat context plus
// this module's typed inputs (T) and its declared outputs.
export type ModuleDispatchProps<T> = ChatDispatchProps & {
outputs: RunningModuleItemType['outputs'];
inputs: T;
};

View File

@@ -15,14 +15,19 @@ export type PluginItemSchema = {
};
/* plugin template */
export type PluginTemplateType = {
export type PluginTemplateType = PluginRuntimeType & {
author?: string;
id: string;
source: `${PluginSourceEnum}`;
templateType: FlowModuleTemplateType['templateType'];
intro: string;
modules: ModuleItemType[];
};
// Minimal data needed to run a plugin. teamId is optional — presumably absent
// for built-in/system plugins; TODO confirm.
export type PluginRuntimeType = {
teamId?: string;
name: string;
avatar: string;
intro: string;
showStatus?: boolean;
modules: ModuleItemType[];
};