feat: agent and ui

Author: archer
Date: 2023-07-06 19:35:02 +08:00
Parent: 46f20c7dc3
Commit: 23642af6e2

44 changed files with 588 additions and 1148 deletions


@@ -79,7 +79,7 @@ export async function classifyQuestion({
     properties: {
       type: {
         type: 'string',
-        description: agents.map((item) => `${item.desc}, return: '${item.key}'`).join('; '),
+        description: agents.map((item) => `${item.value}, return: '${item.key}'`).join('; '),
         enum: agents.map((item) => item.key)
       }
     },
@@ -106,7 +106,10 @@
   if (!arg.type) {
     throw new Error('');
   }
-  console.log(arg.type);
+  console.log(
+    'intent result',
+    agents.findIndex((item) => item.key === arg.type)
+  );

   return {
     [arg.type]: 1
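Taken together, these two hunks switch the function-calling schema from the agents' `desc` field to `value`, and log the index of the matched agent rather than its raw key. For orientation, a minimal sketch of how the pieces fit; the `ClassifyAgent` shape and the sample entries are assumptions inferred from the call sites, not code from this commit:

// Hypothetical agent shape, inferred from the call sites in classifyQuestion.
type ClassifyAgent = { key: string; value: string };

const agents: ClassifyAgent[] = [
  { key: 'fastAnswer', value: 'Can be answered directly' }, // assumed sample entry
  { key: 'kbSearch', value: 'Needs a knowledge-base lookup' } // assumed sample entry
];

// Mirrors the diff: the enum lists the agent keys, and the description tells
// the model which key to return for which kind of question.
const typeProperty = {
  type: 'string',
  description: agents.map((item) => `${item.value}, return: '${item.key}'`).join('; '),
  enum: agents.map((item) => item.key)
};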


@@ -10,6 +10,7 @@ import type { ChatItemType } from '@/types/chat';
 import { ChatRoleEnum, sseResponseEventEnum } from '@/constants/chat';
 import { parseStreamChunk, textAdaptGptResponse } from '@/utils/adapt';
 import { getOpenAIApi, axiosConfig } from '@/service/ai/openai';
+import { SpecificInputEnum } from '@/constants/app';

 export type Props = {
   model: `${OpenAiChatEnum}`;
@@ -22,7 +23,7 @@ export type Props = {
   systemPrompt?: string;
   limitPrompt?: string;
 };
-export type Response = { answer: string };
+export type Response = { [SpecificInputEnum.answerText]: string };

 export default async function handler(req: NextApiRequest, res: NextApiResponse) {
   try {
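The `Response` type now derives its key from `SpecificInputEnum.answerText` instead of hard-coding `answer`, matching the `answerText: answer` return value further down in this file. The enum definition is not part of this diff; a plausible sketch, assuming the member's string value is simply 'answerText':

// Hypothetical sketch of the enum in @/constants/app; the value is assumed.
export enum SpecificInputEnum {
  answerText = 'answerText'
}

// Under that assumption, the computed property key makes this equivalent to:
//   export type Response = { answerText: string };
export type Response = { [SpecificInputEnum.answerText]: string };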
@@ -132,7 +133,8 @@ export async function chatCompletion({
   const chatAPI = getOpenAIApi();

   /* count response max token */
-  const promptsToken = modelToolMap[model].countTokens({
+  const promptsToken = modelToolMap.countTokens({
+    model,
     messages: filterMessages
   });
   maxToken = maxToken + promptsToken > modelTokenLimit ? modelTokenLimit - promptsToken : maxToken;
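This hunk, like the `tokenSlice` change in the knowledge-base search further down, flattens `modelToolMap` from a per-model lookup into a single helper that takes the model as an explicit argument. A sketch of the assumed before-and-after signatures, inferred purely from the call sites:

// Placeholder for the project's chat message type (assumed fields).
type Message = { obj: string; value: string };

// Before (assumed): one tool object per model, selected by key.
type ModelToolMapBefore = Record<
  string,
  { countTokens: (args: { messages: Message[] }) => number }
>;

// After (assumed): flat helpers that receive the model explicitly.
type ModelToolMapAfter = {
  countTokens: (args: { model: string; messages: Message[] }) => number;
  tokenSlice: (args: { model: string; maxToken: number; messages: Message[] }) => Message[];
};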
@@ -143,8 +145,8 @@
       temperature: Number(temperature || 0),
       max_tokens: maxToken,
       messages: adaptMessages,
-      frequency_penalty: 0.5, // higher values reduce repetition
-      presence_penalty: -0.5, // higher values make new content more likely
+      // frequency_penalty: 0.5, // higher values reduce repetition
+      // presence_penalty: -0.5, // higher values make new content more likely
       stream
     },
     {
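With both overrides commented out, the request falls back to the OpenAI API defaults, which are 0 for `frequency_penalty` and `presence_penalty`, so the model is no longer explicitly biased against repetition or toward new topics.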
@@ -184,7 +186,7 @@
   })();

   return {
-    answer
+    answerText: answer
   };
 }


@@ -92,8 +92,9 @@ export async function kbSearch({
   const searchRes: QuoteItemType[] = res?.[2]?.rows || [];

   // filter part quote by maxToken
-  const sliceResult = modelToolMap['gpt-3.5-turbo']
+  const sliceResult = modelToolMap
     .tokenSlice({
+      model: 'gpt-3.5-turbo',
       maxToken,
       messages: searchRes.map((item, i) => ({
         obj: ChatRoleEnum.System,
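As with `countTokens` above, `tokenSlice` now receives the model name as an explicit `model` argument instead of selecting a per-model tool object, so the hard-coded 'gpt-3.5-turbo' moves from the lookup key into the call.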


@@ -10,12 +10,7 @@ import { getChatHistory } from './getHistory';
 import { saveChat } from '@/pages/api/chat/saveChat';
 import { sseResponse } from '@/service/utils/tools';
 import { type ChatCompletionRequestMessage } from 'openai';
-import {
-  kbChatAppDemo,
-  chatAppDemo,
-  SpecificInputEnum,
-  AppModuleItemTypeEnum
-} from '@/constants/app';
+import { SpecificInputEnum, AppModuleItemTypeEnum } from '@/constants/app';
 import { model, Types } from 'mongoose';
 import { moduleFetch } from '@/service/api/request';
 import { AppModuleItemType, RunningModuleItemType } from '@/types/app';
@@ -42,7 +37,6 @@ export type ChatResponseType = {
   quoteLen?: number;
 };

-/* send prompt */
 export default withNextCors(async function handler(req: NextApiRequest, res: NextApiResponse) {
   res.on('close', () => {
     res.end();
@@ -117,7 +111,6 @@ export default withNextCors(async function handler(req: NextApiRequest, res: NextApiResponse) {
     },
     stream
   });
-  console.log(responseData, answerText);

   // save chat
   if (typeof chatId === 'string') {
@@ -354,7 +347,7 @@ function loadModules(modules: AppModuleItemType[]): RunningModuleItemType[] {
     })),
     outputs: module.outputs.map((item) => ({
       key: item.key,
-      answer: item.type === FlowOutputItemTypeEnum.answer,
+      answer: item.key === SpecificInputEnum.answerText,
       response: item.response,
       value: undefined,
       targets: item.targets
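The final hunk changes how `loadModules` flags the user-visible answer output: instead of comparing the output's `type` to `FlowOutputItemTypeEnum.answer`, it checks whether the output's `key` equals the shared `answerText` key. A minimal sketch of the resulting mapping; the `ModuleOutput` shape is an assumption based on the fields visible in the diff:

// Hypothetical output shape, based on the fields used in the diff.
type ModuleOutput = {
  key: string;
  response?: boolean;
  targets: { moduleId: string; key: string }[];
};

const ANSWER_KEY = 'answerText'; // stands in for SpecificInputEnum.answerText

function mapOutputs(outputs: ModuleOutput[]) {
  return outputs.map((item) => ({
    key: item.key,
    // An output is the streamed answer when its key matches the shared
    // answerText key, regardless of its declared flow type.
    answer: item.key === ANSWER_KEY,
    response: item.response,
    value: undefined,
    targets: item.targets
  }));
}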