From 90406fce9ea710ac9bbd07312e5c3b802909aa7f Mon Sep 17 00:00:00 2001
From: archer <545436317@qq.com>
Date: Mon, 7 Aug 2023 13:48:53 +0800
Subject: [PATCH] feat: default message

---
 client/data/config.json                       | 17 ++++-------
 .../src/service/moduleDispatch/chat/oneapi.ts | 29 ++++++++++++++-----
 client/src/types/model.d.ts                   |  2 ++
 3 files changed, 29 insertions(+), 19 deletions(-)

diff --git a/client/data/config.json b/client/data/config.json
index bffa8d34b..8bfc16784 100644
--- a/client/data/config.json
+++ b/client/data/config.json
@@ -22,7 +22,8 @@
       "contextMaxToken": 4000,
       "quoteMaxToken": 2000,
       "maxTemperature": 1.2,
-      "price": 1.5
+      "price": 1.5,
+      "defaultSystem": ""
     },
     {
       "model": "gpt-3.5-turbo-16k",
@@ -30,15 +31,8 @@
       "contextMaxToken": 16000,
       "quoteMaxToken": 8000,
       "maxTemperature": 1.2,
-      "price": 3
-    },
-    {
-      "model": "ERNIE-Bot",
-      "name": "文心一言",
-      "contextMaxToken": 3000,
-      "quoteMaxToken": 1500,
-      "maxTemperature": 1,
-      "price": 1.2
+      "price": 3,
+      "defaultSystem": ""
     },
     {
       "model": "gpt-4",
@@ -46,7 +40,8 @@
       "contextMaxToken": 8000,
       "quoteMaxToken": 4000,
       "maxTemperature": 1.2,
-      "price": 45
+      "price": 45,
+      "defaultSystem": ""
     }
   ],
   "QAModels": [
diff --git a/client/src/service/moduleDispatch/chat/oneapi.ts b/client/src/service/moduleDispatch/chat/oneapi.ts
index c993eba84..978f0309a 100644
--- a/client/src/service/moduleDispatch/chat/oneapi.ts
+++ b/client/src/service/moduleDispatch/chat/oneapi.ts
@@ -16,6 +16,7 @@ import { countModelPrice } from '@/service/events/pushBill';
 import { ChatModelItemType } from '@/types/model';
 import { UserModelSchema } from '@/types/mongoSchema';
 import { textCensor } from '@/service/api/plugins';
+import { ChatCompletionRequestMessageRoleEnum } from 'openai';
 
 export type ChatProps = {
   res: NextApiResponse;
@@ -66,13 +67,15 @@ export const dispatchChatCompletion = async (props: Record<string, any>): Promis
     model: modelConstantsData
   });
 
-  await textCensor({
-    text: `${systemPrompt}
-    ${quotePrompt}
-    ${limitPrompt}
-    ${userChatInput}
-    `
-  });
+  if (modelConstantsData.censor) {
+    await textCensor({
+      text: `${systemPrompt}
+      ${quotePrompt}
+      ${limitPrompt}
+      ${userChatInput}
+      `
+    });
+  }
 
   const { messages, filterMessages } = getChatMessages({
     model: modelConstantsData,
@@ -98,7 +101,17 @@
       model,
       temperature,
       max_tokens,
-      messages,
+      messages: [
+        ...(modelConstantsData.defaultSystem
+          ? [
+              {
+                role: ChatCompletionRequestMessageRoleEnum.System,
+                content: modelConstantsData.defaultSystem
+              }
+            ]
+          : []),
+        ...messages
+      ],
       // frequency_penalty: 0.5, // larger values reduce repetition
       // presence_penalty: -0.5, // larger values encourage new content
       stream
diff --git a/client/src/types/model.d.ts b/client/src/types/model.d.ts
index 0f42b9fdb..379e82af0 100644
--- a/client/src/types/model.d.ts
+++ b/client/src/types/model.d.ts
@@ -5,6 +5,8 @@ export type ChatModelItemType = {
   quoteMaxToken: number;
   maxTemperature: number;
   price: number;
+  censor?: boolean;
+  defaultSystem?: string;
 };
 export type QAModelItemType = {
   model: string;
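
Notes (reviewer aside, not part of the patch):

With this change, each entry under "ChatModels" in config.json may carry a
per-model default system prompt via "defaultSystem", and text censoring
becomes opt-in through the new optional "censor" flag on ChatModelItemType.
An entry using both could look like the following sketch (the model id,
"censor" value, and prompt text are illustrative, not taken from this patch):

    {
      "model": "gpt-3.5-turbo",
      "contextMaxToken": 4000,
      "quoteMaxToken": 2000,
      "maxTemperature": 1.2,
      "price": 1.5,
      "censor": true,
      "defaultSystem": "You are a helpful assistant."
    }

For reference, a minimal standalone sketch of the message-prepending logic
added to dispatchChatCompletion, assuming the openai v3 SDK types already
imported elsewhere in this file; the helper name withDefaultSystem is
hypothetical and does not exist in the repo:

    import {
      ChatCompletionRequestMessage,
      ChatCompletionRequestMessageRoleEnum
    } from 'openai';
    import { ChatModelItemType } from '@/types/model';

    // If the model config defines a non-empty defaultSystem prompt, prepend
    // it as a system message; otherwise pass the messages through unchanged.
    export function withDefaultSystem(
      model: ChatModelItemType,
      messages: ChatCompletionRequestMessage[]
    ): ChatCompletionRequestMessage[] {
      return [
        ...(model.defaultSystem
          ? [
              {
                role: ChatCompletionRequestMessageRoleEnum.System,
                content: model.defaultSystem
              }
            ]
          : []),
        ...messages
      ];
    }

Because defaultSystem defaults to "" in config.json, the conditional spread
resolves to an empty array, so no extra system message is sent unless an
operator sets a non-empty value.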