Mirror of https://github.com/labring/FastGPT.git (synced 2025-07-29 09:44:47 +00:00)

feat: function call prompt version (#331)
@@ -62,5 +62,21 @@
     "name": "GPT35-16k",
     "maxToken": 16000,
     "price": 0
-  }
+  },
+  "ExtractModel": {
+    "model": "gpt-3.5-turbo-16k",
+    "functionCall": false,
+    "name": "GPT35-16k",
+    "maxToken": 16000,
+    "price": 0,
+    "prompt": ""
+  },
+  "CQModel": {
+    "model": "gpt-3.5-turbo-16k",
+    "functionCall": false,
+    "name": "GPT35-16k",
+    "maxToken": 16000,
+    "price": 0,
+    "prompt": ""
+  }
 }
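Setting "functionCall": false in either entry routes that agent through the new prompt-based path instead of OpenAI-style function calling, and a non-empty "prompt" overrides the built-in template. A hypothetical override for a self-hosted model without function-call support (the model name and token limit are illustrative, not from this commit):

import type { FunctionModelItemType } from '@/types/model';

// Hypothetical config: use the prompt-based fallback for a local model.
const extractModelOverride: FunctionModelItemType = {
  model: 'chatglm2-6b', // assumed local model name
  name: 'ChatGLM2',
  maxToken: 8000,
  price: 0,
  prompt: '', // empty string falls back to Prompt_ExtractJson
  functionCall: false // false selects the completions() path
};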
@@ -1,4 +1,4 @@
-### Fast GPT V4.4.3
+### Fast GPT V4.4.4

 1. Removed: the "-" qualifier. It still works in existing apps, but it will be removed entirely after 9/25, so please replace it in time.
 2. Added: quote template / quote prompt settings, so you can customize the format of quoted content to better fit your scenario.
@@ -56,7 +56,7 @@ const ResponseTags = ({

   return responseData.length === 0 ? null : (
     <Flex alignItems={'center'} mt={2} flexWrap={'wrap'}>
-      {chatAccount === 1 ? (
+      {chatAccount === 1 && (
         <>
           {quoteList.length > 0 && (
             <MyTooltip label="View citations">
@@ -83,7 +83,8 @@ const ResponseTags = ({
             </MyTooltip>
           )}
         </>
-      ) : (
+      )}
+      {chatAccount > 1 && (
         <Tag colorSchema="blue" {...TagStyles}>
           Multiple AI conversations
         </Tag>
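The ternary coupled the citation tags and the multi-conversation tag into mutually exclusive branches of one expression; splitting them into two independent `&&` guards keeps each block self-contained and easier to extend. An illustrative reduction of the pattern (component and tag contents are simplified, not the actual component):

import React from 'react';

// Two independent guards instead of one cond ? <A /> : <B /> ternary.
const Tags = ({ chatAccount }: { chatAccount: number }) => (
  <>
    {chatAccount === 1 && <span>citation and context tags</span>}
    {chatAccount > 1 && <span>Multiple AI conversations</span>}
  </>
);

export default Tags;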
@@ -5,7 +5,8 @@ import { readFileSync } from 'fs';
 import {
   type QAModelItemType,
   type ChatModelItemType,
-  type VectorModelItemType
+  type VectorModelItemType,
+  FunctionModelItemType
 } from '@/types/model';

 export type InitDateResponse = {
@@ -83,6 +84,22 @@ const defaultQAModel = {
   maxToken: 16000,
   price: 0
 };
+const defaultExtractModel: FunctionModelItemType = {
+  model: 'gpt-3.5-turbo-16k',
+  name: 'GPT35-16k',
+  maxToken: 16000,
+  price: 0,
+  prompt: '',
+  functionCall: true
+};
+const defaultCQModel: FunctionModelItemType = {
+  model: 'gpt-3.5-turbo-16k',
+  name: 'GPT35-16k',
+  maxToken: 16000,
+  price: 0,
+  prompt: '',
+  functionCall: true
+};

 const defaultVectorModels: VectorModelItemType[] = [
   {
@@ -114,6 +131,8 @@ export async function getInitConfig() {
     global.feConfigs = res.FeConfig ? { ...defaultFeConfigs, ...res.FeConfig } : defaultFeConfigs;
     global.chatModels = res.ChatModels || defaultChatModels;
     global.qaModel = res.QAModel || defaultQAModel;
+    global.extractModel = res.ExtractModel || defaultExtractModel;
+    global.cqModel = res.CQModel || defaultCQModel;
     global.vectorModels = res.VectorModels || defaultVectorModels;
   } catch (error) {
     setDefaultData();
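Note the fallback semantics here: unlike FeConfig, which is spread-merged with the defaults, each model section is swapped in wholesale with `||`, so a partial ExtractModel in config.json is not filled out with default fields. A minimal sketch of the difference (illustrative names):

type Section = { model: string; functionCall: boolean };
const defaults: Section = { model: 'gpt-3.5-turbo-16k', functionCall: true };

// `||` replaces the entire section when it is missing; it does not
// deep-merge a partial user section with the defaults.
function pickSection(userSection?: Section): Section {
  return userSection || defaults;
}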
@@ -14,3 +14,45 @@ A2:
 My text:"""{{text}}"""`,
   defaultTheme: 'They may contain several topics'
 };
+
+export const Prompt_ExtractJson = `You can extract the specified information from the "conversation history" and return a JSON object. The JSON object must satisfy:
+1. The JSON object only contains the values listed in the field descriptions.
+2. The required flag in a field description decides whether that field must be present in the JSON object.
+3. A field that must be present may be an empty string or be set according to the extraction requirements; its value must not be made up at random.
+
+Extraction requirements:
+"""
+{{description}}
+"""
+
+Field descriptions:
+"""
+{{json}}
+"""
+
+Conversation history:
+"""
+{{text}}
+"""
+`;
+
+export const Prompt_CQJson = `I will give you several question types. Using the extra background knowledge (which may be empty) and the conversation content, determine the type of my current question and return the ID of the matching type as a JSON string:
+"""
+'{"type":"ID of the question type"}'
+"""
+
+Question types:
+"""
+{{typeList}}
+"""
+
+Extra background knowledge:
+"""
+{{systemPrompt}}
+"""
+
+Conversation content:
+"""
+{{text}}
+"""
+`;
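Both templates are filled in with replaceVariable from '@/utils/common/tools/text'. Its implementation is not part of this diff; a minimal sketch consistent with how it is called in the agent modules below, assuming simple global {{key}} substitution:

// Assumed behavior of replaceVariable, not the actual implementation.
function replaceVariable(text: string, obj: Record<string, string>) {
  for (const key in obj) {
    // swap every {{key}} placeholder for its value
    text = text.replace(new RegExp(`\\{\\{${key}\\}\\}`, 'g'), obj[key]);
  }
  return text;
}

// e.g. replaceVariable(Prompt_CQJson, { typeList, systemPrompt, text })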
@@ -20,6 +20,7 @@ export const pushTaskBill = async ({
   shareId?: string;
   response: ChatHistoryItemResType[];
 }) => {
+  try {
     const total = response.reduce((sum, item) => sum + item.price, 0);

     await Promise.allSettled([
@@ -30,7 +31,7 @@ export const pushTaskBill = async ({
         total,
         source,
         list: response.map((item) => ({
+          moduleType: item.moduleType,
           moduleName: item.moduleName,
           amount: item.price || 0,
           model: item.model,
           tokenLen: item.tokens
@@ -54,6 +55,9 @@ export const pushTaskBill = async ({
       userId,
       price: formatPrice(total)
     });
+  } catch (error) {
+    addLog.error(`pushTaskBill error`, error);
+  }
 };

 export const updateShareChatBill = async ({
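With each module now reporting its own price on responseData, the bill total is just a sum over the response list. A worked example with hypothetical module outputs (price values are in the project's internal integer unit and are converted for display by formatPrice):

// Hypothetical run: a classify-question step followed by a chat step.
const response = [
  { moduleName: 'Classify Question', price: 450, tokens: 300 },
  { moduleName: 'AI Chat', price: 3000, tokens: 1500 }
];

// total as computed in pushTaskBill
const total = response.reduce((sum, item) => sum + item.price, 0); // 3450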
@@ -31,24 +31,7 @@ const BillSchema = new Schema({
     default: BillSourceEnum.fastgpt
   },
   list: {
-    type: [
-      {
-        moduleName: {
-          type: String,
-          required: true
-        },
-        amount: {
-          type: Number,
-          required: true
-        },
-        model: {
-          type: String
-        },
-        tokenLen: {
-          type: Number
-        }
-      }
-    ],
+    type: Array,
+    default: []
   }
 });
@@ -4,40 +4,69 @@ import type { ChatHistoryItemResType, ChatItemType } from '@/types/chat';
 import { ChatRoleEnum, TaskResponseKeyEnum } from '@/constants/chat';
 import { getAIChatApi, axiosConfig } from '@/service/lib/openai';
 import type { ClassifyQuestionAgentItemType } from '@/types/app';
-import { countModelPrice } from '@/service/events/pushBill';
-import { getModel } from '@/service/utils/data';
 import { SystemInputEnum } from '@/constants/app';
 import { SpecialInputKeyEnum } from '@/constants/flow';
 import { FlowModuleTypeEnum } from '@/constants/flow';
 import { ModuleDispatchProps } from '@/types/core/modules';
+import { replaceVariable } from '@/utils/common/tools/text';
+import { Prompt_CQJson } from '@/prompts/core/agent';

-export type CQProps = ModuleDispatchProps<{
+type Props = ModuleDispatchProps<{
   systemPrompt?: string;
   history?: ChatItemType[];
   [SystemInputEnum.userChatInput]: string;
   [SpecialInputKeyEnum.agents]: ClassifyQuestionAgentItemType[];
 }>;
-export type CQResponse = {
+type CQResponse = {
   [TaskResponseKeyEnum.responseData]: ChatHistoryItemResType;
   [key: string]: any;
 };

-const agentModel = 'gpt-3.5-turbo';
 const agentFunName = 'agent_user_question';
-const maxTokens = 3000;

 /* request openai chat */
-export const dispatchClassifyQuestion = async (props: Record<string, any>): Promise<CQResponse> => {
+export const dispatchClassifyQuestion = async (props: Props): Promise<CQResponse> => {
   const {
     moduleName,
     userOpenaiAccount,
-    inputs: { agents, systemPrompt, history = [], userChatInput }
-  } = props as CQProps;
+    inputs: { agents, userChatInput }
+  } = props as Props;

   if (!userChatInput) {
     return Promise.reject('Input is empty');
   }

+  const cqModel = global.cqModel;
+
+  const { arg, tokens } = await (async () => {
+    if (cqModel.functionCall) {
+      return functionCall(props);
+    }
+    return completions(props);
+  })();
+
+  const result = agents.find((item) => item.key === arg?.type) || agents[0];
+
+  return {
+    [result.key]: 1,
+    [TaskResponseKeyEnum.responseData]: {
+      moduleType: FlowModuleTypeEnum.classifyQuestion,
+      moduleName,
+      price: userOpenaiAccount?.key ? 0 : cqModel.price * tokens,
+      model: cqModel.name || '',
+      tokens,
+      cqList: agents,
+      cqResult: result.value
+    }
+  };
+};
+
+async function functionCall({
+  userOpenaiAccount,
+  inputs: { agents, systemPrompt, history = [], userChatInput }
+}: Props) {
+  const cqModel = global.cqModel;
+
   const messages: ChatItemType[] = [
     ...(systemPrompt
       ? [
@@ -55,14 +84,14 @@ export const dispatchClassifyQuestion = async (props: Record<string, any>): Prom
   ];
   const filterMessages = ChatContextFilter({
     messages,
-    maxTokens
+    maxTokens: cqModel.maxToken
   });
   const adaptMessages = adaptChat2GptMessages({ messages: filterMessages, reserveId: false });

   // function body
   const agentFunction = {
     name: agentFunName,
-    description: 'Determine which category the user question belongs to and return the matching enum field',
+    description: 'Determine which category the user question belongs to and return the matching field',
     parameters: {
       type: 'object',
       properties: {
@@ -79,7 +108,7 @@ export const dispatchClassifyQuestion = async (props: Record<string, any>): Prom

   const response = await chatAPI.createChatCompletion(
     {
-      model: agentModel,
+      model: cqModel.model,
       temperature: 0,
       messages: [...adaptMessages],
       function_call: { name: agentFunName },
@@ -92,20 +121,51 @@ export const dispatchClassifyQuestion = async (props: Record<string, any>): Prom

   const arg = JSON.parse(response.data.choices?.[0]?.message?.function_call?.arguments || '');

-  const tokens = response.data.usage?.total_tokens || 0;
-
-  const result = agents.find((item) => item.key === arg?.type) || agents[0];
-
-  return {
-    [result.key]: 1,
-    [TaskResponseKeyEnum.responseData]: {
-      moduleType: FlowModuleTypeEnum.classifyQuestion,
-      moduleName,
-      price: userOpenaiAccount?.key ? 0 : countModelPrice({ model: agentModel, tokens }),
-      model: getModel(agentModel)?.name || agentModel,
-      tokens,
-      cqList: agents,
-      cqResult: result.value
-    }
-  };
-};
+  return {
+    arg,
+    tokens: response.data.usage?.total_tokens || 0
+  };
+}
+
+async function completions({
+  userOpenaiAccount,
+  inputs: { agents, systemPrompt = '', history = [], userChatInput }
+}: Props) {
+  const extractModel = global.extractModel;
+
+  const messages: ChatItemType[] = [
+    {
+      obj: ChatRoleEnum.Human,
+      value: replaceVariable(extractModel.prompt || Prompt_CQJson, {
+        systemPrompt,
+        typeList: agents.map((item) => `ID: "${item.key}", question type: ${item.value}`).join('\n'),
+        text: `${history.map((item) => `${item.obj}:${item.value}`).join('\n')}
+Human:${userChatInput}`
+      })
+    }
+  ];
+
+  const chatAPI = getAIChatApi(userOpenaiAccount);
+
+  const { data } = await chatAPI.createChatCompletion(
+    {
+      model: extractModel.model,
+      temperature: 0.01,
+      messages: adaptChat2GptMessages({ messages, reserveId: false }),
+      stream: false
+    },
+    {
+      timeout: 480000,
+      ...axiosConfig()
+    }
+  );
+  const answer = data.choices?.[0].message?.content || '';
+  const totalTokens = data.usage?.total_tokens || 0;
+
+  const id = agents.find((item) => answer.includes(item.key))?.key || '';
+
+  return {
+    tokens: totalTokens,
+    arg: { type: id }
+  };
+}
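The heart of the commit is this selection pattern, repeated in both agent modules: an async IIFE takes the native function-call path when the configured model supports it and otherwise falls back to a plain completion, with both paths normalized to the same { arg, tokens } shape. Reduced to a skeleton (names simplified, not the actual module code):

type AgentResult = { arg: Record<string, any>; tokens: number };

// Skeleton of the dispatch pattern shared by dispatchClassifyQuestion
// and dispatchContentExtract.
async function runAgent(
  useFunctionCall: boolean,
  functionCall: () => Promise<AgentResult>,
  completions: () => Promise<AgentResult>
): Promise<AgentResult> {
  const { arg, tokens } = await (async () => {
    if (useFunctionCall) {
      return functionCall(); // structured arguments from the model
    }
    return completions(); // JSON recovered from free-form text
  })();
  return { arg, tokens };
}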
@@ -5,36 +5,88 @@ import { ChatRoleEnum, TaskResponseKeyEnum } from '@/constants/chat';
 import { getAIChatApi, axiosConfig } from '@/service/lib/openai';
 import type { ContextExtractAgentItemType } from '@/types/app';
 import { ContextExtractEnum } from '@/constants/flow/flowField';
-import { countModelPrice } from '@/service/events/pushBill';
-import { getModel } from '@/service/utils/data';
 import { FlowModuleTypeEnum } from '@/constants/flow';
 import { ModuleDispatchProps } from '@/types/core/modules';
+import { Prompt_ExtractJson } from '@/prompts/core/agent';
+import { replaceVariable } from '@/utils/common/tools/text';

-export type Props = ModuleDispatchProps<{
+type Props = ModuleDispatchProps<{
   history?: ChatItemType[];
   [ContextExtractEnum.content]: string;
   [ContextExtractEnum.extractKeys]: ContextExtractAgentItemType[];
   [ContextExtractEnum.description]: string;
 }>;
-export type Response = {
+type Response = {
   [ContextExtractEnum.success]?: boolean;
   [ContextExtractEnum.failed]?: boolean;
   [ContextExtractEnum.fields]: string;
   [TaskResponseKeyEnum.responseData]: ChatHistoryItemResType;
 };

-const agentModel = 'gpt-3.5-turbo';
 const agentFunName = 'agent_extract_data';
-const maxTokens = 4000;

-export async function dispatchContentExtract({
+export async function dispatchContentExtract(props: Props): Promise<Response> {
   const {
     moduleName,
     userOpenaiAccount,
-    inputs: { content, extractKeys, history = [], description }
-  }: Props): Promise<Response> {
+    inputs: { content, description, extractKeys }
+  } = props;

   if (!content) {
     return Promise.reject('Input is empty');
   }

+  const extractModel = global.extractModel;
+
+  const { arg, tokens } = await (async () => {
+    if (extractModel.functionCall) {
+      return functionCall(props);
+    }
+    return completions(props);
+  })();
+
+  // remove invalid key
+  for (let key in arg) {
+    if (!extractKeys.find((item) => item.key === key)) {
+      delete arg[key];
+    }
+  }
+
+  // auth fields
+  let success = !extractKeys.find((item) => !arg[item.key]);
+  // auth empty value
+  if (success) {
+    for (const key in arg) {
+      if (arg[key] === '') {
+        success = false;
+        break;
+      }
+    }
+  }
+
+  return {
+    [ContextExtractEnum.success]: success ? true : undefined,
+    [ContextExtractEnum.failed]: success ? undefined : true,
+    [ContextExtractEnum.fields]: JSON.stringify(arg),
+    ...arg,
+    [TaskResponseKeyEnum.responseData]: {
+      moduleType: FlowModuleTypeEnum.contentExtract,
+      moduleName,
+      price: userOpenaiAccount?.key ? 0 : extractModel.price * tokens,
+      model: extractModel.name || '',
+      tokens,
+      extractDescription: description,
+      extractResult: arg
+    }
+  };
+}
+
+async function functionCall({
+  userOpenaiAccount,
+  inputs: { history = [], content, extractKeys, description }
+}: Props) {
+  const extractModel = global.extractModel;
+
   const messages: ChatItemType[] = [
     ...history,
     {
@@ -44,7 +96,7 @@ export async function dispatchContentExtract({
   ];
   const filterMessages = ChatContextFilter({
     messages,
-    maxTokens
+    maxTokens: extractModel.maxToken
   });
   const adaptMessages = adaptChat2GptMessages({ messages: filterMessages, reserveId: false });

@@ -77,7 +129,7 @@ export async function dispatchContentExtract({

   const response = await chatAPI.createChatCompletion(
     {
-      model: agentModel,
+      model: extractModel.model,
       temperature: 0,
       messages: [...adaptMessages],
       function_call: { name: agentFunName },
@@ -96,33 +148,79 @@ export async function dispatchContentExtract({
     }
   })();

-  // auth fields
-  let success = !extractKeys.find((item) => !arg[item.key]);
-  // auth empty value
-  if (success) {
-    for (const key in arg) {
-      if (arg[key] === '') {
-        success = false;
-        break;
-      }
-    }
-  }
-
   const tokens = response.data.usage?.total_tokens || 0;

   return {
-    [ContextExtractEnum.success]: success ? true : undefined,
-    [ContextExtractEnum.failed]: success ? undefined : true,
-    [ContextExtractEnum.fields]: JSON.stringify(arg),
-    ...arg,
-    [TaskResponseKeyEnum.responseData]: {
-      moduleType: FlowModuleTypeEnum.contentExtract,
-      moduleName,
-      price: userOpenaiAccount?.key ? 0 : countModelPrice({ model: agentModel, tokens }),
-      model: getModel(agentModel)?.name || agentModel,
-      tokens,
-      extractDescription: description,
-      extractResult: arg
-    }
+    tokens,
+    arg
   };
 }

+async function completions({
+  userOpenaiAccount,
+  inputs: { history = [], content, extractKeys, description }
+}: Props) {
+  const extractModel = global.extractModel;
+
+  const messages: ChatItemType[] = [
+    {
+      obj: ChatRoleEnum.Human,
+      value: replaceVariable(extractModel.prompt || Prompt_ExtractJson, {
+        description,
+        json: extractKeys
+          .map(
+            (item) =>
+              `key="${item.key}",description="${item.desc}",required="${
+                item.required ? 'true' : 'false'
+              }"`
+          )
+          .join('\n'),
+        text: `${history.map((item) => `${item.obj}:${item.value}`).join('\n')}
+Human: ${content}`
+      })
+    }
+  ];
+
+  const chatAPI = getAIChatApi(userOpenaiAccount);
+
+  const { data } = await chatAPI.createChatCompletion(
+    {
+      model: extractModel.model,
+      temperature: 0.01,
+      messages: adaptChat2GptMessages({ messages, reserveId: false }),
+      stream: false
+    },
+    {
+      timeout: 480000,
+      ...axiosConfig()
+    }
+  );
+  const answer = data.choices?.[0].message?.content || '';
+  const totalTokens = data.usage?.total_tokens || 0;
+
+  // parse response
+  const start = answer.indexOf('{');
+  const end = answer.lastIndexOf('}');
+
+  if (start === -1 || end === -1)
+    return {
+      tokens: totalTokens,
+      arg: {}
+    };
+
+  const jsonStr = answer
+    .substring(start, end + 1)
+    .replace(/(\\n|\\)/g, '')
+    .replace(/ /g, '');
+
+  try {
+    return {
+      tokens: totalTokens,
+      arg: JSON.parse(jsonStr) as Record<string, any>
+    };
+  } catch (error) {
+    return {
+      tokens: totalTokens,
+      arg: {}
+    };
+  }
+}
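Because a plain completion may wrap its JSON in prose, completions() recovers the object by slicing from the first '{' to the last '}', stripping escape characters and spaces, then parsing. Extracted as a standalone helper for clarity (not how the commit factors the code); note that the space-stripping also removes spaces inside string values, a lenient-parsing trade-off:

// Standalone version of the JSON-recovery step in completions().
function parseLooseJson(answer: string): Record<string, any> {
  const start = answer.indexOf('{');
  const end = answer.lastIndexOf('}');
  if (start === -1 || end === -1) return {};

  const jsonStr = answer
    .substring(start, end + 1)
    .replace(/(\\n|\\)/g, '') // drop escaped newlines and stray backslashes
    .replace(/ /g, ''); // drop all spaces, including those inside values

  try {
    return JSON.parse(jsonStr);
  } catch {
    return {};
  }
}

// parseLooseJson('Sure! {"city":"Paris","required":"true"}')
// -> { city: 'Paris', required: 'true' }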
@@ -14,5 +14,11 @@ export const getVectorModel = (model?: string) => {
 };

 export const getModel = (model?: string) => {
-  return [...global.chatModels, ...global.vectorModels].find((item) => item.model === model);
+  return [
+    ...global.chatModels,
+    ...global.vectorModels,
+    global.qaModel,
+    global.extractModel,
+    global.cqModel
+  ].find((item) => item.model === model);
 };
client/src/types/index.d.ts
@@ -3,7 +3,12 @@ import type { Agent } from 'http';
 import type { Pool } from 'pg';
 import type { Tiktoken } from 'js-tiktoken';
 import type { Logger } from 'winston';
-import { ChatModelItemType, QAModelItemType, VectorModelItemType } from './model';
+import {
+  ChatModelItemType,
+  FunctionModelItemType,
+  QAModelItemType,
+  VectorModelItemType
+} from './model';
 import { TrackEventName } from '@/constants/common';

 export type PagingData<T> = {
@@ -62,6 +67,8 @@ declare global {
   var systemEnv: SystemEnvType;
   var chatModels: ChatModelItemType[];
   var qaModel: QAModelItemType;
+  var extractModel: FunctionModelItemType;
+  var cqModel: FunctionModelItemType;
   var vectorModels: VectorModelItemType[];
   var systemVersion: string;
client/src/types/model.d.ts
@@ -21,3 +21,11 @@ export type VectorModelItemType = {
   price: number;
   maxToken: number;
 };
+export type FunctionModelItemType = {
+  model: string;
+  name: string;
+  maxToken: number;
+  price: number;
+  prompt: string;
+  functionCall: boolean;
+};