v4.6 -1 (#459)
@@ -2,5 +2,6 @@ export enum ChatCompletionRequestMessageRoleEnum {
   'System' = 'system',
   'User' = 'user',
   'Assistant' = 'assistant',
-  'Function' = 'function'
+  'Function' = 'function',
+  'Tool' = 'tool'
 }
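For context, a minimal sketch (not part of the commit) of how the new 'Tool' member can be used; the import path is assumed, since the hunk above does not show the file name:

// Hypothetical usage of the new Tool role; './constant' is an assumed path.
import { ChatCompletionRequestMessageRoleEnum } from './constant';

// A tool-result message, as sent back after a tool call round-trip.
const toolMessage = {
  role: ChatCompletionRequestMessageRoleEnum.Tool, // resolves to 'tool'
  tool_call_id: 'call_0', // hypothetical id
  content: '{"temperature": 21}'
};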
packages/global/core/ai/index.ts (new file, +2)
@@ -0,0 +1,2 @@
+import OpenAI from 'openai';
+export default OpenAI;
packages/global/core/ai/model.d.ts (vendored, new file, +32)
@@ -0,0 +1,32 @@
+export type LLMModelItemType = {
+  model: string;
+  name: string;
+  maxContext: number;
+  maxResponse: number;
+  price: number;
+};
+export type ChatModelItemType = LLMModelItemType & {
+  quoteMaxToken: number;
+  maxTemperature: number;
+  censor?: boolean;
+  defaultSystemChatPrompt?: string;
+};
+
+export type FunctionModelItemType = LLMModelItemType & {
+  functionCall: boolean;
+  functionPrompt: string;
+};
+
+export type VectorModelItemType = {
+  model: string;
+  name: string;
+  defaultToken: number;
+  price: number;
+  maxToken: number;
+};
+
+export type AudioSpeechModelType = {
+  model: string;
+  name: string;
+  price: number;
+};
packages/global/core/ai/model.ts (new file, +115)
@@ -0,0 +1,115 @@
+import type {
+  LLMModelItemType,
+  ChatModelItemType,
+  FunctionModelItemType,
+  VectorModelItemType,
+  AudioSpeechModelType
+} from './model.d';
+
+export const defaultChatModels: ChatModelItemType[] = [
+  {
+    model: 'gpt-3.5-turbo-1106',
+    name: 'GPT35-1106',
+    price: 0,
+    maxContext: 16000,
+    maxResponse: 4000,
+    quoteMaxToken: 2000,
+    maxTemperature: 1.2,
+    censor: false,
+    defaultSystemChatPrompt: ''
+  },
+  {
+    model: 'gpt-3.5-turbo-16k',
+    name: 'GPT35-16k',
+    maxContext: 16000,
+    maxResponse: 16000,
+    price: 0,
+    quoteMaxToken: 8000,
+    maxTemperature: 1.2,
+    censor: false,
+    defaultSystemChatPrompt: ''
+  },
+  {
+    model: 'gpt-4',
+    name: 'GPT4-8k',
+    maxContext: 8000,
+    maxResponse: 8000,
+    price: 0,
+    quoteMaxToken: 4000,
+    maxTemperature: 1.2,
+    censor: false,
+    defaultSystemChatPrompt: ''
+  }
+];
+export const defaultQAModels: LLMModelItemType[] = [
+  {
+    model: 'gpt-3.5-turbo-16k',
+    name: 'GPT35-16k',
+    maxContext: 16000,
+    maxResponse: 16000,
+    price: 0
+  }
+];
+export const defaultCQModels: FunctionModelItemType[] = [
+  {
+    model: 'gpt-3.5-turbo-1106',
+    name: 'GPT35-1106',
+    maxContext: 16000,
+    maxResponse: 4000,
+    price: 0,
+    functionCall: true,
+    functionPrompt: ''
+  },
+  {
+    model: 'gpt-4',
+    name: 'GPT4-8k',
+    maxContext: 8000,
+    maxResponse: 8000,
+    price: 0,
+    functionCall: true,
+    functionPrompt: ''
+  }
+];
+export const defaultExtractModels: FunctionModelItemType[] = [
+  {
+    model: 'gpt-3.5-turbo-1106',
+    name: 'GPT35-1106',
+    maxContext: 16000,
+    maxResponse: 4000,
+    price: 0,
+    functionCall: true,
+    functionPrompt: ''
+  }
+];
+export const defaultQGModels: LLMModelItemType[] = [
+  {
+    model: 'gpt-3.5-turbo-1106',
+    name: 'GPT35-1106',
+    maxContext: 1600,
+    maxResponse: 4000,
+    price: 0
+  }
+];
+
+export const defaultVectorModels: VectorModelItemType[] = [
+  {
+    model: 'text-embedding-ada-002',
+    name: 'Embedding-2',
+    price: 0,
+    defaultToken: 500,
+    maxToken: 3000
+  }
+];
+
+export const defaultAudioSpeechModels: AudioSpeechModelType[] = [
+  {
+    model: 'tts-1',
+    name: 'OpenAI TTS1',
+    price: 0
+  },
+  {
+    model: 'tts-1-hd',
+    name: 'OpenAI TTS1',
+    price: 0
+  }
+];
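As a usage sketch (an assumed helper, not part of this commit), these default lists lend themselves to a lookup-with-fallback pattern:

import { defaultChatModels } from './model';
import type { ChatModelItemType } from './model.d';

// Hypothetical helper: resolve a model id to its config, falling back to the first default.
export function getChatModel(model?: string): ChatModelItemType {
  return defaultChatModels.find((item) => item.model === model) ?? defaultChatModels[0];
}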
packages/global/core/ai/speech/api.d.ts (vendored, new file, +8)
@@ -0,0 +1,8 @@
+import { Text2SpeechVoiceEnum } from './constant';
+
+export type Text2SpeechProps = {
+  model?: string;
+  voice?: `${Text2SpeechVoiceEnum}`;
+  input: string;
+  speed?: number;
+};
packages/global/core/ai/speech/constant.ts (new file, +17)
@@ -0,0 +1,17 @@
+export enum Text2SpeechVoiceEnum {
+  alloy = 'alloy',
+  echo = 'echo',
+  fable = 'fable',
+  onyx = 'onyx',
+  nova = 'nova',
+  shimmer = 'shimmer'
+}
+export const openaiTTSList = [
+  Text2SpeechVoiceEnum.alloy,
+  Text2SpeechVoiceEnum.echo,
+  Text2SpeechVoiceEnum.fable,
+  Text2SpeechVoiceEnum.onyx,
+  Text2SpeechVoiceEnum.nova,
+  Text2SpeechVoiceEnum.shimmer
+];
+export const openaiTTSModel = 'tts-1';
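A minimal sketch (not part of the commit) of how Text2SpeechProps and these constants map onto the openai SDK's audio.speech.create call; the helper name is hypothetical and error handling is omitted:

import OpenAI from 'openai';
import type { Text2SpeechProps } from './api';
import { openaiTTSModel } from './constant';

const openai = new OpenAI(); // reads OPENAI_API_KEY from the environment

// Hypothetical helper: synthesize speech and return the mp3 bytes as a Buffer.
export async function text2Speech({ model = openaiTTSModel, voice = 'alloy', input, speed = 1 }: Text2SpeechProps) {
  const response = await openai.audio.speech.create({ model, voice, input, speed });
  return Buffer.from(await response.arrayBuffer());
}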
packages/global/core/ai/type.d.ts (vendored, 20 changed lines)
@@ -1,9 +1,19 @@
-import OpenAI from 'openai';
-export type ChatCompletionRequestMessage = OpenAI.Chat.CreateChatCompletionRequestMessage;
-export type ChatCompletion = OpenAI.Chat.ChatCompletion;
-export type CreateChatCompletionRequest = OpenAI.Chat.ChatCompletionCreateParams;
+import type {
+  ChatCompletion,
+  ChatCompletionCreateParams,
+  ChatCompletionChunk,
+  ChatCompletionMessageParam,
+  ChatCompletionContentPart
+} from 'openai/resources';
+export type ChatCompletionContentPart = ChatCompletionContentPart;
+export type ChatCompletionCreateParams = ChatCompletionCreateParams;
+export type ChatMessageItemType = Omit<ChatCompletionMessageParam> & {
+  dataId?: string;
+  content: any;
+};
 
-export type StreamChatType = Stream<OpenAI.Chat.ChatCompletionChunk>;
+export type ChatCompletion = ChatCompletion;
+export type StreamChatType = Stream<ChatCompletionChunk>;
 
 export type PromptTemplateItem = {
   title: string;
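And a minimal sketch (not from the commit) of consuming a stream of the shape StreamChatType describes, using the plain openai SDK directly:

import OpenAI from 'openai';

const openai = new OpenAI();

// Hypothetical helper: accumulate the streamed completion into a string.
async function streamAnswer(prompt: string) {
  const stream = await openai.chat.completions.create({
    model: 'gpt-3.5-turbo-1106',
    messages: [{ role: 'user', content: prompt }],
    stream: true
  });
  let text = '';
  for await (const chunk of stream) {
    text += chunk.choices[0]?.delta?.content ?? '';
  }
  return text;
}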