docs and embedding bill

archer
2023-06-05 18:58:38 +08:00
parent 1111f07fa7
commit 942aeeac2e
11 changed files with 66 additions and 32 deletions

View File

@@ -34,13 +34,13 @@ run: ## Run a dev service from host.
.PHONY: docker-build
docker-build: ## Build docker image with the desktop-frontend.
docker build -t registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:latest . --network host --build-arg HTTP_PROXY=http://127.0.0.1:7890 --build-arg HTTPS_PROXY=http://127.0.0.1:7890
docker build -t c121914yu/fast-gpt:latest . --network host --build-arg HTTP_PROXY=http://127.0.0.1:7890 --build-arg HTTPS_PROXY=http://127.0.0.1:7890
##@ Deployment
.PHONY: docker-run
docker-run: ## Push docker image.
docker run -d -p 8008:3000 --name fastgpt -v /web_project/yjl/fastgpt/logs:/app/.next/logs registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:latest
docker run -d -p 8008:3000 --name fastgpt -v /web_project/yjl/fastgpt/logs:/app/.next/logs c121914yu/fast-gpt:latest
#TODO: add support of docker push

View File

@@ -31,7 +31,9 @@ services:
- /root/fastgpt/mongo/logs:/var/log/mongodb
- /etc/localtime:/etc/localtime:ro
fastgpt:
image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:latest
image: ghcr.io/c121914yu/fast-gpt:latest # GitHub
# image: c121914yu/fast-gpt:latest # Docker Hub
# image: registry.cn-hangzhou.aliyuncs.com/fastgpt/fastgpt:latest # Alibaba Cloud
network_mode: host
restart: always
container_name: fastgpt

View File

@@ -11,10 +11,10 @@
Mobile: tap the avatar of a chat message to copy or delete that message.
**Price List**
If you use your own API Key, you will not be billed. Detailed bills can be viewed on the account page.
If you use your own API Key, chatting with OpenAI models on the web page is not billed. Detailed bills can be viewed on the account page.
| Billing item | Price: CNY / 1K tokens (context included) |
| --- | --- |
| Knowledge base - indexing | Free |
| Knowledge base - indexing | 0.001 |
| chatgpt - chat | 0.025 |
| gpt4 - chat | 0.5 |
| File splitting | 0.025 |

View File

@@ -15,10 +15,10 @@ The FastGpt project is fully open source and can be freely self-hosted, removing platform-risk concerns.
### Price List
If you use your own API Key, you will not be billed. Detailed bills can be viewed on the account page.
If you use your own API Key, chatting with OpenAI models on the web page is not billed. Detailed bills can be viewed on the account page.
| Billing item | Price: CNY / 1K tokens (context included) |
| --- | --- |
| Knowledge base - indexing | Free |
| Knowledge base - indexing | 0.001 |
| chatgpt - chat | 0.025 |
| gpt4 - chat | 0.5 |
| File splitting | 0.025 |
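
Read the table as CNY per 1,000 tokens, prompt context included. A minimal worked sketch (token counting and rounding are implementation details, so the numbers are only illustrative):

```ts
// Prices from the table above: CNY per 1K tokens, context included.
const pricePer1kTokens = {
  'kb-index': 0.001,
  'chatgpt-chat': 0.025,
  'gpt4-chat': 0.5,
  'file-split': 0.025
} as const;

// A chatgpt conversation that consumes 1,500 tokens (prompt + completion):
const cost = (1500 / 1000) * pricePer1kTokens['chatgpt-chat'];
console.log(cost); // 0.0375 CNY
```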

View File

@@ -3,6 +3,7 @@ import type { ShareChatEditType } from '@/types/model';
import type { ModelSchema } from '@/types/mongoSchema';
export const embeddingModel = 'text-embedding-ada-002';
export const embeddingPrice = 0.1;
export type EmbeddingModelType = 'text-embedding-ada-002';
export enum OpenAiChatEnum {
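
The constant embeddingPrice = 0.1 is expressed in the platform's internal scaled units per token, not CNY. A sketch of how it lines up with the 0.001 CNY / 1K tokens shown in the docs, assuming PRICE_SCALE = 100000 and a formatPrice that divides by it (neither appears in this hunk):

```ts
// Assumed conventions, not shown in this hunk.
const PRICE_SCALE = 100000;
const formatPrice = (val: number) => val / PRICE_SCALE;

const embeddingPrice = 0.1; // scaled units per token (from this hunk)

console.log(formatPrice(embeddingPrice * 1000)); // 0.001 CNY per 1K tokens
```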

View File

@@ -1,6 +1,6 @@
import type { NextApiRequest, NextApiResponse } from 'next';
import { jsonRes } from '@/service/response';
import { authUser, getSystemOpenAiKey } from '@/service/utils/auth';
import { authUser, getApiKey } from '@/service/utils/auth';
import { withNextCors } from '@/service/utils/tools';
import { getOpenAIApi } from '@/service/utils/chat/openai';
import { embeddingModel } from '@/constants/model';
@@ -24,7 +24,7 @@ export default withNextCors(async function handler(req: NextApiRequest, res: Nex
}
jsonRes<Response>(res, {
data: await openaiEmbedding({ userId, input, type })
data: await openaiEmbedding({ userId, input, type, mustPay: true })
});
} catch (err) {
console.log(err);
@@ -38,9 +38,15 @@ export default withNextCors(async function handler(req: NextApiRequest, res: Nex
export async function openaiEmbedding({
userId,
input,
mustPay = false,
type = 'chat'
}: { userId: string } & Props) {
const apiKey = getSystemOpenAiKey(type);
}: { userId: string; mustPay?: boolean } & Props) {
const { userOpenAiKey, systemAuthKey } = await getApiKey({
model: 'gpt-3.5-turbo',
userId,
mustPay,
type
});
// get the OpenAI chat API client
const chatAPI = getOpenAIApi();
@@ -54,7 +60,7 @@ export async function openaiEmbedding({
},
{
timeout: 60000,
...axiosConfig(apiKey)
...axiosConfig(userOpenAiKey || systemAuthKey)
}
)
.then((res) => ({
@@ -63,7 +69,7 @@ export async function openaiEmbedding({
}));
pushGenerateVectorBill({
isPay: false,
isPay: !userOpenAiKey,
userId,
text: input.join(''),
tokenLen: result.tokenLen
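
This hunk replaces the single system key with getApiKey, which also returns the user's own key when one is configured; the request then runs on whichever key is available, and the vector bill is only marked as paid (isPay) when the system key was used. A minimal sketch of that selection logic, using the field names from the diff (getApiKey's internals are not shown here):

```ts
// Shape returned by getApiKey, per this diff; internals assumed.
interface ApiKeys {
  userOpenAiKey?: string; // the user's own OpenAI key, if configured
  systemAuthKey: string;  // the platform key used as a fallback
}

function pickKeyAndBilling({ userOpenAiKey, systemAuthKey }: ApiKeys) {
  const apiKey = userOpenAiKey || systemAuthKey; // prefer the user's key
  const isPay = !userOpenAiKey;                  // bill only when the system key is used
  return { apiKey, isPay };
}

console.log(pickKeyAndBilling({ userOpenAiKey: 'sk-user', systemAuthKey: 'sk-system' }));
// { apiKey: 'sk-user', isPay: false }  -> not billed to the platform
console.log(pickKeyAndBilling({ systemAuthKey: 'sk-system' }));
// { apiKey: 'sk-system', isPay: true } -> billed
```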

View File

@@ -22,20 +22,22 @@ import Radio from '@/components/Radio';
import { splitText_token } from '@/utils/file';
import { TrainingModeEnum } from '@/constants/plugin';
import { getErrText } from '@/utils/tools';
import { ChatModelMap, OpenAiChatEnum, embeddingPrice } from '@/constants/model';
import { formatPrice } from '@/utils/user';
const fileExtension = '.txt,.doc,.docx,.pdf,.md';
const modeMap = {
[TrainingModeEnum.qa]: {
maxLen: 2800,
slideLen: 800,
price: 4,
maxLen: 2600,
slideLen: 700,
price: ChatModelMap[OpenAiChatEnum.GPT35].price,
isPrompt: true
},
[TrainingModeEnum.index]: {
maxLen: 800,
maxLen: 700,
slideLen: 300,
price: 0.4,
price: embeddingPrice,
isPrompt: false
}
};
@@ -58,18 +60,18 @@ const SelectFileModal = ({
{ filename: 'Text 1', text: '' }
]);
const [splitRes, setSplitRes] = useState<{
tokens: number;
price: number;
chunks: { filename: string; value: string }[];
successChunks: number;
}>({
tokens: 0,
price: 0,
successChunks: 0,
chunks: []
});
const { openConfirm, ConfirmChild } = useConfirm({
content: `Confirm importing this file? Splitting takes some time and the task cannot be cancelled. QA splitting can only use your balance; if the balance runs out, unfinished tasks will be cleared. Total chunks: ${
splitRes.chunks.length
}. ${splitRes.tokens ? `Roughly ${splitRes.tokens} tokens.` : ''}`
}. ${splitRes.price ? `Roughly ${splitRes.price}.` : ''}`
});
const onSelectFile = useCallback(
@@ -166,8 +168,16 @@ const SelectFileModal = ({
}))
.filter((item) => item.tokens > 0);
let price = formatPrice(
splitRes.reduce((sum, item) => sum + item.tokens, 0) * modeMap[mode].price
);
if (mode === 'qa') {
price *= 1.2;
}
setSplitRes({
tokens: splitRes.reduce((sum, item) => sum + item.tokens, 0),
price,
chunks: splitRes
.map((item) =>
item.chunks.map((chunk) => ({
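
The import estimate now derives its unit prices from ChatModelMap and embeddingPrice instead of the hard-coded 4 / 0.4, and QA mode adds a 20% margin on top. A worked sketch of the arithmetic; the concrete GPT-3.5 price (2.5 scaled units per token) and PRICE_SCALE = 100000 are assumptions, since neither constant appears in this hunk:

```ts
const PRICE_SCALE = 100000;                        // assumed
const formatPrice = (val: number) => val / PRICE_SCALE;

const gpt35Price = 2.5;                            // assumed ChatModelMap[GPT35].price
const embeddingPrice = 0.1;                        // from constants/model

const tokens = 10_000;

let qaPrice = formatPrice(tokens * gpt35Price);    // 0.25 CNY
qaPrice *= 1.2;                                    // QA margin -> ~0.3 CNY

const indexPrice = formatPrice(tokens * embeddingPrice); // 0.01 CNY
console.log(qaPrice, indexPrice);
```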

View File

@@ -17,6 +17,7 @@ import { useToast } from '@/hooks/useToast';
import { useQuery } from '@tanstack/react-query';
import { useRouter } from 'next/router';
import { getErrText } from '@/utils/tools';
import Markdown from '@/components/Markdown';
const PayModal = ({ onClose }: { onClose: () => void }) => {
const router = useRouter();
@@ -78,7 +79,7 @@ const PayModal = ({ onClose }: { onClose: () => void }) => {
}}
>
<ModalOverlay />
<ModalContent>
<ModalContent minW={'auto'}>
<ModalHeader></ModalHeader>
{!payId && <ModalCloseButton />}
@@ -86,7 +87,7 @@ const PayModal = ({ onClose }: { onClose: () => void }) => {
{!payId && (
<>
<Grid gridTemplateColumns={'repeat(4,1fr)'} gridGap={5} mb={4}>
{[5, 10, 20, 50].map((item) => (
{[10, 20, 50, 100].map((item) => (
<Button
key={item}
variant={item === inputVal ? 'solid' : 'outline'}
@@ -96,7 +97,7 @@ const PayModal = ({ onClose }: { onClose: () => void }) => {
</Button>
))}
</Grid>
<Box>
<Box mb={4}>
<Input
value={inputVal}
type={'number'}
@@ -107,6 +108,15 @@ const PayModal = ({ onClose }: { onClose: () => void }) => {
}}
></Input>
</Box>
<Markdown
source={`
| Billing item | Price: CNY / 1K tokens (context included) |
| --- | --- |
| Knowledge base - indexing | 0.001 |
| chatgpt - chat | 0.025 |
| gpt4 - chat | 0.5 |
| File splitting | 0.025 |`}
/>
</>
)}
{/* payment QR code */}

View File

@@ -82,7 +82,8 @@ export async function generateVector(): Promise<any> {
const vectors = await openaiEmbedding({
input: dataItems.map((item) => item.q),
userId,
type: 'training'
type: 'training',
mustPay: true
});
// insert the generated vectors into pg

View File

@@ -1,5 +1,11 @@
import { connectToDatabase, Bill, User, ShareChat } from '../mongo';
import { ChatModelMap, OpenAiChatEnum, ChatModelType, embeddingModel } from '@/constants/model';
import {
ChatModelMap,
OpenAiChatEnum,
ChatModelType,
embeddingModel,
embeddingPrice
} from '@/constants/model';
import { BillTypeEnum } from '@/constants/user';
export const pushChatBill = async ({
@@ -145,11 +151,9 @@ export const pushGenerateVectorBill = async ({
await connectToDatabase();
try {
const unitPrice = 0.4;
// calculate the price; minimum of 1
const price = 0;
// let price = unitPrice * tokenLen;
// price = price > 1 ? price : 1;
let price = embeddingPrice * tokenLen;
price = price > 1 ? price : 1;
// insert a Bill record
const res = await Bill.create({
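
Vector generation is no longer free: the bill now charges embeddingPrice per token with a floor of one scaled unit. A worked sketch using the same assumed PRICE_SCALE = 100000 convention as above:

```ts
const PRICE_SCALE = 100000; // assumed
const embeddingPrice = 0.1; // scaled units per token

const vectorBillPrice = (tokenLen: number) => {
  let price = embeddingPrice * tokenLen;
  price = price > 1 ? price : 1; // floor of 1 scaled unit
  return price;
};

console.log(vectorBillPrice(5000) / PRICE_SCALE); // 0.005 CNY
console.log(vectorBillPrice(3) / PRICE_SCALE);    // floored -> 0.00001 CNY
```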

View File

@@ -28,7 +28,7 @@ const UserSchema = new Schema({
balance: {
// platform balance; cannot be withdrawn
type: Number,
default: 0.5 * PRICE_SCALE
default: 2 * PRICE_SCALE
},
inviterId: {
// the user who invited this registration
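
Assuming the same PRICE_SCALE = 100000 and that balances are displayed through formatPrice, the new signup default corresponds to 2 CNY of starting credit instead of 0.5:

```ts
const PRICE_SCALE = 100000;                // assumed
const defaultBalance = 2 * PRICE_SCALE;    // 200000 scaled units
console.log(defaultBalance / PRICE_SCALE); // 2 CNY (was 0.5)
```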