V4.7-alpha (#985)

Co-authored-by: heheer <71265218+newfish-cmyk@users.noreply.github.com>
This commit is contained in:
Archer
2024-03-13 10:50:02 +08:00
committed by GitHub
parent 5bca15f12f
commit 9501c3f3a1
170 changed files with 5786 additions and 2342 deletions

View File

@@ -1,4 +1,4 @@
import type { ChatMessageItemType } from '@fastgpt/global/core/ai/type.d';
import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type.d';
import { getAIApi } from '../config';
import { countGptMessagesTokens } from '@fastgpt/global/common/string/tiktoken';
@@ -8,10 +8,10 @@ export async function createQuestionGuide({
messages,
model
}: {
messages: ChatMessageItemType[];
messages: ChatCompletionMessageParam[];
model: string;
}) {
const concatMessages: ChatMessageItemType[] = [
const concatMessages: ChatCompletionMessageParam[] = [
...messages,
{
role: 'user',

View File

@@ -2,6 +2,7 @@ import { replaceVariable } from '@fastgpt/global/common/string/tools';
import { getAIApi } from '../config';
import { ChatItemType } from '@fastgpt/global/core/chat/type';
import { countGptMessagesTokens } from '@fastgpt/global/common/string/tiktoken';
import { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
/*
query extension - 问题扩展
@@ -133,7 +134,7 @@ A: ${chatBg}
histories: concatFewShot
})
}
];
] as ChatCompletionMessageParam[];
const result = await ai.chat.completions.create({
model: model,
temperature: 0.01,

View File

@@ -10,6 +10,7 @@ import {
import { appCollectionName } from '../app/schema';
import { userCollectionName } from '../../support/user/schema';
import { ModuleOutputKeyEnum } from '@fastgpt/global/core/module/constants';
import { DispatchNodeResponseKeyEnum } from '@fastgpt/global/core/module/runtime/constants';
export const ChatItemCollectionName = 'chatitems';
@@ -54,8 +55,8 @@ const ChatItemSchema = new Schema({
},
value: {
// chat content
type: String,
default: ''
type: Array,
default: []
},
userGoodFeedback: {
type: String
@@ -75,7 +76,7 @@ const ChatItemSchema = new Schema({
a: String
}
},
[ModuleOutputKeyEnum.responseData]: {
[DispatchNodeResponseKeyEnum.nodeResponse]: {
type: Array,
default: []
}

View File

@@ -1,6 +1,7 @@
import type { ChatItemType } from '@fastgpt/global/core/chat/type';
import type { ChatItemType, ChatItemValueItemType } from '@fastgpt/global/core/chat/type';
import { MongoChatItem } from './chatItemSchema';
import { addLog } from '../../common/system/log';
import { ChatItemValueTypeEnum } from '@fastgpt/global/core/chat/constants';
export async function getChatItems({
appId,
@@ -24,8 +25,27 @@ export async function getChatItems({
history.reverse();
history.forEach((item) => {
// @ts-ignore
item.value = adaptStringValue(item.value);
});
return { history };
}
/* Temporary adaptation for legacy string-form chat records; can be removed after the data cleanup (remove in v4.30) */
/**
 * Wrap a legacy plain-string chat value into the structured value-item list.
 * Values that are not strings are assumed to already be in the new
 * structured format and are returned untouched.
 */
export const adaptStringValue = (value: any): ChatItemValueItemType[] => {
  if (typeof value !== 'string') {
    return value;
  }
  return [
    {
      type: ChatItemValueTypeEnum.text,
      text: { content: value }
    }
  ];
};
export const addCustomFeedbacks = async ({
appId,

View File

@@ -1,21 +1,40 @@
import type { ChatItemType } from '@fastgpt/global/core/chat/type.d';
import { ChatRoleEnum, IMG_BLOCK_KEY } from '@fastgpt/global/core/chat/constants';
import { countMessagesTokens } from '@fastgpt/global/common/string/tiktoken';
import type { ChatCompletionContentPart } from '@fastgpt/global/core/ai/type.d';
import { countGptMessagesTokens } from '@fastgpt/global/common/string/tiktoken';
import type {
ChatCompletionContentPart,
ChatCompletionMessageParam
} from '@fastgpt/global/core/ai/type.d';
import axios from 'axios';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
/* slice chat context by tokens */
export function ChatContextFilter({
export function filterGPTMessageByMaxTokens({
messages = [],
maxTokens
}: {
messages: ChatItemType[];
messages: ChatCompletionMessageParam[];
maxTokens: number;
}) {
if (!Array.isArray(messages)) {
return [];
}
const rawTextLen = messages.reduce((sum, item) => sum + item.value.length, 0);
const rawTextLen = messages.reduce((sum, item) => {
if (typeof item.content === 'string') {
return sum + item.content.length;
}
if (Array.isArray(item.content)) {
return (
sum +
item.content.reduce((sum, item) => {
if (item.type === 'text') {
return sum + item.text.length;
}
return sum;
}, 0)
);
}
return sum;
}, 0);
// If the text length is less than half of the maximum token, no calculation is required
if (rawTextLen < maxTokens * 0.5) {
@@ -23,19 +42,21 @@ export function ChatContextFilter({
}
// filter startWith system prompt
const chatStartIndex = messages.findIndex((item) => item.obj !== ChatRoleEnum.System);
const systemPrompts: ChatItemType[] = messages.slice(0, chatStartIndex);
const chatPrompts: ChatItemType[] = messages.slice(chatStartIndex);
const chatStartIndex = messages.findIndex(
(item) => item.role !== ChatCompletionRequestMessageRoleEnum.System
);
const systemPrompts: ChatCompletionMessageParam[] = messages.slice(0, chatStartIndex);
const chatPrompts: ChatCompletionMessageParam[] = messages.slice(chatStartIndex);
// reduce token of systemPrompt
maxTokens -= countMessagesTokens(systemPrompts);
maxTokens -= countGptMessagesTokens(systemPrompts);
// Save the last chat prompt(question)
const question = chatPrompts.pop();
if (!question) {
return systemPrompts;
}
const chats: ChatItemType[] = [question];
const chats: ChatCompletionMessageParam[] = [question];
// 从后往前截取对话内容, 每次需要截取2个
while (1) {
@@ -45,7 +66,7 @@ export function ChatContextFilter({
break;
}
const tokens = countMessagesTokens([assistant, user]);
const tokens = countGptMessagesTokens([assistant, user]);
maxTokens -= tokens;
/* 整体 tokens 超出范围,截断 */
if (maxTokens < 0) {
@@ -62,6 +83,30 @@ export function ChatContextFilter({
return [...systemPrompts, ...chats];
}
/**
 * Normalize GPT messages right before sending a request:
 * - drop messages with empty content (falsy string/array or empty array);
 * - trim whitespace around plain-string content;
 * - collapse a single-text-part array back into a plain string;
 * - leave multi-part (e.g. text + image) content untouched.
 */
export const formatGPTMessagesInRequestBefore = (messages: ChatCompletionMessageParam[]) => {
  const formatted: ChatCompletionMessageParam[] = [];

  for (const message of messages) {
    const { content } = message;
    if (!content) continue;

    if (typeof content === 'string') {
      formatted.push({ ...message, content: content.trim() });
      continue;
    }

    // content is an array of content parts
    if (content.length === 0) continue;
    if (content.length === 1 && content[0].type === 'text') {
      formatted.push({ ...message, content: content[0].text });
      continue;
    }

    formatted.push(message);
  }

  return formatted;
};
/**
string to vision model. Follow the markdown code block rule for interception:
@@ -175,3 +220,21 @@ export async function formatStr2ChatContent(str: string) {
return content ? content : null;
}
/**
 * Inline remote images referenced in chat content as base64 data URIs so the
 * downstream model request does not depend on the external URLs staying
 * reachable. Plain-string content has no image parts and is returned as-is.
 *
 * Fix: build new content-part objects instead of mutating the caller's
 * `image_url` objects in place — previously the original message objects
 * were silently rewritten with data URIs as a side effect.
 *
 * @param content - either a plain string or an array of content parts
 * @returns the string unchanged, or a new array where every `image_url`
 *          part carries a `data:<mime>;base64,...` URL
 * @throws if any image download fails (axios rejection propagates)
 */
export const loadChatImgToBase64 = async (content: string | ChatCompletionContentPart[]) => {
  if (typeof content === 'string') {
    return content;
  }

  return Promise.all(
    content.map(async (item) => {
      if (item.type === 'text') return item;

      // Download the raw image bytes and re-encode them as a data URI.
      const response = await axios.get(item.image_url.url, {
        responseType: 'arraybuffer'
      });
      const base64 = Buffer.from(response.data).toString('base64');

      return {
        ...item,
        image_url: {
          ...item.image_url,
          url: `data:${response.headers['content-type']};base64,${base64}`
        }
      };
    })
  );
};