Mirror of https://github.com/labring/FastGPT.git, synced 2025-07-23 21:13:50 +00:00
4.8 preview (#1288)
* Revert "lafAccount add pat & re request when token invalid (#76)" (#77) This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be. * perf: workflow ux * system config * Newflow (#89) * docs: Add doc for Xinference (#1266) Signed-off-by: Carson Yang <yangchuansheng33@gmail.com> * Revert "lafAccount add pat & re request when token invalid (#76)" (#77) This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be. * perf: workflow ux * system config * Revert "lafAccount add pat & re request when token invalid (#76)" (#77) This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be. * Revert "lafAccount add pat & re request when token invalid (#76)" (#77) This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be. * Revert "lafAccount add pat & re request when token invalid (#76)" (#77) This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be. * rename code * move code * update flow * input type selector * perf: workflow runtime * feat: node adapt newflow * feat: adapt plugin * feat: 360 connection * check workflow * perf: flow 性能 * change plugin input type (#81) * change plugin input type * plugin label mode * perf: nodecard * debug * perf: debug ui * connection ui * change workflow ui (#82) * feat: workflow debug * adapt openAPI for new workflow (#83) * adapt openAPI for new workflow * i18n * perf: plugin debug * plugin input ui * delete * perf: global variable select * fix rebase * perf: workflow performance * feat: input render type icon * input icon * adapt flow (#84) * adapt newflow * temp * temp * fix * feat: app schedule trigger * feat: app schedule trigger * perf: schedule ui * feat: ioslatevm run js code * perf: workflow varialbe table ui * feat: adapt simple mode * feat: adapt input params * output * feat: adapt tamplate * fix: ts * add if-else module (#86) * perf: worker * if else node * perf: tiktoken worker * fix: ts * perf: tiktoken * fix if-else node (#87) * fix if-else node * type * fix * perf: audio render * perf: Parallel worker * log * perf: if else node * adapt plugin * prompt * perf: reference ui * reference ui * handle ux * template ui and plugin tool * adapt v1 workflow * adapt v1 workflow completions * perf: time variables * feat: workflow keyboard shortcuts * adapt v1 workflow * update workflow example doc (#88) * fix: simple mode select tool --------- Signed-off-by: Carson Yang <yangchuansheng33@gmail.com> Co-authored-by: Carson Yang <yangchuansheng33@gmail.com> Co-authored-by: heheer <71265218+newfish-cmyk@users.noreply.github.com> * doc * perf: extract node * extra node field * update plugin version * doc * variable * change doc & fix prompt editor (#90) * fold workflow code * value type label --------- Signed-off-by: Carson Yang <yangchuansheng33@gmail.com> Co-authored-by: Carson Yang <yangchuansheng33@gmail.com> Co-authored-by: heheer <71265218+newfish-cmyk@users.noreply.github.com>
@@ -1,152 +0,0 @@
/* Only the token of gpt-3.5-turbo is used */
import type { ChatItemType } from '../../../core/chat/type';
import { Tiktoken } from 'js-tiktoken/lite';
import { chats2GPTMessages } from '../../../core/chat/adapt';
import encodingJson from './cl100k_base.json';
import {
  ChatCompletionMessageParam,
  ChatCompletionContentPart,
  ChatCompletionCreateParams,
  ChatCompletionTool
} from '../../../core/ai/type';
import { ChatCompletionRequestMessageRoleEnum } from '../../../core/ai/constants';

/* init tikToken obj */
export function getTikTokenEnc() {
  if (typeof window !== 'undefined' && window.TikToken) {
    return window.TikToken;
  }
  if (typeof global !== 'undefined' && global.TikToken) {
    return global.TikToken;
  }

  const enc = new Tiktoken(encodingJson);

  if (typeof window !== 'undefined') {
    window.TikToken = enc;
  }
  if (typeof global !== 'undefined') {
    global.TikToken = enc;
  }

  return enc;
}

/* count one prompt tokens */
export function countPromptTokens(
  prompt: string | ChatCompletionContentPart[] | null | undefined = '',
  role: '' | `${ChatCompletionRequestMessageRoleEnum}` = ''
) {
  const enc = getTikTokenEnc();
  const promptText = (() => {
    if (!prompt) return '';
    if (typeof prompt === 'string') return prompt;
    let promptText = '';
    prompt.forEach((item) => {
      if (item.type === 'text') {
        promptText += item.text;
      } else if (item.type === 'image_url') {
        promptText += item.image_url.url;
      }
    });
    return promptText;
  })();

  const text = `${role}\n${promptText}`.trim();

  try {
    const encodeText = enc.encode(text);
    const supplementaryToken = role ? 4 : 0;
    return encodeText.length + supplementaryToken;
  } catch (error) {
    return text.length;
  }
}

export const countToolsTokens = (
  tools?: ChatCompletionTool[] | ChatCompletionCreateParams.Function[]
) => {
  if (!tools || tools.length === 0) return 0;

  const enc = getTikTokenEnc();

  const toolText = tools
    ? JSON.stringify(tools)
        .replace('"', '')
        .replace('\n', '')
        .replace(/( ){2,}/g, ' ')
    : '';

  return enc.encode(toolText).length;
};

/* count messages tokens */
export const countMessagesTokens = (messages: ChatItemType[]) => {
  const adaptMessages = chats2GPTMessages({ messages, reserveId: true });

  return countGptMessagesTokens(adaptMessages);
};
export const countGptMessagesTokens = (
  messages: ChatCompletionMessageParam[],
  tools?: ChatCompletionTool[],
  functionCall?: ChatCompletionCreateParams.Function[]
) =>
  messages.reduce((sum, item) => {
    // Evaluates the text of toolcall and functioncall
    const functionCallPrompt = (() => {
      let prompt = '';
      if (item.role === ChatCompletionRequestMessageRoleEnum.Assistant) {
        const toolCalls = item.tool_calls;
        prompt +=
          toolCalls
            ?.map((item) => `${item?.function?.name} ${item?.function?.arguments}`.trim())
            ?.join('') || '';

        const functionCall = item.function_call;
        prompt += `${functionCall?.name} ${functionCall?.arguments}`.trim();
      }
      return prompt;
    })();

    const contentPrompt = (() => {
      if (!item.content) return '';
      if (typeof item.content === 'string') return item.content;
      return item.content
        .map((item) => {
          if (item.type === 'text') return item.text;
          return '';
        })
        .join('');
    })();

    return sum + countPromptTokens(`${contentPrompt}${functionCallPrompt}`, item.role);
  }, 0) +
  countToolsTokens(tools) +
  countToolsTokens(functionCall);

/* slice messages from top to bottom by maxTokens */
export function sliceMessagesTB({
  messages,
  maxTokens
}: {
  messages: ChatItemType[];
  maxTokens: number;
}) {
  const adaptMessages = chats2GPTMessages({ messages, reserveId: true });
  let reduceTokens = maxTokens;
  let result: ChatItemType[] = [];

  for (let i = 0; i < adaptMessages.length; i++) {
    const item = adaptMessages[i];

    const tokens = countPromptTokens(item.content, item.role);
    reduceTokens -= tokens;

    if (reduceTokens > 0) {
      result.push(messages[i]);
    } else {
      break;
    }
  }

  return result.length === 0 && messages[0] ? [messages[0]] : result;
}
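For orientation, a minimal usage sketch of the counters defined in the deleted file above; the relative import path and the sample messages are placeholders, since the diff does not show where the module lives:

// Sketch only: './tiktoken' is a hypothetical import path.
import { countPromptTokens, countGptMessagesTokens } from './tiktoken';

// Count a single prompt string; a non-empty role adds a small fixed overhead (4 tokens).
const promptTokens = countPromptTokens('Summarize this document.', 'user');

// Count a full GPT-style message list (tool/function definitions are optional extra arguments).
const messageTokens = countGptMessagesTokens([
  { role: 'system', content: 'You are a helpful assistant.' },
  { role: 'user', content: 'Summarize this document.' }
]);

console.log(promptTokens, messageTokens);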
@@ -1,5 +0,0 @@
import type { Tiktoken } from 'js-tiktoken';

declare global {
  var TikToken: Tiktoken;
}
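The ambient declaration above is what lets the encoder be cached on `window`/`global` without TypeScript errors. A simplified sketch of the same caching pattern via `globalThis` (not the repo's implementation, which is the getTikTokenEnc function shown earlier):

import { Tiktoken } from 'js-tiktoken/lite';
import encodingJson from './cl100k_base.json';

declare global {
  // Typed slot for the cached encoder on the global object.
  var TikToken: Tiktoken;
}

// Build the encoder once and reuse it on subsequent calls.
export function getCachedEncoder(): Tiktoken {
  if (!globalThis.TikToken) {
    globalThis.TikToken = new Tiktoken(encodingJson);
  }
  return globalThis.TikToken;
}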
@@ -1,5 +1,36 @@
import dayjs from 'dayjs';
import cronParser from 'cron-parser';

export const formatTime2YMDHM = (time?: Date) =>
  time ? dayjs(time).format('YYYY-MM-DD HH:mm') : '';
export const formatTime2YMD = (time?: Date) => (time ? dayjs(time).format('YYYY-MM-DD') : '');

/* cron time parse */
export const cronParser2Fields = (cronString: string) => {
  try {
    const cronField = cronParser.parseExpression(cronString).fields;
    return cronField;
  } catch (error) {
    return null;
  }
};
// Get the next run time from a cron expression and a timezone
export const getNextTimeByCronStringAndTimezone = ({
  cronString,
  timezone
}: {
  cronString: string;
  timezone: string;
}) => {
  try {
    const options = {
      currentDate: dayjs().tz(timezone).format(),
      tz: timezone
    };
    const interval = cronParser.parseExpression(cronString, options);
    const date = interval.next().toString();
    return new Date(date);
  } catch (error) {
    return new Date('2099');
  }
};
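A short usage sketch for the cron helper added above. It assumes the dayjs utc and timezone plugins are registered somewhere (the diff does not show that setup, so it is repeated here), and the import path is a placeholder:

import dayjs from 'dayjs';
import utc from 'dayjs/plugin/utc';
import timezone from 'dayjs/plugin/timezone';
// Sketch only: './time' is a hypothetical import path.
import { getNextTimeByCronStringAndTimezone } from './time';

// dayjs().tz(...) requires these plugins; register them before calling the helper.
dayjs.extend(utc);
dayjs.extend(timezone);

// Next run of an "every day at 08:00" schedule, evaluated in Shanghai time.
const next = getNextTimeByCronStringAndTimezone({
  cronString: '0 8 * * *',
  timezone: 'Asia/Shanghai'
});

console.log(next.toISOString());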
@@ -28,7 +28,9 @@ export const simpleText = (text = '') => {
/*
  replace {{variable}} to value
*/
-export function replaceVariable(text: string, obj: Record<string, string | number>) {
+export function replaceVariable(text: any, obj: Record<string, string | number>) {
+  if (!(typeof text === 'string')) return text;
+
  for (const key in obj) {
    const val = obj[key];
    if (!['string', 'number'].includes(typeof val)) continue;
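A brief sketch of the behavior after this change; the import path is a placeholder, and the replacement loop body sits outside the hunk, so the first result is illustrative:

// Sketch only: './str' is a hypothetical import path.
import { replaceVariable } from './str';

// Strings get their {{key}} placeholders substituted with the matching values.
replaceVariable('Hello {{name}}, you have {{count}} new replies.', { name: 'Ada', count: 3 });
// -> 'Hello Ada, you have 3 new replies.'

// Non-string inputs are now returned unchanged instead of throwing in the template step.
replaceVariable(undefined, { name: 'Ada' }); // -> undefined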