4.8 preview (#1288)

* Revert "lafAccount add pat & re request when token invalid (#76)" (#77)

This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be.

* perf: workflow ux

* system config

* Newflow (#89)

* docs: Add doc for Xinference (#1266)

Signed-off-by: Carson Yang <yangchuansheng33@gmail.com>

* Revert "lafAccount add pat & re request when token invalid (#76)" (#77)

This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be.

* perf: workflow ux

* system config

* Revert "lafAccount add pat & re request when token invalid (#76)" (#77)

This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be.

* Revert "lafAccount add pat & re request when token invalid (#76)" (#77)

This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be.

* Revert "lafAccount add pat & re request when token invalid (#76)" (#77)

This reverts commit 83d85dfe37adcaef4833385ea52ee79fd84720be.

* rename code

* move code

* update flow

* input type selector

* perf: workflow runtime

* feat: node adapt newflow

* feat: adapt plugin

* feat: 360 connection

* check workflow

* perf: flow performance

* change plugin input type (#81)

* change plugin input type

* plugin label mode

* perf: nodecard

* debug

* perf: debug ui

* connection ui

* change workflow ui (#82)

* feat: workflow debug

* adapt openAPI for new workflow (#83)

* adapt openAPI for new workflow

* i18n

* perf: plugin debug

* plugin input ui

* delete

* perf: global variable select

* fix rebase

* perf: workflow performance

* feat: input render type icon

* input icon

* adapt flow (#84)

* adapt newflow

* temp

* temp

* fix

* feat: app schedule trigger

* feat: app schedule trigger

* perf: schedule ui

* feat: isolated-vm run js code

* perf: workflow variable table ui

* feat: adapt simple mode

* feat: adapt input params

* output

* feat: adapt template

* fix: ts

* add if-else module (#86)

* perf: worker

* if else node

* perf: tiktoken worker

* fix: ts

* perf: tiktoken

* fix if-else node (#87)

* fix if-else node

* type

* fix

* perf: audio render

* perf: Parallel worker

* log

* perf: if else node

* adapt plugin

* prompt

* perf: reference ui

* reference ui

* handle ux

* template ui and plugin tool

* adapt v1 workflow

* adapt v1 workflow completions

* perf: time variables

* feat: workflow keyboard shortcuts

* adapt v1 workflow

* update workflow example doc (#88)

* fix: simple mode select tool

---------

Signed-off-by: Carson Yang <yangchuansheng33@gmail.com>
Co-authored-by: Carson Yang <yangchuansheng33@gmail.com>
Co-authored-by: heheer <71265218+newfish-cmyk@users.noreply.github.com>

* doc

* perf: extract node

* extra node field

* update plugin version

* doc

* variable

* change doc & fix prompt editor (#90)

* fold workflow code

* value type label

---------

Signed-off-by: Carson Yang <yangchuansheng33@gmail.com>
Co-authored-by: Carson Yang <yangchuansheng33@gmail.com>
Co-authored-by: heheer <71265218+newfish-cmyk@users.noreply.github.com>
Commit 439c819ff1 (parent b08d81f887), authored by Archer and committed by GitHub on 2024-04-25 17:51:20 +08:00.
505 changed files with 23570 additions and 18215 deletions.


@@ -0,0 +1,60 @@
import { parentPort } from 'worker_threads';
import TurndownService from 'turndown';
//@ts-ignore
import domino from 'domino';
//@ts-ignore
import * as turndownPluginGfm from 'joplin-turndown-plugin-gfm';

const turndownService = new TurndownService({
  headingStyle: 'atx',
  bulletListMarker: '-',
  codeBlockStyle: 'fenced',
  fence: '```',
  emDelimiter: '_',
  strongDelimiter: '**',
  linkStyle: 'inlined',
  linkReferenceStyle: 'full'
});

parentPort?.on('message', (params: { html: string }) => {
  const html2md = (html: string): string => {
    try {
      // Build a DOM outside the browser so turndown can walk it.
      const window = domino.createWindow(html);
      const document = window.document;

      // Drop non-content tags and render <pre> blocks as fenced code,
      // keeping the element's class attribute as the language hint.
      turndownService.remove(['i', 'script', 'iframe']);
      turndownService.addRule('codeBlock', {
        filter: 'pre',
        replacement(_, node) {
          const content = node.textContent?.trim() || '';
          // @ts-ignore
          const codeName = node?._attrsByQName?.class?.data?.trim() || '';
          return `\n\`\`\`${codeName}\n${content}\n\`\`\`\n`;
        }
      });

      turndownService.use(turndownPluginGfm.gfm);

      // @ts-ignore
      return turndownService.turndown(document);
    } catch (error) {
      return '';
    }
  };

  try {
    const md = html2md(params?.html || '');

    parentPort?.postMessage({
      type: 'success',
      data: md
    });
  } catch (error) {
    parentPort?.postMessage({
      type: 'error',
      data: error
    });
  }

  global?.close?.();
});

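For orientation, a minimal sketch of driving this worker directly from the main thread. The worker file path and sample HTML are illustrative assumptions; in this commit the path lookup is centralized in the getWorker/runWorker helpers shown further down.

import { Worker } from 'worker_threads';

// Path is illustrative; the real path is resolved by getWorker() in this commit.
const worker = new Worker('./htmlStr2Md.js');

worker.once('message', (msg: { type: 'success' | 'error'; data: any }) => {
  if (msg.type === 'success') console.log(msg.data); // the converted Markdown
  worker.terminate();
});

worker.postMessage({ html: '<h1>Title</h1><pre class="ts">const a = 1;</pre>' });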
File diff suppressed because one or more lines are too long


@@ -0,0 +1,116 @@
/* Only the gpt-3.5-turbo (cl100k_base) tokenizer is used to estimate tokens */
import { Tiktoken } from 'js-tiktoken/lite';
import encodingJson from './cl100k_base.json';
import {
  ChatCompletionMessageParam,
  ChatCompletionContentPart,
  ChatCompletionCreateParams,
  ChatCompletionTool
} from '@fastgpt/global/core/ai/type';
import { ChatCompletionRequestMessageRoleEnum } from '@fastgpt/global/core/ai/constants';
import { parentPort } from 'worker_threads';

const enc = new Tiktoken(encodingJson);

/* count message tokens */
parentPort?.on(
  'message',
  ({
    id,
    messages,
    tools,
    functionCall
  }: {
    id: string;
    messages: ChatCompletionMessageParam[];
    tools?: ChatCompletionTool[];
    functionCall?: ChatCompletionCreateParams.Function[];
  }) => {
    const start = Date.now();

    /* count tokens of a single prompt (string or multimodal content parts) */
    const countPromptTokens = (
      prompt: string | ChatCompletionContentPart[] | null | undefined = '',
      role: '' | `${ChatCompletionRequestMessageRoleEnum}` = ''
    ) => {
      const promptText = (() => {
        if (!prompt) return '';
        if (typeof prompt === 'string') return prompt;
        let promptText = '';
        prompt.forEach((item) => {
          if (item.type === 'text') {
            promptText += item.text;
          } else if (item.type === 'image_url') {
            promptText += item.image_url.url;
          }
        });
        return promptText;
      })();

      const text = `${role}\n${promptText}`.trim();

      try {
        const encodeText = enc.encode(text);
        // Each role-tagged message carries a few extra framing tokens.
        const supplementaryToken = role ? 4 : 0;
        return encodeText.length + supplementaryToken;
      } catch (error) {
        // Fall back to character length if encoding fails.
        return text.length;
      }
    };

    const countToolsTokens = (
      tools?: ChatCompletionTool[] | ChatCompletionCreateParams.Function[]
    ) => {
      if (!tools || tools.length === 0) return 0;

      const toolText = tools
        ? JSON.stringify(tools)
            .replace('"', '')
            .replace('\n', '')
            .replace(/( ){2,}/g, ' ')
        : '';

      return enc.encode(toolText).length;
    };

    const total =
      messages.reduce((sum, item) => {
        // Collect the text of tool_calls and function_call on assistant messages.
        const functionCallPrompt = (() => {
          let prompt = '';
          if (item.role === ChatCompletionRequestMessageRoleEnum.Assistant) {
            const toolCalls = item.tool_calls;
            prompt +=
              toolCalls
                ?.map((item) => `${item?.function?.name} ${item?.function?.arguments}`.trim())
                ?.join('') || '';

            const functionCall = item.function_call;
            prompt += `${functionCall?.name} ${functionCall?.arguments}`.trim();
          }
          return prompt;
        })();

        const contentPrompt = (() => {
          if (!item.content) return '';
          if (typeof item.content === 'string') return item.content;
          return item.content
            .map((item) => {
              if (item.type === 'text') return item.text;
              return '';
            })
            .join('');
        })();

        return sum + countPromptTokens(`${contentPrompt}${functionCallPrompt}`, item.role);
      }, 0) +
      countToolsTokens(tools) +
      countToolsTokens(functionCall);

    parentPort?.postMessage({
      id,
      type: 'success',
      data: total
    });

    global?.close?.();
  }
);

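The worker echoes the caller-supplied id, which lets a response be matched to its request when a worker handles more than one message. A minimal one-shot caller sketch under that assumption follows; the function name and import path are illustrative, and FastGPT's actual calling code is not shown in this diff.

import type { ChatCompletionMessageParam } from '@fastgpt/global/core/ai/type';
import { getWorker, WorkerNameEnum } from '../utils'; // import path is an assumption

// Hypothetical one-shot caller; any worker reuse or pooling is outside this diff.
export const countGptMessagesTokens = (messages: ChatCompletionMessageParam[]) =>
  new Promise<number>((resolve, reject) => {
    const worker = getWorker(WorkerNameEnum.countGptMessagesTokens);
    const id = `${Date.now()}-${Math.random()}`;

    worker.on('message', (msg: { id: string; type: 'success' | 'error'; data: number }) => {
      if (msg.id !== id) return; // the echoed id correlates request and response
      worker.terminate();
      if (msg.type === 'success') resolve(msg.data);
      else reject(msg.data);
    });
    worker.on('error', (err) => {
      worker.terminate();
      reject(err);
    });

    worker.postMessage({ id, messages });
  });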

@@ -0,0 +1,32 @@
import { Worker } from 'worker_threads';
import path from 'path';

export enum WorkerNameEnum {
  htmlStr2Md = 'htmlStr2Md',
  countGptMessagesTokens = 'countGptMessagesTokens'
}

export const getWorker = (name: WorkerNameEnum) => {
  // Worker bundles are resolved from the compiled Next.js server output.
  const workerPath = path.join(process.cwd(), '.next', 'server', 'worker', `${name}.js`);
  return new Worker(workerPath);
};

export const runWorker = <T = any>(name: WorkerNameEnum, params?: Record<string, any>) => {
  return new Promise<T>((resolve, reject) => {
    const worker = getWorker(name);
    worker.postMessage(params);

    worker.on('message', (msg: { type: 'success' | 'error'; data: any }) => {
      if (msg.type === 'error') return reject(msg.data);
      resolve(msg.data);
    });

    worker.on('error', (err) => {
      worker.terminate();
      reject(err);
    });
  });
};
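
A short usage sketch for the helper above; the wrapper names, payloads, and the fixed id are illustrative and not part of the commit.

// Hypothetical convenience wrappers around runWorker for the two workers in this diff.
export const htmlToMd = (html: string) =>
  runWorker<string>(WorkerNameEnum.htmlStr2Md, { html });

export const countTokens = (messages: any[]) =>
  runWorker<number>(WorkerNameEnum.countGptMessagesTokens, { id: 'single-use', messages });

// htmlToMd('<p>Hello <strong>worker</strong></p>').then((md) => console.log(md));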