Feat: Workflow loop node; feat: support openai o1; perf: query extension prompt; fix: intro was not delivered when the dataset was created (#2719)

* feat: loop node (#2675)

* loop node frontend

* loop-node

* fix-code

* fix version

* fix

* fix

* fix

* perf: loop array code

* perf: get histories error tip

* feat: support openai o1

* perf: query extension prompt

* feat: 4811 doc

* remove log

* fix: loop node z-index & variable picker type (#2710)

* perf: performance

* perf: workflow performance

* remove invalid code

* perf: code

* fix: invoice table refresh

* perf: loop node data type

* fix: loop node store assistants

* perf: target connection

* feat: loop node support help line

* perf: add default icon

---------

Co-authored-by: heheer <heheer@sealos.io>
Archer
2024-09-15 22:41:05 +08:00
committed by shilin66
parent a1096fee6a
commit 1afc5c3953
86 changed files with 2001 additions and 718 deletions


@@ -96,7 +96,7 @@ export async function generateQA(): Promise<any> {
   addLog.info(`[QA Queue] Start`);
   try {
-    const model = getLLMModel(data.model)?.model;
+    const modelData = getLLMModel(data.model);
     const prompt = `${data.prompt || Prompt_AgentQA.description}
 ${replaceVariable(Prompt_AgentQA.fixedText, { text })}`;
@@ -112,10 +112,11 @@ ${replaceVariable(Prompt_AgentQA.fixedText, { text })}`;
       timeout: 600000
     });
     const chatResponse = await ai.chat.completions.create({
-      model,
+      model: modelData.model,
       temperature: 0.3,
       messages: await loadRequestMessages({ messages, useVision: false }),
-      stream: false
+      stream: false,
+      ...modelData.defaultConfig
     });
     const answer = chatResponse.choices?.[0].message?.content || '';
@@ -150,7 +151,7 @@ ${replaceVariable(Prompt_AgentQA.fixedText, { text })}`;
       tmbId: data.tmbId,
       tokens: await countGptMessagesTokens(messages),
       billId: data.billId,
-      model
+      model: modelData.model
     });
   } else {
     addLog.info(`QA result 0:`, { answer });
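
Why the diff spreads `...modelData.defaultConfig` last: it lets per-model defaults override the generic request params, which is what allows o1-style models (which at release accepted only a fixed temperature and no streaming) to run through the same QA queue. Below is a minimal, self-contained TypeScript sketch of that pattern; the `LLMModelConfig` shape and the o1 values are illustrative assumptions, not FastGPT's actual config.

```typescript
// Sketch of the per-model defaultConfig override pattern (assumed shape).
type LLMModelConfig = {
  model: string;
  // Per-model request overrides; keys here win over the generic params.
  defaultConfig?: Record<string, unknown>;
};

// Illustrative entry: o1-era models reject custom temperatures and
// streaming, so the config pins the values the API accepts (assumption).
const o1Model: LLMModelConfig = {
  model: 'o1-preview',
  defaultConfig: {
    temperature: 1,
    stream: false
  }
};

function buildCompletionBody(modelData: LLMModelConfig, messages: unknown[]) {
  return {
    model: modelData.model,
    temperature: 0.3,
    messages,
    stream: false,
    // Spread last, so model-specific defaults replace the generic
    // values above -- the same ordering as in the diff.
    ...modelData.defaultConfig
  };
}

// Usage: the generic temperature 0.3 is replaced by 1 for the o1 entry.
console.log(buildCompletionBody(o1Model, []));
// -> { model: 'o1-preview', temperature: 1, messages: [], stream: false }
```

Design note: because the spread comes after the object literals, any key present in `defaultConfig` silently wins; if the generic params should ever take precedence instead, the spread would have to come first.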