14 Commits

| Author | SHA1 | Message | Date |
|--------|------|---------|------|
| Vinlic | 984d724367 | Release 0.0.33 | 2024-04-30 11:54:17 +08:00 |
| Vinlic | 889c874264 | Support Kimi+ agent invocation | 2024-04-30 11:53:51 +08:00 |
| Vinlic | 6105410dd2 | Release 0.0.32 | 2024-04-28 17:54:44 +08:00 |
| Vinlic | 01ff5c250a | Handle the conversation-merge issue caused by sending files in the first turn | 2024-04-28 17:54:31 +08:00 |
| Vinlic | 82a8359634 | Release 0.0.31 | 2024-04-28 14:16:27 +08:00 |
| Vinlic | 7275ab7e11 | Merge branch 'master' of https://github.com/LLM-Red-Team/kimi-free-api | 2024-04-28 14:14:28 +08:00 |
| Vinlic | d862808226 | update README | 2024-04-28 14:14:13 +08:00 |
| Vinlic | 7cc6033201 | Support native multi-turn conversation | 2024-04-28 14:11:16 +08:00 |
| Vinlic科技 | 8f72c5de78 | Merge pull request #89 from KPCOFGS/master (Updated the English README_EN.md) | 2024-04-28 03:29:25 +08:00 |
| Shixian Sheng | 72df4e1fc1 | Update README_EN.md | 2024-04-27 12:18:13 -04:00 |
| Vinlic科技 | 9b00be5883 | Update README.md | 2024-04-26 16:49:17 +08:00 |
| Vinlic科技 | 61cc3a4655 | Update README.md | 2024-04-26 16:48:30 +08:00 |
| Vinlic科技 | 1aa45264f1 | Merge pull request #86 from Yanyutin753/main (Create sync.yml to update code) | 2024-04-25 15:17:58 +08:00 |
| Yanyutin753 | 56caa486c8 | Create sync.yml to update code | 2024-04-25 15:14:06 +08:00 |
7 changed files with 128 additions and 38 deletions

.github/workflows/sync.yml (new file, 48 lines added)

@@ -0,0 +1,48 @@
name: Upstream Sync
permissions:
  contents: write
  issues: write
  actions: write
on:
  schedule:
    - cron: '0 * * * *' # every hour
  workflow_dispatch:
jobs:
  sync_latest_from_upstream:
    name: Sync latest commits from upstream repo
    runs-on: ubuntu-latest
    if: ${{ github.event.repository.fork }}
    steps:
      - uses: actions/checkout@v4
      - name: Clean issue notice
        uses: actions-cool/issues-helper@v3
        with:
          actions: 'close-issues'
          labels: '🚨 Sync Fail'
      - name: Sync upstream changes
        id: sync
        uses: aormsby/Fork-Sync-With-Upstream-action@v3.4
        with:
          upstream_sync_repo: LLM-Red-Team/kimi-free-api
          upstream_sync_branch: master
          target_sync_branch: master
          target_repo_token: ${{ secrets.GITHUB_TOKEN }} # automatically generated, no need to set
          test_mode: false
      - name: Sync check
        if: failure()
        uses: actions-cool/issues-helper@v3
        with:
          actions: 'create-issue'
          title: '🚨 同步失败 | Sync Fail'
          labels: '🚨 Sync Fail'
          body: |
            Due to a change in the workflow file of the LLM-Red-Team/kimi-free-api upstream repository, GitHub has automatically suspended the scheduled automatic update. You need to manually sync your fork. Please refer to the detailed [Tutorial][tutorial-en-US] for instructions.
            由于 LLM-Red-Team/kimi-free-api 上游仓库的 workflow 文件变更,导致 GitHub 自动暂停了本次自动更新,你需要手动 Sync Fork 一次,

README.md

@@ -11,7 +11,7 @@
![](https://img.shields.io/github/forks/llm-red-team/kimi-free-api.svg)
![](https://img.shields.io/docker/pulls/vinlic/kimi-free-api.svg)
Supports high-speed streaming output, multi-turn conversation, web search, long-document reading, and image parsing; zero-configuration deployment, multi-token support, and automatic cleanup of conversation traces.
Supports high-speed streaming output, multi-turn conversation, web search, agent conversation, long-document reading, and image parsing; zero-configuration deployment, multi-token support, and automatic cleanup of conversation traces.
Fully compatible with the ChatGPT API.
@@ -42,6 +42,7 @@
* [Vercel Deployment](#Vercel部署)
* [Zeabur Deployment](#Zeabur部署)
* [Native Deployment](#原生部署)
* [Recommended Clients](#推荐使用客户端)
* [API List](#接口列表)
* [Chat Completion](#对话补全)
* [Document Interpretation](#文档解读)
@@ -84,6 +85,12 @@ https://udify.app/chat/Po0F6BMJ15q5vu2P
![Web search](./doc/example-2.png)
### Agent Conversation Demo
This example uses the [翻译通](https://kimi.moonshot.cn/chat/coo6l3pkqq4ri39f36bg) agent.
![Agent conversation](./doc/example-7.png)
### Long Document Interpretation Demo
![Long document interpretation](./doc/example-5.png)
@@ -241,6 +248,14 @@ pm2 reload kimi-free-api
pm2 stop kimi-free-api
```
## Recommended Clients
Accessing the free-api series of projects with the following secondary-development clients is faster and simpler, and they support document/image upload!
LobeChat, extended by [Clivia](https://github.com/Yanyutin753/lobe-chat): [https://github.com/Yanyutin753/lobe-chat](https://github.com/Yanyutin753/lobe-chat)
ChatGPT Web, extended by [时光@](https://github.com/SuYxh): [https://github.com/SuYxh/chatgpt-web-sea](https://github.com/SuYxh/chatgpt-web-sea)
## API List
Currently the OpenAI-compatible `/v1/chat/completions` endpoint is supported. You can access it with an OpenAI client (or any other compatible client), or integrate it through online services such as [dify](https://dify.ai/).
@@ -260,8 +275,12 @@ Authorization: Bearer [refresh_token]
Request data:
```json
{
// The model name can be anything; if you do not want the search process in the output, include silent_search in the model name
// model can be anything; if you do not want the search process in the output, include silent_search in the model name
// To use a Kimi+ agent, set model to the agent ID: the trailing 20-character alphanumeric ID in the browser address bar
"model": "kimi",
// Currently, multi-turn conversation is implemented via message merging, which may degrade capability in some scenarios and is limited by the maximum token count of a single turn
// If you want a native multi-turn conversation experience, pass the id obtained from the first-turn response to continue the context; note that when using this, the first turn must pass none, otherwise the second turn will return an empty response
// "conversation_id": "cnndivilnl96vah411dg",
"messages": [
{
"role": "user",
@@ -278,6 +297,7 @@ Authorization: Bearer [refresh_token]
Response data:
```json
{
// If you want a native multi-turn conversation experience, you can pass this id as the conversation_id of the next turn to continue the context
"id": "cnndivilnl96vah411dg",
"model": "kimi",
"object": "chat.completion",

README_EN.md

@@ -20,7 +20,7 @@ Ali Tongyi (Qwen) API to API [qwen-free-api](https://github.com/LLM-Red-Team/qwe
ZhipuAI (ChatGLM) API to API [glm-free-api](https://github.com/LLM-Red-Team/glm-free-api)
MetaAI (metaso) API to API [metaso-free-api](https://github.com/LLM-Red-Team/metaso-free-api)
Meta Sota (metaso) API to API [metaso-free-api](https://github.com/LLM-Red-Team/metaso-free-api)
Iflytek Spark (Spark) API to API [spark-free-api](https://github.com/LLM-Red-Team/spark-free-api)

doc/example-7.png (new binary file, 44 KiB; not shown)

package.json

@@ -1,6 +1,6 @@
{
"name": "kimi-free-api",
"version": "0.0.30",
"version": "0.0.33",
"description": "Kimi Free API Server",
"type": "module",
"main": "dist/index.js",

src/api/controllers/chat.ts

@@ -237,9 +237,10 @@ async function promptSnippetSubmit(query: string, refreshToken: string) {
* @param messages Messages in the GPT-series format; for multi-turn conversation, provide the full context
* @param refreshToken refresh_token used to refresh the access_token
* @param useSearch Whether to enable web search
* @param refConvId Referenced conversation ID
* @param retryCount Retry count
*/
async function createCompletion(model = MODEL_NAME, messages: any[], refreshToken: string, useSearch = true, retryCount = 0) {
async function createCompletion(model = MODEL_NAME, messages: any[], refreshToken: string, useSearch = true, refConvId?: string, retryCount = 0) {
return (async () => {
logger.info(messages);
@@ -252,15 +253,16 @@ async function createCompletion(model = MODEL_NAME, messages: any[], refreshToke
.catch(err => logger.error(err));
// Create the conversation
const convId = await createConversation("未命名会话", refreshToken);
const convId = /[0-9a-zA-Z]{20}/.test(refConvId) ? refConvId : await createConversation("未命名会话", refreshToken);
// Request the stream
const {
accessToken,
userId
} = await acquireToken(refreshToken);
const sendMessages = messagesPrepare(messages);
const sendMessages = messagesPrepare(messages, !!refConvId);
const result = await axios.post(`https://kimi.moonshot.cn/api/chat/${convId}/completion/stream`, {
kimiplus_id: /^[0-9a-z]{20}$/.test(model) ? model : undefined,
messages: sendMessages,
refs,
use_search: useSearch
@@ -268,6 +270,7 @@ async function createCompletion(model = MODEL_NAME, messages: any[], refreshToke
headers: {
Authorization: `Bearer ${accessToken}`,
Referer: `https://kimi.moonshot.cn/chat/${convId}`,
'Priority': 'u=1, i',
'X-Traffic-Id': userId,
...FAKE_HEADERS
},
@@ -283,7 +286,8 @@ async function createCompletion(model = MODEL_NAME, messages: any[], refreshToke
logger.success(`Stream has completed transfer ${util.timestamp() - streamStartTime}ms`);
// Asynchronously remove the conversation; if the message is non-compliant this may throw a database error, which can be ignored
removeConversation(convId, refreshToken)
// A referenced conversation is not removed, since we do not know when you will end it
!refConvId && removeConversation(convId, refreshToken)
.catch(err => console.error(err));
promptSnippetSubmit(sendMessages[0].content, refreshToken)
.catch(err => console.error(err));
@@ -296,7 +300,7 @@ async function createCompletion(model = MODEL_NAME, messages: any[], refreshToke
logger.warn(`Try again after ${RETRY_DELAY / 1000}s...`);
return (async () => {
await new Promise(resolve => setTimeout(resolve, RETRY_DELAY));
return createCompletion(model, messages, refreshToken, useSearch, retryCount + 1);
return createCompletion(model, messages, refreshToken, useSearch, refConvId, retryCount + 1);
})();
}
throw err;
@@ -310,9 +314,10 @@ async function createCompletion(model = MODEL_NAME, messages: any[], refreshToke
* @param messages Messages in the GPT-series format; for multi-turn conversation, provide the full context
* @param refreshToken refresh_token used to refresh the access_token
* @param useSearch Whether to enable web search
* @param refConvId Referenced conversation ID
* @param retryCount Retry count
*/
async function createCompletionStream(model = MODEL_NAME, messages: any[], refreshToken: string, useSearch = true, retryCount = 0) {
async function createCompletionStream(model = MODEL_NAME, messages: any[], refreshToken: string, useSearch = true, refConvId?: string, retryCount = 0) {
return (async () => {
logger.info(messages);
@@ -325,15 +330,16 @@ async function createCompletionStream(model = MODEL_NAME, messages: any[], refre
.catch(err => logger.error(err));
// Create the conversation
const convId = await createConversation("未命名会话", refreshToken);
const convId = /[0-9a-zA-Z]{20}/.test(refConvId) ? refConvId : await createConversation("未命名会话", refreshToken);
// Request the stream
const {
accessToken,
userId
} = await acquireToken(refreshToken);
const sendMessages = messagesPrepare(messages);
const sendMessages = messagesPrepare(messages, !!refConvId);
const result = await axios.post(`https://kimi.moonshot.cn/api/chat/${convId}/completion/stream`, {
kimiplus_id: /^[0-9a-z]{20}$/.test(model) ? model : undefined,
messages: sendMessages,
refs,
use_search: useSearch
@@ -343,6 +349,7 @@ async function createCompletionStream(model = MODEL_NAME, messages: any[], refre
headers: {
Authorization: `Bearer ${accessToken}`,
Referer: `https://kimi.moonshot.cn/chat/${convId}`,
'Priority': 'u=1, i',
'X-Traffic-Id': userId,
...FAKE_HEADERS
},
@@ -354,7 +361,8 @@ async function createCompletionStream(model = MODEL_NAME, messages: any[], refre
return createTransStream(model, convId, result.data, () => {
logger.success(`Stream has completed transfer ${util.timestamp() - streamStartTime}ms`);
// Asynchronously remove the conversation after the stream finishes; if the message is non-compliant this may throw a database error, which can be ignored
removeConversation(convId, refreshToken)
// A referenced conversation is not removed, since we do not know when you will end it
!refConvId && removeConversation(convId, refreshToken)
.catch(err => console.error(err));
promptSnippetSubmit(sendMessages[0].content, refreshToken)
.catch(err => console.error(err));
@@ -366,7 +374,7 @@ async function createCompletionStream(model = MODEL_NAME, messages: any[], refre
logger.warn(`Try again after ${RETRY_DELAY / 1000}s...`);
return (async () => {
await new Promise(resolve => setTimeout(resolve, RETRY_DELAY));
return createCompletionStream(model, messages, refreshToken, useSearch, retryCount + 1);
return createCompletionStream(model, messages, refreshToken, useSearch, refConvId, retryCount + 1);
})();
}
throw err;
@@ -447,14 +455,28 @@ function extractRefFileUrls(messages: any[]) {
* user: new message
*
* @param messages Messages in the GPT-series format; for multi-turn conversation, provide the full context
* @param isRefConv Whether this is a referenced conversation
*/
function messagesPrepare(messages: any[]) {
// Inject a message to boost attention
let latestMessage = messages[messages.length - 1];
let hasFileOrImage = Array.isArray(latestMessage.content)
&& latestMessage.content.some(v => (typeof v === 'object' && ['file', 'image_url'].includes(v['type'])));
// Inject the system prompt from the second turn onward
if (messages.length > 2) {
function messagesPrepare(messages: any[], isRefConv = false) {
let content;
if (isRefConv || messages.length < 2) {
content = messages.reduce((content, message) => {
if (_.isArray(message.content)) {
return message.content.reduce((_content, v) => {
if (!_.isObject(v) || v['type'] != 'text') return _content;
return _content + `${v["text"] || ""}\n`;
}, content);
}
return content += `${message.role == 'user' ? wrapUrlsToTags(message.content) : message.content}\n`;
}, '')
logger.info("\n透传内容\n" + content);
}
else {
// Inject a message to boost attention
let latestMessage = messages[messages.length - 1];
let hasFileOrImage = Array.isArray(latestMessage.content)
&& latestMessage.content.some(v => (typeof v === 'object' && ['file', 'image_url'].includes(v['type'])));
// Inject the system prompt from the second turn onward
if (hasFileOrImage) {
let newFileMessage = {
"content": "关注用户最新发送文件和消息",
@@ -470,18 +492,18 @@ function messagesPrepare(messages: any[]) {
messages.splice(messages.length - 1, 0, newTextMessage);
logger.info("注入提升尾部消息注意力system prompt");
}
content = messages.reduce((content, message) => {
if (_.isArray(message.content)) {
return message.content.reduce((_content, v) => {
if (!_.isObject(v) || v['type'] != 'text') return _content;
return _content + `${message.role || "user"}:${v["text"] || ""}\n`;
}, content);
}
return content += `${message.role || "user"}:${message.role == 'user' ? wrapUrlsToTags(message.content) : message.content}\n`;
}, '')
logger.info("\n对话合并\n" + content);
}
const content = messages.reduce((content, message) => {
if (Array.isArray(message.content)) {
return message.content.reduce((_content, v) => {
if (!_.isObject(v) || v['type'] != 'text') return _content;
return _content + `${message.role || "user"}:${v["text"] || ""}\n`;
}, content);
}
return content += `${message.role || "user"}:${message.role == 'user' ? wrapUrlsToTags(message.content) : message.content}\n`;
}, '');
logger.info("\n对话合并\n" + content);
return [
{ role: 'user', content }
]
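
For illustration, a short sketch of what `messagesPrepare` now returns for plain-string message contents (the example messages below are hypothetical, not code from this diff):

```ts
// Hypothetical history, plain-string contents only.
const history = [
  { role: "system", content: "You are a translation assistant." },
  { role: "user", content: "Hello" },
  { role: "assistant", content: "Hi! What should I translate?" },
  { role: "user", content: "Translate 'good morning' into French." },
];

// Merge mode (no refConvId and two or more messages): the history is flattened
// into a single user message of "role:content" lines, so a brand-new Kimi
// conversation still sees the earlier turns. (An extra attention-boosting
// system line is also spliced in before the last message; omitted here.)
// messagesPrepare(history) ≈ [{
//   role: "user",
//   content: "system:You are a translation assistant.\nuser:Hello\nassistant:Hi! What should I translate?\nuser:Translate 'good morning' into French.\n"
// }]

// Pass-through mode (refConvId present, or a single first-turn message): only
// the text itself is forwarded, because the referenced Kimi conversation
// already holds the earlier context.
// messagesPrepare([history[3]], true) ≈ [{ role: "user", content: "Translate 'good morning' into French.\n" }]
```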
@@ -648,8 +670,8 @@ async function uploadFile(fileUrl: string, refreshToken: string) {
...FAKE_HEADERS
}
})
.then(() => resolve(true))
.catch(() => resolve(false));
.then(() => resolve(true))
.catch(() => resolve(false));
});
}

src/api/routes/chat.ts

@@ -13,22 +13,22 @@ export default {
'/completions': async (request: Request) => {
request
.validate('body.conversation_id', v => _.isUndefined(v) || _.isString(v))
.validate('body.messages', _.isArray)
.validate('headers.authorization', _.isString)
// Split the refresh_token(s)
const tokens = chat.tokenSplit(request.headers.authorization);
// Randomly pick one refresh_token
const token = _.sample(tokens);
const model = request.body.model;
const messages = request.body.messages;
if (request.body.stream) {
const stream = await chat.createCompletionStream(model, messages, token, request.body.use_search);
const { model, conversation_id: convId, messages, stream, use_search } = request.body;
if (stream) {
const stream = await chat.createCompletionStream(model, messages, token, use_search, convId);
return new Response(stream, {
type: "text/event-stream"
});
}
else
return await chat.createCompletion(model, messages, token, request.body.use_search);
return await chat.createCompletion(model, messages, token, use_search, convId);
}
}
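
For illustration, a minimal sketch of consuming the streaming branch of this route (not code from this change; it assumes the service at http://localhost:8000, a valid refresh_token, and Node 18+ for global fetch and web streams):

```ts
// Request the SSE stream and print each "data:" line as it arrives
// (naive line-based parsing, for brevity).
const res = await fetch("http://localhost:8000/v1/chat/completions", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: "Bearer YOUR_REFRESH_TOKEN",
  },
  body: JSON.stringify({
    model: "kimi",
    stream: true,
    use_search: false,
    messages: [{ role: "user", content: "Introduce yourself in one sentence." }],
  }),
});

const reader = res.body!.pipeThrough(new TextDecoderStream()).getReader();
while (true) {
  const { value, done } = await reader.read();
  if (done) break;
  for (const line of value.split("\n")) {
    if (line.startsWith("data:")) console.log(line.slice(5).trim());
  }
}
```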