39 Commits

Author | SHA1 | Message | Date
Vinlic | 82a8359634 | Release 0.0.31 | 2024-04-28 14:16:27 +08:00
Vinlic | 7275ab7e11 | Merge branch 'master' of https://github.com/LLM-Red-Team/kimi-free-api | 2024-04-28 14:14:28 +08:00
Vinlic | d862808226 | update README | 2024-04-28 14:14:13 +08:00
Vinlic | 7cc6033201 | Support native multi-turn conversation | 2024-04-28 14:11:16 +08:00
Vinlic科技 | 8f72c5de78 | Merge pull request #89 from KPCOFGS/master: Updated the English README_EN.md | 2024-04-28 03:29:25 +08:00
Shixian Sheng | 72df4e1fc1 | Update README_EN.md | 2024-04-27 12:18:13 -04:00
Vinlic科技 | 9b00be5883 | Update README.md | 2024-04-26 16:49:17 +08:00
Vinlic科技 | 61cc3a4655 | Update README.md | 2024-04-26 16:48:30 +08:00
Vinlic科技 | 1aa45264f1 | Merge pull request #86 from Yanyutin753/main: Create sync.yml to update code | 2024-04-25 15:17:58 +08:00
Yanyutin753 | 56caa486c8 | Create sync.yml to update code | 2024-04-25 15:14:06 +08:00
Vinlic | 2aa6465a36 | Release 0.0.30 | 2024-04-25 10:49:26 +08:00
Vinlic | 09250f208a | Merge branch 'master' of https://github.com/LLM-Red-Team/kimi-free-api | 2024-04-25 10:49:17 +08:00
Vinlic | a2d5ab9390 | Fix an issue where some large files could not be uploaded and processed | 2024-04-25 10:47:57 +08:00
Vinlic科技 | fe584180b1 | Merge pull request #84 from KPCOFGS/master: Updated the Chinese and English README.md files | 2024-04-25 08:59:42 +08:00
Shi Sheng | c1c601b498 | Update README_EN.md | 2024-04-24 20:51:11 -04:00
Shi Sheng | b9caca3289 | Update README.md | 2024-04-24 15:41:33 -04:00
Shi Sheng | 2b32fc66f4 | Update README_EN.md | 2024-04-24 15:36:38 -04:00
Shi Sheng | bffd5a24a3 | Update README_EN.md | 2024-04-24 15:22:32 -04:00
Vinlic科技 | 95f8c4e3e3 | Merge pull request #83 from KPCOFGS/master: Updated the Chinese and English README files | 2024-04-24 19:53:15 +08:00
Shi Sheng | 0632d8111e | Update README.md | 2024-04-24 07:48:16 -04:00
Shi Sheng | f1aa2e822c | Update README_EN.md | 2024-04-24 07:48:08 -04:00
Vinlic科技 | 53436b5f21 | Update README.md | 2024-04-24 14:33:24 +08:00
Vinlic科技 | e8284288c9 | Merge pull request #81 from Yanyutin753/tem-main: feat support /v1/models to be better use lobechat | 2024-04-24 13:34:13 +08:00
Clivia | 04db70bec5 | Merge branch 'LLM-Red-Team:master' into tem-main | 2024-04-24 13:31:23 +08:00
Yanyutin753 | f7c1fa7be3 | feat support /v1/models to be better use lobechat | 2024-04-24 13:30:46 +08:00
Vinlic科技 | b9d479b9f6 | Merge pull request #80 from KPCOFGS/master: Updated the Chinese and English README files | 2024-04-24 13:29:41 +08:00
Shi Sheng | c9c26fdd31 | Update README_EN.md | 2024-04-23 08:26:15 -04:00
Shi Sheng | 43e14b6e3e | Update README.md | 2024-04-23 08:22:38 -04:00
Shi Sheng | 65a3fed83b | Update README_EN.md | 2024-04-23 08:20:52 -04:00
Shi Sheng | 4a225853af | Update README.md | 2024-04-23 08:20:28 -04:00
Shi Sheng | 6b343f4094 | Update README_EN.md | 2024-04-23 08:17:49 -04:00
Shi Sheng | e8c6622e83 | Update README_EN.md | 2024-04-23 08:15:43 -04:00
Shi Sheng | ae6dc4a79f | Update README_EN.md | 2024-04-23 08:09:42 -04:00
Shi Sheng | bdb8ced5ce | Update README.md | 2024-04-23 08:08:49 -04:00
Shi Sheng | a0c1bba3c9 | Update README.md | 2024-04-23 08:08:00 -04:00
Shi Sheng | c6da81a53e | Update README.md | 2024-04-23 08:06:25 -04:00
Vinlic科技 | 77d42d9484 | Update README.md | 2024-04-22 16:43:00 +08:00
Vinlic科技 | d73a9bc95d | Merge pull request #72 from XunjunYin/master: Update: README.md typo | 2024-04-20 16:51:35 +08:00
Xunjun Yin | 65f45697e8 | Update: README.md typo | 2024-04-20 16:34:55 +08:00
8 changed files with 248 additions and 87 deletions

.github/workflows/sync.yml (new file, 48 lines)

@@ -0,0 +1,48 @@
name: Upstream Sync

permissions:
  contents: write
  issues: write
  actions: write

on:
  schedule:
    - cron: '0 * * * *' # every hour
  workflow_dispatch:

jobs:
  sync_latest_from_upstream:
    name: Sync latest commits from upstream repo
    runs-on: ubuntu-latest
    if: ${{ github.event.repository.fork }}

    steps:
      - uses: actions/checkout@v4

      - name: Clean issue notice
        uses: actions-cool/issues-helper@v3
        with:
          actions: 'close-issues'
          labels: '🚨 Sync Fail'

      - name: Sync upstream changes
        id: sync
        uses: aormsby/Fork-Sync-With-Upstream-action@v3.4
        with:
          upstream_sync_repo: LLM-Red-Team/kimi-free-api
          upstream_sync_branch: master
          target_sync_branch: master
          target_repo_token: ${{ secrets.GITHUB_TOKEN }} # automatically generated, no need to set
          test_mode: false

      - name: Sync check
        if: failure()
        uses: actions-cool/issues-helper@v3
        with:
          actions: 'create-issue'
          title: '🚨 同步失败 | Sync Fail'
          labels: '🚨 Sync Fail'
          body: |
            Due to a change in the workflow file of the LLM-Red-Team/kimi-free-api upstream repository, GitHub has automatically suspended the scheduled automatic update. You need to manually sync your fork. Please refer to the detailed [Tutorial][tutorial-en-US] for instructions.
            由于 LLM-Red-Team/kimi-free-api 上游仓库的 workflow 文件变更，导致 GitHub 自动暂停了本次自动更新，你需要手动 Sync Fork 一次，

README.md

@@ -6,7 +6,7 @@
<span>[ 中文 | <a href="README_EN.md">English</a> ]</span>
![](https://img.shields.io/github/license/llm-red-team/kimi-free-api.svg)
[![](https://img.shields.io/github/license/llm-red-team/kimi-free-api.svg)](LICENSE)
![](https://img.shields.io/github/stars/llm-red-team/kimi-free-api.svg)
![](https://img.shields.io/github/forks/llm-red-team/kimi-free-api.svg)
![](https://img.shields.io/docker/pulls/vinlic/kimi-free-api.svg)
@@ -15,15 +15,17 @@
与ChatGPT接口完全兼容。
还有以下个free-api欢迎关注
阶跃星辰 (跃问StepChat) 接口转API [step-free-api](https://github.com/LLM-Red-Team/step-free-api)
阿里通义 (Qwen) 接口转API [qwen-free-api](https://github.com/LLM-Red-Team/qwen-free-api)
ZhipuAI (智谱清言) 接口转API [glm-free-api](https://github.com/LLM-Red-Team/glm-free-api)
智谱AI (智谱清言) 接口转API [glm-free-api](https://github.com/LLM-Red-Team/glm-free-api)
秘塔AI (metaso) 接口转API [metaso-free-api](https://github.com/LLM-Red-Team/metaso-free-api)
秘塔AI (Metaso) 接口转API [metaso-free-api](https://github.com/LLM-Red-Team/metaso-free-api)
讯飞星火Spark接口转API [spark-free-api](https://github.com/LLM-Red-Team/spark-free-api)
聆心智能 (Emohaa) 接口转API [emohaa-free-api](https://github.com/LLM-Red-Team/emohaa-free-api)
@@ -38,7 +40,9 @@ ZhipuAI (智谱清言) 接口转API [glm-free-api](https://github.com/LLM-Red-Te
* [Docker-compose部署](#Docker-compose部署)
* [Render部署](#Render部署)
* [Vercel部署](#Vercel部署)
* [Zeabur部署](#Zeabur部署)
* [原生部署](#原生部署)
* [推荐使用客户端](#推荐使用客户端)
* [接口列表](#接口列表)
* [对话补全](#对话补全)
* [文档解读](#文档解读)
@@ -46,9 +50,13 @@ ZhipuAI (智谱清言) 接口转API [glm-free-api](https://github.com/LLM-Red-Te
* [refresh_token存活检测](#refresh_token存活检测)
* [注意事项](#注意事项)
* [Nginx反代优化](#Nginx反代优化)
* [Token统计](#Token统计)
* [Star History](#star-history)
## 免责声明
**逆向API是不稳定的，建议前往MoonshotAI官方 https://platform.moonshot.cn/ 付费使用API，避免封禁的风险。**
**本组织和个人不接受任何资金捐助和交易,此项目是纯粹研究交流学习性质!**
**仅限自用,禁止对外提供服务或商用,避免对官方造成服务压力,否则风险自担!**
@@ -234,6 +242,14 @@ pm2 reload kimi-free-api
pm2 stop kimi-free-api
```
## 推荐使用客户端
使用以下二次开发客户端接入free-api系列项目，更快更简单，支持文档/图像上传！
由 [Clivia](https://github.com/Yanyutin753/lobe-chat) 二次开发的LobeChat [https://github.com/Yanyutin753/lobe-chat](https://github.com/Yanyutin753/lobe-chat)
由 [时光@](https://github.com/SuYxh) 二次开发的ChatGPT Web [https://github.com/SuYxh/chatgpt-web-sea](https://github.com/SuYxh/chatgpt-web-sea)
## 接口列表
目前支持与openai兼容的 `/v1/chat/completions` 接口，可自行使用与openai或其他兼容的客户端接入接口，或者使用 [dify](https://dify.ai/) 等线上服务接入使用。
@@ -255,6 +271,9 @@ Authorization: Bearer [refresh_token]
{
// 模型名称随意填写，如果不希望输出检索过程模型名称请包含silent_search
"model": "kimi",
// 目前多轮对话基于消息合并实现，某些场景可能导致能力下降且受单轮最大Token数限制
// 如果您想获得原生的多轮对话体验，可以传入首轮消息获得的id来接续上下文，注意如果使用这个，首轮必须传none，否则第二轮会空响应
// "conversation_id": "cnndivilnl96vah411dg",
"messages": [
{
"role": "user",
@@ -271,6 +290,7 @@ Authorization: Bearer [refresh_token]
响应数据:
```json
{
// 如果想获得原生多轮对话体验，此id你可以传入到下一轮对话的conversation_id来接续上下文
"id": "cnndivilnl96vah411dg",
"model": "kimi",
"object": "chat.completion",
@@ -425,7 +445,7 @@ Authorization: Bearer [refresh_token]
### refresh_token存活检测
检测refresh_token是否存活，如果存活live为true，否则为false，请不要频繁（小于10分钟）调用此接口。
**POST /token/check**
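
For completeness, a hedged TypeScript sketch of calling this endpoint; the request body shape is not shown in this diff, so the `token` field name below is an assumption to verify against the full README.

```ts
// Sketch: checking refresh_token liveness (no more often than every 10 minutes, as advised above).
// Assumption: the refresh_token is sent in the JSON body as "token"; confirm against the full README.
const BASE_URL = "http://localhost:8000"; // adjust to your deployment

async function isTokenAlive(refreshToken: string): Promise<boolean> {
  const res = await fetch(`${BASE_URL}/token/check`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ token: refreshToken }),
  });
  if (!res.ok) return false;
  const data = await res.json();
  return data.live === true; // "live" is true while the refresh_token is still valid
}
```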

README_EN.md

@@ -1,6 +1,9 @@
# KIMI AI Free Service
![](https://img.shields.io/github/license/llm-red-team/kimi-free-api.svg)
<hr>
[![](https://img.shields.io/github/license/llm-red-team/kimi-free-api.svg)](LICENSE)
![](https://img.shields.io/github/stars/llm-red-team/kimi-free-api.svg)
![](https://img.shields.io/github/forks/llm-red-team/kimi-free-api.svg)
![](https://img.shields.io/docker/pulls/vinlic/kimi-free-api.svg)
@@ -9,36 +12,41 @@ Supports high-speed streaming output, multi-turn dialogues, internet search, lon
Fully compatible with the ChatGPT interface.
Also, the following four free APIs are available for your attention:
Also, the following six free APIs are available for your attention:
Step to the Stars (StepChat) API to API [step-free-api](https://github.com/LLM-Red-Team/step-free-api)
StepFun (StepChat) API to API [step-free-api](https://github.com/LLM-Red-Team/step-free-api)
Ali Tongyi (Qwen) API to API [qwen-free-api](https://github.com/LLM-Red-Team/qwen-free-api)
ZhipuAI (Wisdom Map Clear Words) API to API [glm-free-api](https://github.com/LLM-Red-Team/glm-free-api)
ZhipuAI (ChatGLM) API to API [glm-free-api](https://github.com/LLM-Red-Team/glm-free-api)
MetaAI (metaso) 接口转API [metaso-free-api](https://github.com/LLM-Red-Team/metaso-free-api)
Meta Sota (metaso) API to API [metaso-free-api](https://github.com/LLM-Red-Team/metaso-free-api)
Listening Intelligence (Emohaa) API to API [emohaa-free-api](https://github.com/LLM-Red-Team/emohaa-free-api)
Iflytek Spark (Spark) API to API [spark-free-api](https://github.com/LLM-Red-Team/spark-free-api)
Lingxin Intelligence (Emohaa) API to API [emohaa-free-api](https://github.com/LLM-Red-Team/emohaa-free-api)
## Table of Contents
* [Disclaimer](#disclaimer)
*[Online experience](#在线experience)
* [Effect Example](#EffectExample)
* [Access preparation](#access preparation)
* [Multiple account access](#multiple account access)
* [Docker Deployment](#DockerDeployment)
* [Docker-compose deployment](#Docker-compose deployment)
* [Native Deployment](#nativedeployment)
* [Interface List](#Interface List)
* [Dialogue completion](#dialogue completion)
* [Document Interpretation](#document interpretation)
* [Image analysis](#imageanalysis)
* [refresh_token survival detection](#refresh_token survival detection)
* [Note](# NOTE)
* [Nginx anti-generation optimization](#Nginx anti-generation optimization)
* [Online experience](#Online-Experience)
* [Effect Examples](#Effect-Examples)
* [Access preparation](#Access-Preparation)
* [Multiple account access](#Multi-Account-Access)
* [Docker Deployment](#Docker-Deployment)
* [Docker-compose deployment](#Docker-compose-deployment)
* [Zeabur Deployment](#Zeabur-Deployment)
* [Native Deployment](#Native-deployment)
* [Interface List](#Interface-List)
* [Conversation completion](#conversation-completion)
* [Document Interpretation](#document-interpretation)
* [Image analysis](#image-analysis)
* [refresh_token survival detection](#refresh_token-survival-detection)
* [Precautions](#Precautions)
* [Nginx anti-generation optimization](#Nginx-anti-generation-optimization)
* [Token statistics](#Token-statistics)
* [Star History](#star-history)
## Disclaimer
**This organization and individuals do not accept any financial donations and transactions. This project is purely for research, communication, and learning purposes!**
@@ -145,6 +153,10 @@ services:
- TZ=Asia/Shanghai
```
## Zeabur Deployment
[![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/GRFYBP)
## Native deployment
Please prepare a server with a public IP and open port 8000.
@@ -193,10 +205,6 @@ Out of service
pm2 stop kimi-free-api
```
## Zeabur Deployment
[![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/GRFYBP)
## interface list
Currently, the `/v1/chat/completions` interface compatible with openai is supported. You can use the client access interface compatible with openai or other clients, or use online services such as [dify](https://dify.ai/) Access and use.
@@ -431,4 +439,4 @@ Since the inference side is not in kimi-free-api, the token cannot be counted an
## Star History
[![Star History Chart](https://api.star-history.com/svg?repos=LLM-Red-Team/kimi-free-api&type=Date)](https://star-history.com/ #LLM-Red-Team/kimi-free-api&Date)
[![Star History Chart](https://api.star-history.com/svg?repos=LLM-Red-Team/kimi-free-api&type=Date)](https://star-history.com/#LLM-Red-Team/kimi-free-api&Date)

package.json

@@ -1,6 +1,6 @@
{
"name": "kimi-free-api",
"version": "0.0.28",
"version": "0.0.31",
"description": "Kimi Free API Server",
"type": "module",
"main": "dist/index.js",

src/api/controllers/chat.ts

@@ -237,9 +237,10 @@ async function promptSnippetSubmit(query: string, refreshToken: string) {
* @param messages 参考gpt系列消息格式多轮对话请完整提供上下文
* @param refreshToken 用于刷新access_token的refresh_token
* @param useSearch 是否开启联网搜索
* @param refConvId 引用会话ID
* @param retryCount 重试次数
*/
async function createCompletion(model = MODEL_NAME, messages: any[], refreshToken: string, useSearch = true, retryCount = 0) {
async function createCompletion(model = MODEL_NAME, messages: any[], refreshToken: string, useSearch = true, refConvId?: string, retryCount = 0) {
return (async () => {
logger.info(messages);
@@ -252,14 +253,19 @@ async function createCompletion(model = MODEL_NAME, messages: any[], refreshToke
.catch(err => logger.error(err));
// 创建会话
const convId = await createConversation("未命名会话", refreshToken);
const convId = /[0-9a-zA-Z]{20}/.test(refConvId) ? refConvId : await createConversation("未命名会话", refreshToken);
// 请求流
const {
accessToken,
userId
} = await acquireToken(refreshToken);
const sendMessages = messagesPrepare(messages);
const sendMessages = messagesPrepare(messages, !!refConvId);
console.log(convId, {
messages: sendMessages,
refs,
use_search: useSearch
});
const result = await axios.post(`https://kimi.moonshot.cn/api/chat/${convId}/completion/stream`, {
messages: sendMessages,
refs,
@@ -268,6 +274,7 @@ async function createCompletion(model = MODEL_NAME, messages: any[], refreshToke
headers: {
Authorization: `Bearer ${accessToken}`,
Referer: `https://kimi.moonshot.cn/chat/${convId}`,
'Priority': 'u=1, i',
'X-Traffic-Id': userId,
...FAKE_HEADERS
},
@@ -283,7 +290,8 @@ async function createCompletion(model = MODEL_NAME, messages: any[], refreshToke
logger.success(`Stream has completed transfer ${util.timestamp() - streamStartTime}ms`);
// 异步移除会话,如果消息不合规,此操作可能会抛出数据库错误异常,请忽略
removeConversation(convId, refreshToken)
// 如果引用会话将不会清除,因为我们不知道什么时候你会结束会话
!refConvId && removeConversation(convId, refreshToken)
.catch(err => console.error(err));
promptSnippetSubmit(sendMessages[0].content, refreshToken)
.catch(err => console.error(err));
@@ -296,7 +304,7 @@ async function createCompletion(model = MODEL_NAME, messages: any[], refreshToke
logger.warn(`Try again after ${RETRY_DELAY / 1000}s...`);
return (async () => {
await new Promise(resolve => setTimeout(resolve, RETRY_DELAY));
return createCompletion(model, messages, refreshToken, useSearch, retryCount + 1);
return createCompletion(model, messages, refreshToken, useSearch, refConvId, retryCount + 1);
})();
}
throw err;
@@ -310,9 +318,10 @@ async function createCompletion(model = MODEL_NAME, messages: any[], refreshToke
* @param messages 参考gpt系列消息格式多轮对话请完整提供上下文
* @param refreshToken 用于刷新access_token的refresh_token
* @param useSearch 是否开启联网搜索
* @param refConvId 引用会话ID
* @param retryCount 重试次数
*/
async function createCompletionStream(model = MODEL_NAME, messages: any[], refreshToken: string, useSearch = true, retryCount = 0) {
async function createCompletionStream(model = MODEL_NAME, messages: any[], refreshToken: string, useSearch = true, refConvId?: string, retryCount = 0) {
return (async () => {
logger.info(messages);
@@ -325,14 +334,14 @@ async function createCompletionStream(model = MODEL_NAME, messages: any[], refre
.catch(err => logger.error(err));
// 创建会话
const convId = await createConversation("未命名会话", refreshToken);
const convId = /[0-9a-zA-Z]{20}/.test(refConvId) ? refConvId : await createConversation("未命名会话", refreshToken);
// 请求流
const {
accessToken,
userId
} = await acquireToken(refreshToken);
const sendMessages = messagesPrepare(messages);
const sendMessages = messagesPrepare(messages, !!refConvId);
const result = await axios.post(`https://kimi.moonshot.cn/api/chat/${convId}/completion/stream`, {
messages: sendMessages,
refs,
@@ -343,6 +352,7 @@ async function createCompletionStream(model = MODEL_NAME, messages: any[], refre
headers: {
Authorization: `Bearer ${accessToken}`,
Referer: `https://kimi.moonshot.cn/chat/${convId}`,
'Priority': 'u=1, i',
'X-Traffic-Id': userId,
...FAKE_HEADERS
},
@@ -354,7 +364,8 @@ async function createCompletionStream(model = MODEL_NAME, messages: any[], refre
return createTransStream(model, convId, result.data, () => {
logger.success(`Stream has completed transfer ${util.timestamp() - streamStartTime}ms`);
// 流传输结束后异步移除会话,如果消息不合规,此操作可能会抛出数据库错误异常,请忽略
removeConversation(convId, refreshToken)
// 如果引用会话将不会清除,因为我们不知道什么时候你会结束会话
!refConvId && removeConversation(convId, refreshToken)
.catch(err => console.error(err));
promptSnippetSubmit(sendMessages[0].content, refreshToken)
.catch(err => console.error(err));
@@ -366,7 +377,7 @@ async function createCompletionStream(model = MODEL_NAME, messages: any[], refre
logger.warn(`Try again after ${RETRY_DELAY / 1000}s...`);
return (async () => {
await new Promise(resolve => setTimeout(resolve, RETRY_DELAY));
return createCompletionStream(model, messages, refreshToken, useSearch, retryCount + 1);
return createCompletionStream(model, messages, refreshToken, useSearch, refConvId, retryCount + 1);
})();
}
throw err;
@@ -447,8 +458,9 @@ function extractRefFileUrls(messages: any[]) {
* user:新消息
*
* @param messages 参考gpt系列消息格式多轮对话请完整提供上下文
* @param isRefConv 是否为引用会话
*/
function messagesPrepare(messages: any[]) {
function messagesPrepare(messages: any[], isRefConv = false) {
// 注入消息提升注意力
let latestMessage = messages[messages.length - 1];
let hasFileOrImage = Array.isArray(latestMessage.content)
@@ -472,16 +484,32 @@ function messagesPrepare(messages: any[]) {
}
}
const content = messages.reduce((content, message) => {
if (Array.isArray(message.content)) {
return message.content.reduce((_content, v) => {
if (!_.isObject(v) || v['type'] != 'text') return _content;
return _content + `${message.role || "user"}:${v["text"] || ""}\n`;
}, content);
}
return content += `${message.role || "user"}:${message.role == 'user' ? wrapUrlsToTags(message.content) : message.content}\n`;
}, '');
logger.info("\n对话合并\n" + content);
let content;
if (isRefConv || messages.length < 2) {
content = messages.reduce((content, message) => {
if (_.isArray(message.content)) {
return message.content.reduce((_content, v) => {
if (!_.isObject(v) || v['type'] != 'text') return _content;
return _content + `${v["text"] || ""}\n`;
}, content);
}
return content += `${message.role == 'user' ? wrapUrlsToTags(message.content) : message.content}\n`;
}, '')
logger.info("\n透传内容\n" + content);
}
else {
content = messages.reduce((content, message) => {
if (_.isArray(message.content)) {
return message.content.reduce((_content, v) => {
if (!_.isObject(v) || v['type'] != 'text') return _content;
return _content + `${message.role || "user"}:${v["text"] || ""}\n`;
}, content);
}
return content += `${message.role || "user"}:${message.role == 'user' ? wrapUrlsToTags(message.content) : message.content}\n`;
}, '')
logger.info("\n对话合并\n" + content);
}
return [
{ role: 'user', content }
]
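
To make the two branches of `messagesPrepare` concrete, here is a simplified standalone sketch of what the prepared content ends up looking like; it drops the lodash checks, the array-content handling, and `wrapUrlsToTags`, all of which the real implementation above keeps.

```ts
// Simplified illustration of messagesPrepare's two branches (not the real implementation).
type Msg = { role?: string; content: string };

function prepare(messages: Msg[], isRefConv = false): string {
  if (isRefConv || messages.length < 2) {
    // Passthrough: native multi-turn (or a single message) is sent as-is, without role prefixes.
    return messages.map(m => `${m.content}\n`).join("");
  }
  // Merge: the whole history is flattened into one pseudo-transcript with role prefixes.
  return messages.map(m => `${m.role || "user"}:${m.content}\n`).join("");
}

// A short multi-turn history without a conversation_id is merged into a single user message:
console.log(prepare([
  { role: "user", content: "你好" },
  { role: "assistant", content: "你好！有什么可以帮你？" },
  { role: "user", content: "介绍一下你自己" },
]));
// user:你好
// assistant:你好！有什么可以帮你？
// user:介绍一下你自己
```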
@@ -609,35 +637,49 @@ async function uploadFile(fileUrl: string, refreshToken: string) {
});
checkResult(result, refreshToken);
// 获取文件上传结果
result = await axios.post('https://kimi.moonshot.cn/api/file', {
type: 'file',
name: filename,
object_name: objectName,
timeout: 15000
}, {
headers: {
Authorization: `Bearer ${accessToken}`,
Referer: `https://kimi.moonshot.cn/`,
'X-Traffic-Id': userId,
...FAKE_HEADERS
}
});
const { id: fileId } = checkResult(result, refreshToken);
let fileId, status, startTime = Date.now();
while (status != 'initialized') {
if (Date.now() - startTime > 30000)
throw new Error('文件等待处理超时');
// 获取文件上传结果
result = await axios.post('https://kimi.moonshot.cn/api/file', {
type: 'file',
name: filename,
object_name: objectName,
timeout: 15000
}, {
headers: {
Authorization: `Bearer ${accessToken}`,
Referer: `https://kimi.moonshot.cn/`,
'X-Traffic-Id': userId,
...FAKE_HEADERS
}
});
({ id: fileId, status } = checkResult(result, refreshToken));
}
// 处理文件转换
result = await axios.post('https://kimi.moonshot.cn/api/file/parse_process', {
ids: [fileId],
timeout: 120000
}, {
headers: {
Authorization: `Bearer ${accessToken}`,
Referer: `https://kimi.moonshot.cn/`,
'X-Traffic-Id': userId,
...FAKE_HEADERS
}
});
checkResult(result, refreshToken);
startTime = Date.now();
let parseFinish = false;
while (!parseFinish) {
if (Date.now() - startTime > 30000)
throw new Error('文件等待处理超时');
// 处理文件转换
parseFinish = await new Promise(resolve => {
axios.post('https://kimi.moonshot.cn/api/file/parse_process', {
ids: [fileId],
timeout: 120000
}, {
headers: {
Authorization: `Bearer ${accessToken}`,
Referer: `https://kimi.moonshot.cn/`,
'X-Traffic-Id': userId,
...FAKE_HEADERS
}
})
.then(() => resolve(true))
.catch(() => resolve(false));
});
}
return fileId;
}

src/api/routes/chat.ts

@@ -13,22 +13,22 @@ export default {
'/completions': async (request: Request) => {
request
.validate('body.conversation_id', v => _.isUndefined(v) || _.isString(v))
.validate('body.messages', _.isArray)
.validate('headers.authorization', _.isString)
// refresh_token切分
const tokens = chat.tokenSplit(request.headers.authorization);
// 随机挑选一个refresh_token
const token = _.sample(tokens);
const model = request.body.model;
const messages = request.body.messages;
if (request.body.stream) {
const stream = await chat.createCompletionStream(model, messages, token, request.body.use_search);
const { model, conversation_id: convId, messages, stream, use_search } = request.body;
if (stream) {
const stream = await chat.createCompletionStream(model, messages, token, use_search, convId);
return new Response(stream, {
type: "text/event-stream"
});
}
else
return await chat.createCompletion(model, messages, token, request.body.use_search);
return await chat.createCompletion(model, messages, token, use_search, convId);
}
}

src/api/routes/index.ts

@@ -4,6 +4,7 @@ import Response from '@/lib/response/Response.ts';
import chat from "./chat.ts";
import ping from "./ping.ts";
import token from './token.ts';
import models from './models.ts';
export default [
{
@@ -21,5 +22,6 @@ export default [
},
chat,
ping,
token
token,
models
];

src/api/routes/models.ts (new file, 41 lines)

@@ -0,0 +1,41 @@
import _ from 'lodash';

export default {

    prefix: '/v1',

    get: {

        '/models': async () => {
            return {
                "data": [
                    {
                        "id": "moonshot-v1",
                        "object": "model",
                        "owned_by": "kimi-free-api"
                    },
                    {
                        "id": "moonshot-v1-8k",
                        "object": "model",
                        "owned_by": "kimi-free-api"
                    },
                    {
                        "id": "moonshot-v1-32k",
                        "object": "model",
                        "owned_by": "kimi-free-api"
                    },
                    {
                        "id": "moonshot-v1-128k",
                        "object": "model",
                        "owned_by": "kimi-free-api"
                    },
                    {
                        "id": "moonshot-v1-vision",
                        "object": "model",
                        "owned_by": "kimi-free-api"
                    }
                ]
            };
        }

    }

}
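
This new route simply returns a static model list so OpenAI-compatible clients such as LobeChat can populate their model picker. A quick way to exercise it, assuming a local deployment on port 8000 and that no authorization is required (none is checked in the handler above):

```ts
// Sketch: listing the models exposed by the new /v1/models route.
const res = await fetch("http://localhost:8000/v1/models");
const { data } = await res.json();
console.log(data.map((m: { id: string }) => m.id));
// Expected: ["moonshot-v1", "moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k", "moonshot-v1-vision"]
```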