Add ChatGLM2 tutorial (#125)

* Create GLM2对接教程.md

* Add GLM2 integration tutorial

* Delete GLM2对接教程.md

* Delete image.png

* Delete openai_api.py

* Delete openai_api_int4.py

* Delete openai_api_int8.py

* Create GLM2对接教程.md

* Add ChatGLM2 API

* Delete openai_api_int4.py

* Delete openai_api_int8.py

* Update openai_api.py

* Update GLM2对接教程.md
stakeswky
2023-07-13 09:21:19 +08:00
committed by GitHub
parent 5f784e1def
commit 0be7a3e355
3 changed files with 210 additions and 0 deletions

GLM2对接教程.md

@@ -0,0 +1,36 @@
# Get GLM Running on FastGPT in 3 Minutes
## Introduction
FastGPT lets you use your own OpenAI API key to call the OpenAI API; GPT-3.5, GPT-4 and embeddings are currently integrated, so you can build your own knowledge base. For data-security reasons, however, we cannot hand all of our data to a cloud model. So how do you connect a privately deployed model to FastGPT? This article uses Tsinghua's ChatGLM2 as an example to show how to plug a private model into FastGPT.
## About ChatGLM2
ChatGLM2-6B is the second-generation version of the open-source Chinese-English dialogue model ChatGLM-6B. For details, see the project: https://github.com/THUDM/ChatGLM2-6B
Note: the ChatGLM2-6B weights are fully open for academic research, and commercial use is also permitted after obtaining written permission from the authors. This tutorial only describes one way to use the model and does not grant any license.
## Recommended Configuration
According to the official figures, generating a sequence of length 8192 uses about 12.8 GB of VRAM at FP16, 8.1 GB at INT8 and 5.1 GB at INT4. Quantization slightly reduces quality, but not by much.
The recommended configurations are therefore:
- fp16: RAM >= 16 GB, VRAM >= 16 GB, disk space >= 25 GB, start with `python openai_api.py --model_name 16`
- int8: RAM >= 16 GB, VRAM >= 9 GB, disk space >= 25 GB, start with `python openai_api.py --model_name 8`
- int4: RAM >= 16 GB, VRAM >= 6 GB, disk space >= 25 GB, start with `python openai_api.py --model_name 4`
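
If you are unsure how much VRAM your GPU has, a quick check with PyTorch looks like the sketch below. It assumes a CUDA build of torch is already installed; the thresholds in the comment are simply the figures from the table above.

```python
# Minimal sketch: report total VRAM so you can pick a quantization level.
# Assumes a CUDA-capable GPU and a CUDA build of PyTorch.
import torch

if torch.cuda.is_available():
    total_gb = torch.cuda.get_device_properties(0).total_memory / 1024**3
    print(f"GPU 0: {torch.cuda.get_device_name(0)}, {total_gb:.1f} GB VRAM")
    # Rough guide from the table above: >=16 GB -> fp16, >=9 GB -> int8, >=6 GB -> int4
else:
    print("No CUDA GPU detected; ChatGLM2 will not run with this script.")
```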
## Environment
- Python 3.8.10
- CUDA 11.8
- Network access to GitHub and Hugging Face (a proxy may be required in some regions)
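
A quick sanity check of the environment (a minimal sketch; it assumes PyTorch is already installed):

```python
# Print the Python version and what CUDA PyTorch can see.
import sys
import torch

print("Python:", sys.version.split()[0])                   # expected: 3.8.x
print("CUDA available:", torch.cuda.is_available())        # expected: True
print("CUDA version (torch build):", torch.version.cuda)   # expected: 11.x
```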
## Steps
1. Set up the environment described above (ask GPT for the detailed how-to if you get stuck).
2. Run `pip install -r requirements.txt` on the command line.
3. Open the .py file you want to run and set the token around line 76 of the code (see the sketch after this list). The token is only an extra layer of authentication to keep the API from being abused.
4. Run `python openai_api.py --model_name 16` (pick the number according to the configuration table above).

Then wait for the model to download and load. If an error appears, ask GPT first.
The two files mentioned above are in the same directory as this document.
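
For reference, the token check you edit in step 3 follows the pattern below. This is a simplified, standalone sketch; the real code is the `verify_token` function in openai_api.py later in this commit.

```python
from typing import Optional

# The value you put here must match the OPENAIKEY you will configure in FastGPT later.
MY_TOKEN = "sk-aaabbbcccdddeeefffggghhhiiijjjkkk"

def is_authorized(auth_header: Optional[str]) -> bool:
    """Return True only for an 'Authorization: Bearer <MY_TOKEN>' header."""
    if not auth_header:
        return False
    token_type, _, token = auth_header.partition(" ")
    return token_type.lower() == "bearer" and token == MY_TOKEN

print(is_authorized(f"Bearer {MY_TOKEN}"))  # True
print(is_authorized("Bearer wrong-token"))  # False
```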
After a successful start you should see an address like this:
![Alt text](image.png)
The http://0.0.0.0:6006 shown there is the connection address.
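
Before wiring it into FastGPT you can smoke-test the endpoint directly. A minimal sketch with the `requests` library, assuming the server runs locally on port 6006 and you kept the default token:

```python
# pip install requests
import requests

resp = requests.post(
    "http://127.0.0.1:6006/v1/chat/completions",
    headers={"Authorization": "Bearer sk-aaabbbcccdddeeefffggghhhiiijjjkkk"},
    json={
        "model": "chatglm2-6b",
        "messages": [{"role": "user", "content": "你好"}],
    },
    timeout=300,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])
```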
Now go back to the .env.local file and configure the address as follows:
OPENAI_BASE_URL=http://127.0.0.1:6006/v1
OPENAIKEY=sk-aaabbbcccdddeeefffggghhhiiijjjkkk (this is the token you configured in the code)
The OPENAIKEY can be any string you like, as long as it matches the token set in openai_api.py.
With that, ChatGLM2 is connected to FastGPT.
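
Optionally, since the interface mimics OpenAI's chat-completions API, any OpenAI-compatible client can talk to it as well. A minimal sketch with the legacy `openai` Python SDK (0.x); the model name is only echoed back by the server, so its exact value is not critical:

```python
# pip install "openai<1.0"
import openai

openai.api_base = "http://127.0.0.1:6006/v1"
openai.api_key = "sk-aaabbbcccdddeeefffggghhhiiijjjkkk"  # must match the token in openai_api.py

reply = openai.ChatCompletion.create(
    model="chatglm2-6b",
    messages=[{"role": "user", "content": "用一句话介绍一下你自己"}],
)
print(reply["choices"][0]["message"]["content"])
```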

image.png (binary file, 100 KiB) not shown

openai_api.py

@@ -0,0 +1,174 @@
# coding=utf-8
import argparse
import time
from contextlib import asynccontextmanager
from typing import Any, Dict, List, Literal, Optional, Union

import torch
import uvicorn
from fastapi import Depends, FastAPI, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from sse_starlette.sse import ServerSentEvent, EventSourceResponse
from starlette.status import HTTP_401_UNAUTHORIZED
from transformers import AutoTokenizer, AutoModel


@asynccontextmanager
async def lifespan(app: FastAPI):  # collects GPU memory
    yield
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()


app = FastAPI(lifespan=lifespan)

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
class ChatMessage(BaseModel):
    role: Literal["user", "assistant", "system"]
    content: str


class DeltaMessage(BaseModel):
    role: Optional[Literal["user", "assistant", "system"]] = None
    content: Optional[str] = None


class ChatCompletionRequest(BaseModel):
    model: str
    messages: List[ChatMessage]
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    max_length: Optional[int] = None
    stream: Optional[bool] = False


class ChatCompletionResponseChoice(BaseModel):
    index: int
    message: ChatMessage
    finish_reason: Literal["stop", "length"]


class ChatCompletionResponseStreamChoice(BaseModel):
    index: int
    delta: DeltaMessage
    finish_reason: Optional[Literal["stop", "length"]]


class ChatCompletionResponse(BaseModel):
    model: str
    object: Literal["chat.completion", "chat.completion.chunk"]
    choices: List[Union[ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice]]
    created: Optional[int] = Field(default_factory=lambda: int(time.time()))
async def verify_token(request: Request):
    # Simple bearer-token check; the token below must match the OPENAIKEY configured in FastGPT.
    auth_header = request.headers.get('Authorization')
    if auth_header:
        token_type, _, token = auth_header.partition(' ')
        if token_type.lower() == "bearer" and token == "sk-aaabbbcccdddeeefffggghhhiiijjjkkk":  # configure your token here
            return True
    raise HTTPException(
        status_code=HTTP_401_UNAUTHORIZED,
        detail="Invalid authorization credentials",
    )
@app.post("/v1/chat/completions", response_model=ChatCompletionResponse)
async def create_chat_completion(request: ChatCompletionRequest, token: bool = Depends(verify_token)):
global model, tokenizer
if request.messages[-1].role != "user":
raise HTTPException(status_code=400, detail="Invalid request")
query = request.messages[-1].content
prev_messages = request.messages[:-1]
if len(prev_messages) > 0 and prev_messages[0].role == "system":
query = prev_messages.pop(0).content + query
history = []
if len(prev_messages) % 2 == 0:
for i in range(0, len(prev_messages), 2):
if prev_messages[i].role == "user" and prev_messages[i+1].role == "assistant":
history.append([prev_messages[i].content, prev_messages[i+1].content])
if request.stream:
generate = predict(query, history, request.model)
return EventSourceResponse(generate, media_type="text/event-stream")
response, _ = model.chat(tokenizer, query, history=history)
choice_data = ChatCompletionResponseChoice(
index=0,
message=ChatMessage(role="assistant", content=response),
finish_reason="stop"
)
return ChatCompletionResponse(model=request.model, choices=[choice_data], object="chat.completion")
async def predict(query: str, history: List[List[str]], model_id: str):
    global model, tokenizer

    # First chunk only carries the assistant role, mirroring OpenAI's streaming format.
    choice_data = ChatCompletionResponseStreamChoice(
        index=0,
        delta=DeltaMessage(role="assistant"),
        finish_reason=None
    )
    chunk = ChatCompletionResponse(model=model_id, choices=[choice_data], object="chat.completion.chunk")
    yield "{}".format(chunk.json(exclude_unset=True, ensure_ascii=False))

    # Stream only the newly generated text of each partial response.
    current_length = 0
    for new_response, _ in model.stream_chat(tokenizer, query, history):
        if len(new_response) == current_length:
            continue

        new_text = new_response[current_length:]
        current_length = len(new_response)

        choice_data = ChatCompletionResponseStreamChoice(
            index=0,
            delta=DeltaMessage(content=new_text),
            finish_reason=None
        )
        chunk = ChatCompletionResponse(model=model_id, choices=[choice_data], object="chat.completion.chunk")
        yield "{}".format(chunk.json(exclude_unset=True, ensure_ascii=False))

    # Final chunk signals completion, followed by the [DONE] sentinel.
    choice_data = ChatCompletionResponseStreamChoice(
        index=0,
        delta=DeltaMessage(),
        finish_reason="stop"
    )
    chunk = ChatCompletionResponse(model=model_id, choices=[choice_data], object="chat.completion.chunk")
    yield "{}".format(chunk.json(exclude_unset=True, ensure_ascii=False))
    yield '[DONE]'
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", default="16", type=str, help="Model name")
args = parser.parse_args()
model_dict = {
"4": "THUDM/chatglm2-6b-int4",
"8": "THUDM/chatglm2-6b-int8",
"16": "THUDM/chatglm2-6b"
}
model_name = model_dict.get(args.model_name, "THUDM/chatglm2-6b")
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModel.from_pretrained(model_name, trust_remote_code=True).cuda()
model.eval()
uvicorn.run(app, host='0.0.0.0', port=6006, workers=1)