# app.py
import subprocess
import os
import time
import torch
import utils
model_name = "hf-models/Qwen2.5-7B-Instruct"
GITEE_ACCESS_TOKEN = os.environ.get("GITEE_ACCESS_TOKEN", "")
# print("开始下载 lora")
# git_clone_docs_lora_command = ["git", "clone",
# f"https://oauth2:{GITEE_ACCESS_TOKEN}@gitee.com/stringify/glm4-lora-gitee-docs-9b.git", "--depth=1", "--single-branch", "./glm4-lora-gitee-docs-9b"]
# subprocess.run(
# git_clone_docs_lora_command, text=True)
# print("lora 下载完成")
# subprocess.Popen(['pip','uninstall','-y','flash_attn'])
os.environ['HF_HOME'] = '/data'
api_server_command = [
    "python",
    "-m",
    "vllm.entrypoints.openai.api_server",
    "--model",
    model_name,
    "--dtype",
    "bfloat16",
    "--api-key",
    "",
    "--tensor-parallel-size",
    str(torch.cuda.device_count() or 2),
    "--trust-remote-code",
    "--gpu-memory-utilization",
    "0.71",
    # "--enable-lora",
    # "--lora-modules",
    # "gitee-docs-lora=./glm4-lora-gitee-docs-9b",
    # vLLM 0.3.3 does not support LoRA for GLM4/Qwen2:
    # ValueError: Model Qwen2ForCausalLM does not support LoRA, but LoRA is
    # enabled. Support for this model may be added in the future. If this is
    # important to you, please open an issue on github.
    #
    # Both values below are set to 21000 because vLLM requires
    # max_num_batched_tokens >= max_model_len:
    # ValueError: max_num_batched_tokens (55000) is smaller than max_model_len
    # (131072). This effectively limits the maximum sequence length to
    # max_num_batched_tokens and makes vLLM reject longer sequences. Please
    # increase max_num_batched_tokens or decrease max_model_len.
    "--max-num-batched-tokens",
    "21000",
    "--max-model-len",
    "21000",
    "--disable-log-requests",
    "--disable-log-stats",
    "--port",
    "8000",
    "--block-size",
    "16",  # vLLM 0.3.3 only supports a block size of 16 (also the default)
    "--max-num-seqs",
    "1024",
    # "--enable-chunked-prefill",  # chunked prefill is not supported in vLLM 0.3.3
    # When running multiple models across multiple GPUs, vLLM may report
    # "GPU blocks: 0": https://github.com/vllm-project/vllm/issues/2248
    # "--enforce-eager"
]
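# Once the command above is launched, vLLM serves an OpenAI-compatible API on
# port 8000. A minimal client sketch (an assumption for illustration: it
# presumes the server is reachable on localhost and that the "model" field
# matches `model_name` above):
#
#   curl http://127.0.0.1:8000/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "hf-models/Qwen2.5-7B-Instruct",
#          "messages": [{"role": "user", "content": "Hello"}]}'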
chainlit_ui_process = subprocess.Popen(
    ['python', '-m', 'chainlit', 'run', 'chainlit_ui_crawler.py',
     '--host', '0.0.0.0', '--port', '7860', '--ci', '--headless'])
def wait_for_service(url):
    # Poll until the service behind `url` starts accepting connections.
    # (The original only slept on exception, busy-waiting while the port
    # simply wasn't open yet; sleep unconditionally instead.)
    while True:
        try:
            if utils.is_port_open(url):
                return True
        except Exception:
            pass
        time.sleep(5)
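# The `utils` module is not shown in this file. A plausible sketch of the
# helper used above (an assumption, not the actual module) parses the URL and
# attempts a TCP connection:
#
#   from urllib.parse import urlparse
#   import socket
#
#   def is_port_open(url):
#       parsed = urlparse(url)
#       with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
#           sock.settimeout(2)
#           return sock.connect_ex((parsed.hostname, parsed.port or 80)) == 0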
if wait_for_service("http://127.0.0.1:7860"):
    print("UI service is up; starting the API server...")
    api_process = subprocess.Popen(api_server_command, text=True)
# device = torch.cuda.current_device()
# props = torch.cuda.get_device_properties(device)
# print(f"Device Name: {props.name}")
# print(f"Total Memory: {props.total_memory / (1024 ** 3)} GB")
try:
    api_process.wait()
    chainlit_ui_process.wait()
# except KeyboardInterrupt:
#     print("Shutting down servers.")
#     chainlit_ui_process.terminate()
#     api_process.terminate()
#     api_process.wait()
#     chainlit_ui_process.wait()
finally:
    api_process.kill()
    chainlit_ui_process.kill()
    print("Servers shut down.")