This commit is contained in:
hailin 2025-07-27 12:23:08 +08:00
parent ccf3398741
commit 68a12b4b4a
1 changed files with 96 additions and 94 deletions

View File

@ -1,113 +1,115 @@
import os
import json
import datetime
import textwrap
from pathlib import Path

import requests
import gradio as gr

#───────────────────────────────────────────────────────────────────────────────
# 1. Backend endpoint & weight path
#───────────────────────────────────────────────────────────────────────────────
# Native SGLang /generate endpoint (not the OpenAI-compatible route).
API_URL = "http://localhost:30000/generate"
# NOTE(review): hard-coded credential — consider reading from an env var.
API_KEY = "token-abc123"
# Weight directory; keep in sync with the path the supervisor passes in.
MODEL_PATH = Path("/root/.cradle/Alibaba/Qwen3-30B-A3B-Base")
# Auto-detect the model name from the weights; fall back to the directory name.
def detect_model_name(model_path: Path) -> str:
    """Return a human-readable model name for the weights at *model_path*.

    Reads ``config.json`` inside the weight directory and prefers, in order:
    the first entry of ``architectures``, then ``model_type``, then the
    directory name. If ``config.json`` does not exist, the directory name
    is returned directly.
    """
    cfg = model_path / "config.json"
    if cfg.exists():
        with open(cfg, "r", encoding="utf-8") as f:
            data = json.load(f)
        # Qwen / LLaMA / GPTNeoX … all ship "architectures" or "model_type".
        # `or [None]` also covers an explicit empty "architectures" list,
        # which would otherwise raise IndexError on the [0] access.
        arch = (data.get("architectures") or [None])[0]
        return arch or data.get("model_type") or model_path.name
    return model_path.name
# Resolved once at import time from the local weight directory.
MODEL_NAME = detect_model_name(MODEL_PATH)
def now() -> str:
    """Return the current wall-clock time as ``HH:MM:SS`` for log lines."""
    return datetime.datetime.now().strftime("%H:%M:%S")

#───────────────────────────────────────────────────────────────────────────────
# 2. Call the SGLang /generate endpoint
#───────────────────────────────────────────────────────────────────────────────
def call_backend(text, sampling):
    """POST *text* with *sampling* params to the SGLang server, return the reply.

    Args:
        text: prompt string for the base model (raw completion, no chat template).
        sampling: dict of sampling parameters placed under ``sampling_params``.

    Returns:
        The generated text on success, or a bracketed diagnostic string on
        HTTP / JSON failure (never raises for those cases).
    """
    payload = {"model": MODEL_NAME, "text": text, "sampling_params": sampling}
    # Truncate the logged payload so huge prompts don't flood the console.
    print(f"\n🟡 [{now()} payload] {json.dumps(payload, ensure_ascii=False)[:400]}")
    resp = requests.post(
        API_URL,
        headers={"Authorization": f"Bearer {API_KEY}", "Content-Type": "application/json"},
        json=payload, timeout=180
    )
    if resp.status_code != 200:
        return f"[HTTP {resp.status_code}] {resp.text[:300]}"
    try:
        return json.loads(resp.text).get("text", "").strip() or "[⚠ 后端无 text]"
    except json.JSONDecodeError:
        snippet = textwrap.shorten(resp.text, 300, placeholder="")
        return f"[⚠ JSON 解析失败] {snippet}"

#───────────────────────────────────────────────────────────────────────────────
# 3. Gradio chat callback
#───────────────────────────────────────────────────────────────────────────────
def chat(
    user_msg, history,
    max_new, temperature, top_p, top_k,
    rep_pen, pres_pen, stop_raw
):
    """Gradio ChatInterface callback: assemble sampling params, query backend.

    Args:
        user_msg: latest user message (sent verbatim as the prompt).
        history: chat history from Gradio (unused — base model, no template).
        max_new, temperature, top_p, top_k, rep_pen, pres_pen: slider values.
        stop_raw: comma-separated stop sequences, e.g. ``"###,END"``.

    Returns:
        The backend's generated text (or a diagnostic string on failure).
    """
    # Empty entries (e.g. trailing commas) are dropped; empty list → no stops.
    stop = [s.strip() for s in stop_raw.split(",") if s.strip()] or None
    sampling = {
        "max_new_tokens": int(max_new),
        "temperature": temperature,
        "top_p": top_p,
        "top_k": int(top_k),
        "repetition_penalty": rep_pen,
        "presence_penalty": pres_pen,
    }
    if stop:
        sampling["stop"] = stop
    return call_backend(user_msg, sampling)
#───────────────────────────────────────────────────────────────────────────────
# 4. Gradio UI
#───────────────────────────────────────────────────────────────────────────────
with gr.Blocks(title="Base 模型对话界面") as demo:
    gr.Markdown(f"## 💬 Base 模型对话界面 \n*正在使用权重* **{MODEL_PATH.name}**")

    # ── Sampling-parameter controls ───────────────────────────────────────────
    with gr.Row():
        max_new = gr.Slider(32, 32768, 2048, label="max_new_tokens")
        temperature = gr.Slider(0.0, 1.5, 0.8, step=0.05, label="temperature")
    with gr.Row():
        top_p = gr.Slider(0.0, 1.0, 0.95, step=0.01, label="top_p")
        top_k = gr.Slider(0, 200, 50, step=1, label="top_k")
    with gr.Row():
        rep_pen = gr.Slider(0.8, 2.0, 1.05, step=0.01, label="repetition_penalty")
        pres_pen = gr.Slider(0.0, 2.0, 0.0, step=0.05, label="presence_penalty")
        stop_text = gr.Textbox("", label="stop 序列(逗号分隔)", placeholder="如: ###,END")

    # ── Chatbot & buttons ─────────────────────────────────────────────────────
    ping_btn = gr.Button("🔁 测试 API")
    ping_out = gr.Textbox(label="API 测试结果", interactive=False)
    chat_ui = gr.ChatInterface(
        fn=chat,
        additional_inputs=[max_new, temperature, top_p, top_k, rep_pen, pres_pen, stop_text],
        type="messages"
    )

    def ping_api(max_new, temperature, top_p, top_k, rep_pen, pres_pen, stop_raw):
        """Send a tiny 'Ping?' generation using the current slider settings."""
        stop = [s.strip() for s in stop_raw.split(",") if s.strip()] or None
        sampling = {
            "max_new_tokens": int(max_new),
            "temperature": temperature,
            "top_p": top_p,
            "top_k": int(top_k),
            "repetition_penalty": rep_pen,
            "presence_penalty": pres_pen,
            **({"stop": stop} if stop else {})
        }
        # Truncate so a long generation doesn't flood the status textbox.
        return call_backend("Ping?", sampling)[:200]

    ping_btn.click(
        fn=ping_api,
        inputs=[max_new, temperature, top_p, top_k, rep_pen, pres_pen, stop_text],
        outputs=ping_out
    )

demo.launch(server_name="0.0.0.0", server_port=30001)