"""Gradio chat UI for a base model served by SGLang's native /generate endpoint.

Reconstructed (post-patch) meta_ui.py from the collapsed diff, with fixes:
- network errors are caught again (the patch dropped the old try/except),
- sampling-param construction is shared between chat and the API ping,
- a corrupt config.json can no longer crash startup.
"""
import datetime
import json
import os  # kept from the original import line (unused here; retained for compatibility)
import textwrap
from pathlib import Path

import gradio as gr
import requests

#───────────────────────────────────────────────────────────────────────────────
# 1. Server endpoint & weight path
#───────────────────────────────────────────────────────────────────────────────
API_URL = "http://localhost:30000/generate"
API_KEY = "token-abc123"
MODEL_PATH = Path("/root/.cradle/Alibaba/Qwen3-30B-A3B-Base")  # ← set to the path the supervisor passes in


def detect_model_name(model_path: Path) -> str:
    """Best-effort model name read from the checkpoint's config.json.

    Falls back to the directory name when the config is missing, unreadable,
    or carries neither "architectures" nor "model_type".
    """
    cfg = model_path / "config.json"
    if not cfg.exists():
        return model_path.name
    try:
        with open(cfg, "r", encoding="utf-8") as f:
            data = json.load(f)
    except (OSError, json.JSONDecodeError):
        # A corrupt or unreadable config must not prevent the UI from starting.
        return model_path.name
    # Qwen / LLaMA / GPT-NeoX checkpoints all expose "architectures" or "model_type".
    return data.get("architectures", [None])[0] or data.get("model_type") or model_path.name


MODEL_NAME = detect_model_name(MODEL_PATH)


def now() -> str:
    """Wall-clock timestamp (HH:MM:SS) for console log lines."""
    return datetime.datetime.now().strftime("%H:%M:%S")


def _build_sampling(max_new, temperature, top_p, top_k, rep_pen, pres_pen, stop_raw):
    """Assemble the SGLang sampling_params dict from the UI controls.

    Shared by chat() and ping_api() so the two code paths cannot drift.
    stop_raw is a comma-separated string; empty entries are dropped and an
    empty list omits the "stop" key entirely (matches original behavior).
    """
    stop = [s.strip() for s in stop_raw.split(",") if s.strip()]
    sampling = {
        "max_new_tokens": int(max_new),
        "temperature": temperature,
        "top_p": top_p,
        "top_k": int(top_k),
        "repetition_penalty": rep_pen,
        "presence_penalty": pres_pen,
    }
    if stop:
        sampling["stop"] = stop
    return sampling


#───────────────────────────────────────────────────────────────────────────────
# 2. Call SGLang /generate
#───────────────────────────────────────────────────────────────────────────────
def call_backend(text, sampling):
    """POST one /generate request; return generated text or a readable error string.

    Never raises: network failures, non-200 statuses, and malformed JSON all
    come back as bracketed diagnostic strings so the Gradio callback survives.
    """
    payload = {"model": MODEL_NAME, "text": text, "sampling_params": sampling}
    print(f"\n🟡 [{now()} payload] {json.dumps(payload, ensure_ascii=False)[:400]}…")
    try:
        resp = requests.post(
            API_URL,
            headers={"Authorization": f"Bearer {API_KEY}", "Content-Type": "application/json"},
            json=payload,
            timeout=180,
        )
    except requests.RequestException as e:
        # Restores the old behavior: timeouts / connection errors surface in the
        # chat box instead of crashing the Gradio handler.
        return f"[❌ 请求异常] {e}"
    if resp.status_code != 200:
        return f"[HTTP {resp.status_code}] {resp.text[:300]}"
    try:
        return json.loads(resp.text).get("text", "").strip() or "[⚠ 后端无 text]"
    except json.JSONDecodeError:
        snippet = textwrap.shorten(resp.text, 300, placeholder=" …")
        return f"[⚠ JSON 解析失败] {snippet}"


#───────────────────────────────────────────────────────────────────────────────
# 3. Gradio main callback
#───────────────────────────────────────────────────────────────────────────────
def chat(
    user_msg, history,
    max_new, temperature, top_p, top_k,
    rep_pen, pres_pen, stop_raw
):
    """ChatInterface callback: forward one user turn to the backend.

    history is supplied by gr.ChatInterface but unused — the native /generate
    endpoint here is stateless and receives only the latest message.
    """
    sampling = _build_sampling(max_new, temperature, top_p, top_k, rep_pen, pres_pen, stop_raw)
    return call_backend(user_msg, sampling)


#───────────────────────────────────────────────────────────────────────────────
# 4. Gradio UI
#───────────────────────────────────────────────────────────────────────────────
with gr.Blocks(title="Base 模型对话界面") as demo:
    gr.Markdown(f"## 💬 Base 模型对话界面 \n*正在使用权重* **{MODEL_PATH.name}**")

    # ── Sampling-parameter controls ──────────────────────────────────────────
    with gr.Row():
        max_new = gr.Slider(32, 32768, 2048, label="max_new_tokens")
        temperature = gr.Slider(0.0, 1.5, 0.8, step=0.05, label="temperature")

    with gr.Row():
        top_p = gr.Slider(0.0, 1.0, 0.95, step=0.01, label="top_p")
        top_k = gr.Slider(0, 200, 50, step=1, label="top_k")

    with gr.Row():
        rep_pen = gr.Slider(0.8, 2.0, 1.05, step=0.01, label="repetition_penalty")
        pres_pen = gr.Slider(0.0, 2.0, 0.0, step=0.05, label="presence_penalty")

    stop_text = gr.Textbox("", label="stop 序列(逗号分隔)", placeholder="如: ###,END")

    # ── Chatbot & buttons ────────────────────────────────────────────────────
    ping_btn = gr.Button("🔁 测试 API")
    ping_out = gr.Textbox(label="API 测试结果", interactive=False)

    chat_ui = gr.ChatInterface(
        fn=chat,
        additional_inputs=[max_new, temperature, top_p, top_k, rep_pen, pres_pen, stop_text],
        type="messages"
    )

    def ping_api(max_new, temperature, top_p, top_k, rep_pen, pres_pen, stop_raw):
        """Fire a tiny 'Ping?' request so the user can check backend liveness."""
        sampling = _build_sampling(max_new, temperature, top_p, top_k, rep_pen, pres_pen, stop_raw)
        return call_backend("Ping?", sampling)[:200]

    ping_btn.click(
        fn=ping_api,
        inputs=[max_new, temperature, top_p, top_k, rep_pen, pres_pen, stop_text],
        outputs=ping_out
    )

demo.launch(server_name="0.0.0.0", server_port=30001)