diff --git a/meta_ui.py b/meta_ui.py
index fe7cf8a..7bed530 100644
--- a/meta_ui.py
+++ b/meta_ui.py
@@ -1,92 +1,104 @@
 import gradio as gr
-import requests
-import json
+import requests, json, textwrap, datetime
 
-API_URL = "http://localhost:30000/generate"  # ✅ use the native generate endpoint
-API_KEY = "token-abc123"
+API_URL = "http://localhost:30000/generate"  # native generate endpoint
+API_KEY = "token-abc123"
 MODEL_NAME = "Qwen3-14b-base"
 
-# 🚫 No longer concatenate context; keep only the user's current input
-def build_prompt(history, user_message):
-    return user_message
+def now():
+    return datetime.datetime.now().strftime("%H:%M:%S")
 
+# ----------------------------------------
 # Main chat function
+# ----------------------------------------
 def chat(user_message, history, max_tokens, temperature):
-    prompt = build_prompt(history, user_message)
-
     headers = {
         "Authorization": f"Bearer {API_KEY}",
-        "Content-Type": "application/json"
+        "Content-Type": "application/json"
     }
     payload = {
         "model": MODEL_NAME,
-        "text": prompt,          # ✅ the generate endpoint uses a "text" field
+        "text": user_message,    # generate uses "text"
         "max_tokens": max_tokens,
         "temperature": temperature
     }
 
-    print(f"\n🟡 [chat] request payload:\n{json.dumps(payload, ensure_ascii=False, indent=2)}")
+    print(f"\n🟡 [{now()} chat] payload:\n{json.dumps(payload, ensure_ascii=False, indent=2)}")
 
     try:
-        response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
-        print(f"🟢 [chat] HTTP status: {response.status_code}")
-        print(f"🟢 [chat] response body: {response.text}")
+        resp = requests.post(API_URL, headers=headers, json=payload, timeout=60)
+        print(f"🟢 [{now()} chat] status={resp.status_code}")
+        print(f"🟢 [{now()} chat] headers={resp.headers}")
+        print(f"🟢 [{now()} chat] first 400 chars of body={resp.text[:400]}")
+
+        # ---- parse ---- #
+        if resp.status_code != 200:
+            return f"[⚠️ HTTP {resp.status_code}] {resp.text[:300]}"
+
+        if not resp.text.strip():  # empty body
+            return "[⚠️ Backend returned an empty response]"
+
+        try:
+            data = json.loads(resp.text)
+            return data.get("text", "").strip() or "[⚠️ No 'text' field in JSON]"
+        except json.JSONDecodeError as je:
+            snippet = textwrap.shorten(resp.text, width=400, placeholder=" ...")
+            return f"[⚠️ JSON parse failed] {je}\nRaw snippet:\n{snippet}"
 
-        if response.status_code == 200 and response.content:
-            result = response.json()
-            reply = result.get("text", "").strip()
-            if not reply:
-                reply = "[⚠️ Returned content is empty]"
-        else:
-            reply = f"[⚠️ Invalid response] status: {response.status_code}, body: {response.text}"
     except Exception as e:
-        reply = f"[Request failed] {e}"
+        return f"[❌ Request error] {e}"
 
-    return reply
-
-# Test API connectivity
-def test_api_connection(max_tokens, temperature):
+# ----------------------------------------
+# API test function
+# ----------------------------------------
+def test_api(max_tokens, temperature):
     headers = {
         "Authorization": f"Bearer {API_KEY}",
-        "Content-Type": "application/json"
+        "Content-Type": "application/json"
     }
     payload = {
         "model": MODEL_NAME,
-        "text": "Ping?",
+        "text": "Ping?",
         "max_tokens": max_tokens,
         "temperature": temperature
     }
 
-    print(f"\n🔵 [test] request payload:\n{json.dumps(payload, ensure_ascii=False, indent=2)}")
+    print(f"\n🔵 [{now()} test] payload:\n{json.dumps(payload, ensure_ascii=False, indent=2)}")
 
     try:
         resp = requests.post(API_URL, headers=headers, json=payload, timeout=10)
-        print(f"🟢 [test] HTTP status: {resp.status_code}")
-        print(f"🟢 [test] response body: {resp.text}")
+        print(f"🟢 [{now()} test] status={resp.status_code}")
+        print(f"🟢 [{now()} test] body={resp.text}")
+
+        if resp.status_code != 200 or not resp.text.strip():
+            return f"⚠️ Status {resp.status_code} or empty response: {resp.text[:200]}"
+
+        try:
+            out = json.loads(resp.text).get("text", "").strip()
+            return f"✅ API OK, returned: {out or '[empty text]'}"
+        except json.JSONDecodeError as je:
+            return f"⚠️ JSON parse failed: {je} snippet: {resp.text[:200]}"
 
-        if resp.status_code == 200 and resp.content:
-            out = resp.json().get("text", "").strip()
-            return f"✅ API available, response: {out or '[empty response]'}"
-        else:
-            return f"⚠️ Unexpected response: status {resp.status_code}, body: {resp.text}"
     except Exception as e:
-        return f"❌ API request failed: {e}"
+        return f"❌ Request error: {e}"
 
+# ----------------------------------------
 # Gradio UI
+# ----------------------------------------
 with gr.Blocks(title="Base Model Test UI") as demo:
     gr.Markdown("# 💬 Base Model Chat Interface")
 
     with gr.Row():
-        max_tokens = gr.Slider(32, 131072, value=8192, label="max_tokens")
+        max_tokens = gr.Slider(32, 32768, value=2048, label="max_tokens")  # don't jump straight to 8k+
         temperature = gr.Slider(0.0, 1.5, value=0.7, step=0.05, label="temperature")
 
-    test_btn = gr.Button("🔁 Test API availability")
+    test_btn = gr.Button("🔁 Test API availability")
     test_output = gr.Textbox(label="API test result", interactive=False)
 
     chatbot = gr.ChatInterface(
-        fn=lambda msg, hist: chat(msg, hist, max_tokens.value, temperature.value),
+        fn=lambda m, h: chat(m, h, max_tokens.value, temperature.value),
         title=None
     )
 
-    test_btn.click(fn=test_api_connection, inputs=[max_tokens, temperature], outputs=test_output)
+    test_btn.click(fn=test_api, inputs=[max_tokens, temperature], outputs=test_output)
 
 demo.launch(server_name="0.0.0.0", server_port=30001)
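
Note on the slider wiring: the lambda passed to `gr.ChatInterface` reads `max_tokens.value` and `temperature.value`, which are the components' initial defaults rather than the live UI state, so moving the sliders never changes the chat request. A minimal sketch of one way to wire them through, assuming the installed Gradio version's `gr.ChatInterface` supports `additional_inputs` (the extra component values are then appended to the `fn` call after the message and history, which matches `chat`'s existing signature):

```python
# Sketch, assuming gr.ChatInterface(additional_inputs=...) is available in the
# installed Gradio version: pass the live slider values into chat() instead of
# reading Slider.value, which is only the static default.
chatbot = gr.ChatInterface(
    fn=chat,  # invoked as chat(message, history, max_tokens, temperature)
    additional_inputs=[max_tokens, temperature],
    title=None
)
```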
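If empty or unparseable responses persist, it is also worth confirming the schema the `/generate` backend actually accepts. Port 30000 and the "native generate" comment suggest an SGLang-style server; such servers typically expect sampling settings nested under a `sampling_params` object (e.g. `max_new_tokens`) rather than top-level `max_tokens`/`temperature`, which may be silently ignored. This is an assumption about the backend, not something the diff confirms; a quick standalone probe:

```python
# Hypothetical probe assuming an SGLang-style /generate schema; adjust the
# payload to whatever the backend actually documents.
import requests

probe = {
    "text": "Ping?",
    "sampling_params": {"max_new_tokens": 32, "temperature": 0.7},
}
r = requests.post(
    "http://localhost:30000/generate",
    headers={"Authorization": "Bearer token-abc123"},
    json=probe,
    timeout=10,
)
# A healthy reply should be JSON with a top-level "text" field,
# which is what chat() and test_api() parse above.
print(r.status_code, r.text[:400])
```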