commit 4bb857f22f
parent 44c3814d13
Author: hailin
Date:   2025-07-27 18:18:31 +08:00

2 changed files with 93 additions and 48 deletions

View File

@@ -42,11 +42,16 @@ def consume_logs(dummy=None):
 # ───────────────────── Backend call ─────────────────────
-def backend(text, sampling):
-    payload = {"model": MODEL_NAME, "text": text, "sampling_params": sampling}
-    log(f"\n🟡 [{now()}] payload\n{json.dumps(payload, ensure_ascii=False, indent=2)}")
+def backend(text, sampling, api_suffix):
+    url = f"http://localhost:30000{api_suffix}"
+    if api_suffix == "/generate":
+        payload = {"model": MODEL_NAME, "text": text, "sampling_params": sampling}
+    else:  # "/v1/completion"
+        payload = {"model": MODEL_NAME, "prompt": text, **sampling}
+    log(f"\n🟡 [{now()}] POST {url}\n{json.dumps(payload, ensure_ascii=False, indent=2)}")
     try:
-        r = requests.post(API_URL,
+        r = requests.post(url,
                           headers={"Authorization": f"Bearer {API_KEY}",
                                    "Content-Type": "application/json"},
                           json=payload, timeout=180)
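
Note: the new branch above builds two different request shapes. A minimal sketch of the difference, with invented placeholder values (only the key names come from the diff):

    # The native /generate route nests options under "sampling_params";
    # the OpenAI-style completions route takes them as top-level fields.
    sampling = {"max_new_tokens": 128, "temperature": 0.8}
    native_payload = {"model": "demo-model", "text": "Hello", "sampling_params": sampling}
    openai_payload = {"model": "demo-model", "prompt": "Hello", **sampling}
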
@@ -54,30 +59,38 @@ def backend(text, sampling):
             data = r.json()
         except Exception:
             data = {}
-        fr   = data.get("meta_info", {}).get("finish_reason")
-        ctok = data.get("meta_info", {}).get("completion_tokens")
+        if api_suffix == "/generate":
+            txt  = data.get("text", "").strip()
+            meta = data.get("meta_info", {})
+            fr   = meta.get("finish_reason")
+            ctok = meta.get("completion_tokens")
+        else:
+            choice = data.get("choices", [{}])[0]
+            txt  = choice.get("text", "").strip()
+            fr   = choice.get("finish_reason")
+            ctok = data.get("usage", {}).get("completion_tokens")
         log(f"🟢 [{now()}] HTTP {r.status_code} tokens={ctok} finish={fr}\n"
             f"🟢 resp800={r.text[:800]!r}")
         if r.status_code != 200:
             return f"[HTTP {r.status_code}] {r.text[:300]}"
-        return data.get("text", "").strip() or "[⚠ 空]"
+        return txt or "[⚠ 空]"
     except Exception as e:
         log(f"[❌ 请求异常] {e}")
         return f"[❌ 请求异常] {e}"

 # ───────────────────── Chat callback ─────────────────────
 def chat(
         user, history,
         max_new, temp, top_p, top_k,
         rep_pen, pres_pen, stop_raw,
-        log_state
+        api_suffix, log_state
 ):
     import threading
     from queue import Queue, Empty
     stop = [s.strip() for s in stop_raw.split(",") if s.strip()] or None
     samp = {
-        "max_new_tokens": int(max_new),
+        ("max_tokens" if api_suffix == "/v1/completion" else "max_new_tokens"): int(max_new),
         "temperature": temp,
         "top_p": top_p,
         "top_k": int(top_k),
@@ -86,21 +99,17 @@ def chat(
         **({"stop": stop} if stop else {})
     }
     result_q = Queue()
-    # run backend inference in a background thread
     def worker():
-        out = backend(user, samp)
+        out = backend(user, samp, api_suffix)
         result_q.put(out)
-    thread = threading.Thread(target=worker)
-    thread.start()
-    # yield a placeholder first
+    threading.Thread(target=worker).start()
     yield "⏳ 正在生成中...", log_state
-    # poll the result queue every 0.1 s (avoids blocking the UI)
-    while thread.is_alive() or not result_q.empty():
+    while True:
         try:
             result = result_q.get(timeout=0.1)
             yield result, log_state
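
Note: the hunk is cut off before the loop's exit path, so whether the new `while True:` loop breaks after the first result is not visible here. A self-contained sketch of the worker-thread-plus-queue pattern, with an explicit break added for completeness (the slow call is simulated with time.sleep):

    import threading, time
    from queue import Queue, Empty

    def stream_result():
        result_q = Queue()
        def worker():
            time.sleep(1)            # stand-in for the blocking HTTP call
            result_q.put("done")
        threading.Thread(target=worker, daemon=True).start()
        yield "⏳ working..."        # placeholder shown immediately
        while True:
            try:
                yield result_q.get(timeout=0.1)  # final result
                break                # assumed exit; not shown in the hunk above
            except Empty:
                continue             # keep polling without blocking the UI
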
@@ -112,6 +121,9 @@ def chat(
 with gr.Blocks(title="调试界面") as demo:
     gr.Markdown(f"## 💬 调试界面  \n权重 **{MODEL_PATH.name}**")
+    with gr.Row():
+        api_choice = gr.Dropdown(choices=["/generate", "/v1/completion"],
+                                 value="/generate", label="选择推理接口")
     # sampling parameter widgets
     with gr.Row():
         max_new = gr.Slider(32, 32768, 128, label="max_new_tokens")
@@ -132,12 +144,14 @@ with gr.Blocks(title="调试界面") as demo:
     chatbot = gr.ChatInterface(
         fn=chat,
         additional_inputs=[max_new, temp, top_p, top_k,
-                           rep_pen, pres_pen, stop_txt, log_state],
+                           rep_pen, pres_pen, stop_txt,
+                           api_choice, log_state],   # ✅ include the dropdown
         additional_outputs=[log_state],
         type="messages"
     )

     # log refresh timer
     timer = gr.Timer(1.0, render=True)
     timer.tick(
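
Note: gr.ChatInterface passes additional_inputs to fn positionally after (message, history), so the list order must mirror chat()'s signature; that is why api_choice is inserted before log_state in both the parameter list and the inputs list:

    def chat(user, history,
             max_new, temp, top_p, top_k,
             rep_pen, pres_pen, stop_raw,
             api_suffix, log_state):      # ← same order as the inputs list
        ...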

View File

@@ -20,16 +20,26 @@ now = lambda: datetime.datetime.now().strftime("%H:%M:%S")
 # ───────────────────── Log queue ─────────────────────
 LOG_Q: "queue.Queue[str]" = queue.Queue()
+LOG_TXT = ""  # ✅ global log cache so chat focus can't block log_box updates

 def log(msg):  # print to terminal + push to the queue
     print(msg, flush=True)
     LOG_Q.put(msg)

-def consume_logs(state_txt: str):
-    """Called by the Interval: drain all new lines from the queue and append them to the state"""
-    buf = deque(state_txt.splitlines(), maxlen=400)
+prev_log_value = ""  # log contents from the previous tick
+
+def consume_logs(dummy=None):
+    """Refresh log_box once a second so a running chat can't block UI updates"""
+    global LOG_TXT, prev_log_value
+    buf = deque(LOG_TXT.splitlines(), maxlen=400)
     while not LOG_Q.empty():
         buf.append(LOG_Q.get())
-    return "\n".join(buf)
+    LOG_TXT = "\n".join(buf)
+    if LOG_TXT != prev_log_value:
+        prev_log_value = LOG_TXT
+        return gr.update(value=LOG_TXT)
+    return gr.update()  # no change → skip the frontend refresh

 # ───────────────────── Backend call ─────────────────────
 def backend(text, sampling):
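
Note: returning a bare gr.update() from a handler leaves the output component untouched, while gr.update(value=...) pushes a repaint; that is what lets consume_logs skip frontend traffic when nothing changed. A minimal standalone sketch of the pattern (read_text is a hypothetical stand-in for the log source):

    import gradio as gr

    prev = ""

    def poll(read_text=lambda: "hello"):  # hypothetical source of the text
        global prev
        current = read_text()
        if current != prev:
            prev = current
            return gr.update(value=current)  # changed → repaint the Textbox
        return gr.update()                   # unchanged → no-op
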
@@ -62,6 +72,9 @@ def chat(
         rep_pen, pres_pen, stop_raw,
         log_state
 ):
+    import threading
+    from queue import Queue, Empty
     stop = [s.strip() for s in stop_raw.split(",") if s.strip()] or None
     samp = {
         "max_new_tokens": int(max_new),
@@ -69,12 +82,31 @@ def chat(
         "top_p": top_p,
         "top_k": int(top_k),
         "repetition_penalty": rep_pen,
         "presence_penalty": pres_pen,
         **({"stop": stop} if stop else {})
     }
-    out = backend(user, samp)
-    # return the answer and pass log_state through unchanged (refresh is handled by the Interval)
-    return out, log_state
+    result_q = Queue()
+
+    # run backend inference in a background thread
+    def worker():
+        out = backend(user, samp)
+        result_q.put(out)
+    thread = threading.Thread(target=worker)
+    thread.start()
+
+    # yield a placeholder first
+    yield "⏳ 正在生成中...", log_state
+
+    # poll the result queue every 0.1 s (avoids blocking the UI)
+    while thread.is_alive() or not result_q.empty():
+        try:
+            result = result_q.get(timeout=0.1)
+            yield result, log_state
+        except Empty:
+            continue

 # ───────────────────── Gradio UI ─────────────────────
 with gr.Blocks(title="调试界面") as demo:
@@ -82,7 +114,7 @@ with gr.Blocks(title="调试界面") as demo:
     # sampling parameter widgets
     with gr.Row():
-        max_new = gr.Slider(32, 32768, 2048, label="max_new_tokens")
+        max_new = gr.Slider(32, 32768, 128, label="max_new_tokens")
         temp    = gr.Slider(0, 1.5, 0.8, step=0.05, label="temperature")
     with gr.Row():
         top_p = gr.Slider(0, 1, 0.95, step=0.01, label="top_p")
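
Note: gr.Slider's first three positional arguments are (minimum, maximum, value), so this hunk only lowers the default from 2048 to 128 tokens while keeping the 32-32768 range. Written out with keywords:

    max_new = gr.Slider(minimum=32, maximum=32768, value=128, label="max_new_tokens")
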
@@ -92,31 +124,30 @@ with gr.Blocks(title="调试界面") as demo:
         pres_pen= gr.Slider(0, 2, 0.0, step=0.05, label="presence_penalty")
     stop_txt = gr.Textbox("", label="stop 序列(逗号分隔)")

-    dbg_chk  = gr.Checkbox(label="📜 显示 Debug 面板", value=True)
-    log_box  = gr.Textbox(label="实时日志", lines=20, interactive=False, visible=True)
-    log_state= gr.State("")  # holds the full log text
+    log_state = gr.State("")  # state passthrough
+    dbg_chk   = gr.Checkbox(label="📜 显示 Debug 面板", value=False)   # ✅ off by default
+    log_box   = gr.Textbox(label="实时日志", lines=20, interactive=False, visible=False)  # ✅ hidden by default

-    # ────────────── Periodic log refresh ──────────────
-    timer = gr.Timer(1.0, render=False)  # fires once per second
-    timer.tick(            # ⬅ the key is using .tick
-        fn=consume_logs,   # merge new lines from the queue
-        inputs=[log_box],  # read the Textbox's current contents directly
-        outputs=[log_box], # write back into the same Textbox
-    )
-    # show in log_box
-    log_state.change(lambda txt: gr.update(value=txt), log_state, log_box)
-    # toggle Debug panel visibility
-    dbg_chk.change(lambda v: gr.update(visible=v), dbg_chk, log_box)
-
-    # Chatbot
+    # Chat UI (moved ahead of the log panel)
     chatbot = gr.ChatInterface(
         fn=chat,
         additional_inputs=[max_new, temp, top_p, top_k,
                            rep_pen, pres_pen, stop_txt, log_state],
-        additional_outputs=[log_state],  # ★ required
+        additional_outputs=[log_state],
         type="messages"
     )

+    # log refresh timer
+    timer = gr.Timer(1.0, render=True)
+    timer.tick(
+        fn=consume_logs,
+        inputs=[],
+        outputs=[log_box],
+    )
+    log_state.change(lambda txt: gr.update(value=txt), log_state, log_box)
+    dbg_chk.change(lambda v: gr.update(visible=v), dbg_chk, log_box)

 demo.launch(server_name="0.0.0.0", server_port=30001)
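
Note: after the rewrite the log pipeline is timer → consume_logs → log_box, with the visibility toggle wired separately. A stripped-down sketch of the same wiring (consume_logs as defined earlier in this file; gr.Timer fires a tick event every `value` seconds):

    import gradio as gr

    with gr.Blocks() as demo:
        log_box = gr.Textbox(label="实时日志", lines=5, interactive=False)
        dbg_chk = gr.Checkbox(label="show log", value=False)
        timer = gr.Timer(1.0)  # ticks once per second
        timer.tick(fn=consume_logs, inputs=[], outputs=[log_box])
        dbg_chk.change(lambda v: gr.update(visible=v), dbg_chk, log_box)
    demo.launch()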