diff --git a/Dockerfile b/Dockerfile
index a4a07b4..e3438c4 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -444,12 +444,14 @@ RUN --mount=type=cache,target=/root/.cache/uv \
 
 ENV VLLM_USAGE_SOURCE production-docker-image
 
+# ---- Install supervisord ----
 RUN apt-get update && apt-get install -y supervisor && mkdir -p /etc/supervisor/conf.d
 
-# 拷贝配置文件(假设你准备了)
-COPY ./supervisord.conf /etc/supervisor/supervisord.conf
+# ---- Copy UI and supervisor config ----
 COPY ./meta_ui.py /app/meta_ui.py
-
+COPY ./supervisord.conf /etc/supervisor/supervisord.conf
+# NOTE(review): baking model weights into the image adds tens of GB per layer; prefer a bind/volume mount or a model cache at runtime — confirm this is intentional.
+COPY ./Alibaba/Qwen3-30B-A3B /root/.cradle/Alibaba/Qwen3-30B-A3B
 
 # # define sagemaker first, so it is not default from `docker build`
 # FROM vllm-openai-base AS vllm-sagemaker
@@ -460,5 +462,5 @@ COPY ./meta_ui.py /app/meta_ui.py
 FROM vllm-openai-base AS vllm-openai
 
-ENTRYPOINT ["python3", "-m", "vllm.entrypoints.openai.api_server"]
+CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
 
 #################### OPENAI API SERVER ####################
diff --git a/supervisord.conf b/supervisord.conf
index 3f4149a..aba7886 100644
--- a/supervisord.conf
+++ b/supervisord.conf
@@ -4,8 +4,9 @@
 logfile=/dev/stdout
 logfile_maxbytes=0
 loglevel=info
-[program:sglang]
-command=python3 -m sglang.launch_server --host 0.0.0.0 --port 30000 --model-path /root/.cradle/Alibaba/Qwen3-30B-A3B/ --tp 4 --api-key token-abc123 --enable-metrics
+[program:vllm]
+; --tensor-parallel-size 4 preserves the --tp 4 of the replaced sglang command; TODO: move the hardcoded api key into an env var / secret
+command=python3 -m vllm.entrypoints.openai.api_server --host 0.0.0.0 --port 30000 --model /root/.cradle/Alibaba/Qwen3-30B-A3B --tensor-parallel-size 4 --api-key token-abc123
 autostart=true
 autorestart=true
 stdout_logfile=/dev/stdout