From fd41d59cd7df5996d99e03127605808c4fe461f6 Mon Sep 17 00:00:00 2001
From: hailin
Date: Sat, 20 Sep 2025 12:36:37 +0800
Subject: [PATCH] Dockerfile: build torchvision 0.22.1 and flashinfer 0.3.1
 from local source trees and install the built wheels

Copy the torchvision and flashinfer sources from the build context instead
of cloning them from GitHub at build time, and install the wheel produced
by each build step in the same layer.

---
 Dockerfile | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 9ca4d70c5..dae0156b4 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -67,14 +67,17 @@ RUN python3 -c "import torch, typing_extensions, sympy, jinja2, fsspec, networkx

 # ── Build torchvision 0.22.1 (depends on the local torch) ──────────────────
 WORKDIR /opt
-RUN git clone -b v0.22.1 https://github.com/pytorch/vision.git
+# RUN git clone -b v0.22.1 https://github.com/pytorch/vision.git
+COPY ./vision_0.22.1/ /opt/vision

 WORKDIR /opt/vision
-RUN python3 setup.py bdist_wheel
+RUN python3 setup.py bdist_wheel && \
+    pip install --no-cache-dir --no-deps dist/torchvision-*.whl

 # ── Build flashinfer (main branch supports torch 2.7 / cu126) ──────────────
 WORKDIR /opt
-RUN git clone --recursive -b v0.3.1 https://github.com/flashinfer-ai/flashinfer.git
+# RUN git clone --recursive -b v0.3.1 https://github.com/flashinfer-ai/flashinfer.git
+COPY ./flashinfer_0.3.1/ /opt/flashinfer

 WORKDIR /opt/flashinfer

@@ -85,7 +88,8 @@ ENV FLASHINFER_CUDA_ARCH_LIST="7.5 8.0 8.6 8.9"
 RUN python3 -m pip install --no-cache-dir numpy requests build "cuda-python>=12.0,<13" "nvidia-nvshmem-cu12" ninja pynvml && \
     python3 -m flashinfer.aot && \
     python3 -m build --no-isolation --wheel && \
-    ls -lh dist/
+    ls -lh dist/ \
+    && python3 -m pip install --no-cache-dir --no-deps dist/*.whl

 COPY ./sglang /sgl/sglang
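
Build-context note (a sketch, assuming the directory names taken from the
COPY paths above): with the git clone steps commented out, the vendored
source trees must already sit next to the Dockerfile when `docker build`
runs, for example prepared with the same tags the removed clone lines used:

    # fetch torchvision v0.22.1 into the directory the Dockerfile COPYs
    git clone -b v0.22.1 https://github.com/pytorch/vision.git vision_0.22.1
    # fetch flashinfer v0.3.1 with submodules into its COPY directory
    git clone --recursive -b v0.3.1 https://github.com/flashinfer-ai/flashinfer.git flashinfer_0.3.1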