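# syntax=docker/dockerfile:1
# This Dockerfile relies on BuildKit features (RUN --mount cache mounts and
# heredoc RUN blocks), so the syntax directive above is declared explicitly.
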
ARG CUDA_VERSION=12.8.1
ARG PYTHON_VERSION=3.12

ARG BUILD_BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu20.04
ARG FINAL_BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04
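# A minimal example invocation (hypothetical tag; any of the ARGs above can be
# overridden the same way):
#
#   docker buildx build \
#     --build-arg CUDA_VERSION=12.8.1 \
#     --build-arg PYTHON_VERSION=3.12 \
#     --target vllm-openai -t vllm-openai:local .
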
#################### BASE BUILD IMAGE ####################
# prepare basic build environment
FROM ${BUILD_BASE_IMAGE} AS base
ARG CUDA_VERSION
ARG PYTHON_VERSION
ARG TARGETPLATFORM
ARG INSTALL_KV_CONNECTORS=false
ENV DEBIAN_FRONTEND=noninteractive

ARG DEADSNAKES_MIRROR_URL
ARG DEADSNAKES_GPGKEY_URL
ARG GET_PIP_URL

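# Python ${PYTHON_VERSION} comes from deadsnakes: either an internal mirror
# passed via DEADSNAKES_MIRROR_URL (with an optional signing key), or the
# public PPA as the fallback, retried up to three times since the PPA can be
# flaky.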
# Install Python and other dependencies
RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \
    && echo 'tzdata tzdata/Zones/America select Los_Angeles' | debconf-set-selections \
    && apt-get update -y \
    && apt-get install -y ccache software-properties-common git curl sudo \
    && if [ ! -z ${DEADSNAKES_MIRROR_URL} ] ; then \
        if [ ! -z "${DEADSNAKES_GPGKEY_URL}" ] ; then \
            mkdir -p -m 0755 /etc/apt/keyrings ; \
            curl -L ${DEADSNAKES_GPGKEY_URL} | gpg --dearmor > /etc/apt/keyrings/deadsnakes.gpg ; \
            sudo chmod 644 /etc/apt/keyrings/deadsnakes.gpg ; \
            echo "deb [signed-by=/etc/apt/keyrings/deadsnakes.gpg] ${DEADSNAKES_MIRROR_URL} $(lsb_release -cs) main" > /etc/apt/sources.list.d/deadsnakes.list ; \
        fi ; \
    else \
        for i in 1 2 3; do \
            add-apt-repository -y ppa:deadsnakes/ppa && break || \
            { echo "Attempt $i failed, retrying in 5s..."; sleep 5; }; \
        done ; \
    fi \
    && apt-get update -y \
    && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \
    && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \
    && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \
    && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \
    && python${PYTHON_VERSION} -m ensurepip --upgrade \
    && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION} \
    && python3 --version && python3 -m pip --version

ARG PIP_INDEX_URL UV_INDEX_URL
ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL
ARG PYTORCH_CUDA_INDEX_BASE_URL
ARG PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL
ARG PIP_KEYRING_PROVIDER UV_KEYRING_PROVIDER

# Install uv for faster pip installs
RUN --mount=type=cache,target=/root/.cache/uv \
    python3 -m pip install uv

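# This timeout (in seconds) is necessary when installing some dependencies via uv since it's likely to time out
# Reference: https://github.com/astral-sh/uv/pull/1694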
ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"

RUN apt-get install -y gcc-10 g++-10
RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 110 --slave /usr/bin/g++ g++ /usr/bin/g++-10
RUN <<EOF
gcc --version
EOF

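# Register the CUDA compat libraries with the dynamic linker so binaries built
# against this toolkit can run on hosts whose NVIDIA driver is older.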
RUN ldconfig /usr/local/cuda-$(echo $CUDA_VERSION | cut -d. -f1,2)/compat/

WORKDIR /workspace

# install build and runtime dependencies
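# Note: the arm64 branch below pins nightly torch/torchvision builds;
# presumably stable aarch64 CUDA wheels were not available for these versions
# when this was written. x86_64 gets torch via requirements/cuda.txt instead.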
RUN --mount=type=cache,target=/root/.cache/uv \
    if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
        uv pip install --system \
            --index-url ${PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') \
            "torch==2.8.0.dev20250318+cu128" "torchvision==0.22.0.dev20250319"; \
        uv pip install --system \
            --index-url ${PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') \
            --pre pytorch_triton==3.3.0+gitab727c40; \
    fi

COPY ./vllm_v0.10.0/requirements/common.txt requirements/common.txt
COPY ./vllm_v0.10.0/requirements/cuda.txt requirements/cuda.txt
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -r requirements/cuda.txt \
        --extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.')

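# TORCH_CUDA_ARCH_LIST selects the SM architectures the CUDA extensions are
# compiled for; every extra entry lengthens the build and grows the wheel.
# For a single known GPU you can narrow it at build time, e.g. (hypothetical):
#   --build-arg torch_cuda_arch_list='9.0'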
ARG torch_cuda_arch_list='7.0 7.5 8.0 8.6 8.9 9.0 10.0 12.0'
ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list}
# Override the arch list for flash-attn to reduce the binary size
ARG vllm_fa_cmake_gpu_arches='70-real;75-real;80-real;86-real;89-real;90-real;100-real;120-real'
ENV VLLM_FA_CMAKE_GPU_ARCHES=${vllm_fa_cmake_gpu_arches}
#################### BASE BUILD IMAGE ####################

#################### WHEEL BUILD IMAGE ####################
FROM base AS build

RUN curl -L -o sccache.tar.gz https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \
    tar -xzf sccache.tar.gz && mv sccache-*/sccache /usr/local/bin/sccache && rm -rf sccache*

ARG TARGETPLATFORM

ARG PIP_INDEX_URL UV_INDEX_URL
ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL
ARG PYTORCH_CUDA_INDEX_BASE_URL

# 👇 Added line: pin the package version for setuptools-scm, since the source
# tree copied in below is not a git checkout.
ENV SETUPTOOLS_SCM_PRETEND_VERSION=0.10.0

# install build dependencies
COPY ./vllm_v0.10.0/requirements/build.txt requirements/build.txt

ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"

RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -r requirements/build.txt \
        --extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.')

COPY ./vllm_v0.10.0 /workspace

# ARG GIT_REPO_CHECK=0
# RUN --mount=type=bind,source=.git,target=.git \
#     if [ "$GIT_REPO_CHECK" != "0" ]; then bash tools/check_repo.sh ; fi

# max jobs used by Ninja to build extensions
ARG max_jobs=96
ENV MAX_JOBS=${max_jobs}
# number of threads used by nvcc
ARG nvcc_threads=8
ENV NVCC_THREADS=$nvcc_threads
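# Rough effective parallelism is MAX_JOBS x NVCC_THREADS (96 x 8 here), and
# each compile job can use several GB of RAM; lower max_jobs on smaller build
# hosts, e.g. --build-arg max_jobs=16.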

# Flag to control whether to use pre-built vLLM wheels
ARG VLLM_USE_PRECOMPILED
# TODO: in setup.py VLLM_USE_PRECOMPILED is sensitive to truthiness, it will take =0 as "true", this should be fixed
ENV VLLM_USE_PRECOMPILED=""
RUN if [ "${VLLM_USE_PRECOMPILED}" = "1" ]; then \
        export VLLM_USE_PRECOMPILED=1 && \
        echo "Using precompiled wheels"; \
    else \
        unset VLLM_USE_PRECOMPILED && \
        echo "Leaving VLLM_USE_PRECOMPILED unset to build wheels from source"; \
    fi
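
# Note: the export/unset above only lasts for that single RUN layer; the value
# the build steps below actually see is the ENV set earlier in this stage.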
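
# These build args are referenced by the sccache step below but were never
# declared in this stage; without ARG declarations the corresponding
# --build-arg values would not reach the RUN. Defaults intentionally left
# empty.
ARG USE_SCCACHE
ARG SCCACHE_DOWNLOAD_URL
ARG SCCACHE_BUCKET_NAME
ARG SCCACHE_REGION_NAME
ARG SCCACHE_S3_NO_CREDENTIALS
ARG SCCACHE_ENDPOINT
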
# if USE_SCCACHE is set, use sccache to speed up compilation
RUN --mount=type=cache,target=/root/.cache/uv \
    --mount=type=cache,target=/root/.cache/torch_extensions \
    # --mount=type=bind,source=.git,target=.git \
    if [ "$USE_SCCACHE" = "1" ]; then \
        echo "Installing sccache..." \
        && curl -L -o sccache.tar.gz ${SCCACHE_DOWNLOAD_URL} \
        && tar -xzf sccache.tar.gz \
        && sudo mv sccache-v0.8.1-x86_64-unknown-linux-musl/sccache /usr/bin/sccache \
        && rm -rf sccache.tar.gz sccache-v0.8.1-x86_64-unknown-linux-musl \
        && if [ ! -z ${SCCACHE_ENDPOINT} ] ; then export SCCACHE_ENDPOINT=${SCCACHE_ENDPOINT} ; fi \
        && export SCCACHE_BUCKET=${SCCACHE_BUCKET_NAME} \
        && export SCCACHE_REGION=${SCCACHE_REGION_NAME} \
        && export SCCACHE_S3_NO_CREDENTIALS=${SCCACHE_S3_NO_CREDENTIALS} \
        && export SCCACHE_IDLE_TIMEOUT=0 \
        && export CMAKE_BUILD_TYPE=Release \
        && sccache --show-stats \
        && python3 setup.py bdist_wheel --dist-dir=dist --py-limited-api=cp38 \
        && sccache --show-stats; \
    fi

ENV CCACHE_DIR=/root/.cache/ccache
RUN --mount=type=cache,target=/root/.cache/ccache \
    --mount=type=cache,target=/root/.cache/uv \
    # --mount=type=bind,source=.git,target=.git \
    if [ "$USE_SCCACHE" != "1" ]; then \
        # Clean any existing CMake artifacts
        rm -rf .deps && \
        mkdir -p .deps && \
        python3 setup.py bdist_wheel --dist-dir=dist --py-limited-api=cp38; \
    fi

# Check the size of the wheel if RUN_WHEEL_CHECK is true
COPY ./vllm_v0.10.0/.buildkite/check-wheel-size.py check-wheel-size.py
# sync the default value with .buildkite/check-wheel-size.py
ARG VLLM_MAX_SIZE_MB=40000
ENV VLLM_MAX_SIZE_MB=$VLLM_MAX_SIZE_MB
ARG RUN_WHEEL_CHECK=true
RUN if [ "$RUN_WHEEL_CHECK" = "true" ]; then \
        python3 check-wheel-size.py dist; \
    else \
        echo "Skipping wheel size check."; \
    fi
#################### WHEEL BUILD IMAGE ####################

#################### vLLM installation IMAGE ####################
FROM ${FINAL_BASE_IMAGE} AS vllm-base
ARG CUDA_VERSION
ARG PYTHON_VERSION
ARG INSTALL_KV_CONNECTORS=false
WORKDIR /vllm-workspace
ENV DEBIAN_FRONTEND=noninteractive
ARG TARGETPLATFORM

SHELL ["/bin/bash", "-c"]

ARG DEADSNAKES_MIRROR_URL
ARG DEADSNAKES_GPGKEY_URL
ARG GET_PIP_URL

RUN PYTHON_VERSION_STR=$(echo ${PYTHON_VERSION} | sed 's/\.//g') && \
    echo "export PYTHON_VERSION_STR=${PYTHON_VERSION_STR}" >> /etc/environment

# Install Python and other dependencies
RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \
    && echo 'tzdata tzdata/Zones/America select Los_Angeles' | debconf-set-selections \
    && apt-get update -y \
    && apt-get install -y ccache software-properties-common git curl wget sudo vim python3-pip \
    && apt-get install -y ffmpeg libsm6 libxext6 libgl1 \
    && if [ ! -z ${DEADSNAKES_MIRROR_URL} ] ; then \
        if [ ! -z "${DEADSNAKES_GPGKEY_URL}" ] ; then \
            mkdir -p -m 0755 /etc/apt/keyrings ; \
            curl -L ${DEADSNAKES_GPGKEY_URL} | gpg --dearmor > /etc/apt/keyrings/deadsnakes.gpg ; \
            sudo chmod 644 /etc/apt/keyrings/deadsnakes.gpg ; \
            echo "deb [signed-by=/etc/apt/keyrings/deadsnakes.gpg] ${DEADSNAKES_MIRROR_URL} $(lsb_release -cs) main" > /etc/apt/sources.list.d/deadsnakes.list ; \
        fi ; \
    else \
        for i in 1 2 3; do \
            add-apt-repository -y ppa:deadsnakes/ppa && break || \
            { echo "Attempt $i failed, retrying in 5s..."; sleep 5; }; \
        done ; \
    fi \
    && apt-get update -y \
    && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv libibverbs-dev \
    && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \
    && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \
    && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \
    && python${PYTHON_VERSION} -m ensurepip --upgrade \
    && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION} \
    && python3 --version && python3 -m pip --version

ARG PIP_INDEX_URL UV_INDEX_URL
ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL
ARG PYTORCH_CUDA_INDEX_BASE_URL
ARG PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL
ARG PIP_KEYRING_PROVIDER UV_KEYRING_PROVIDER

# Install uv for faster pip installs
RUN --mount=type=cache,target=/root/.cache/uv \
    python3 -m pip install uv

# This timeout (in seconds) is necessary when installing some dependencies via uv since it's likely to time out
# Reference: https://github.com/astral-sh/uv/pull/1694
ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"

RUN ldconfig /usr/local/cuda-$(echo $CUDA_VERSION | cut -d. -f1,2)/compat/

RUN --mount=type=cache,target=/root/.cache/uv \
    if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
        uv pip install --system \
            --index-url ${PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') \
            "torch==2.8.0.dev20250318+cu128" "torchvision==0.22.0.dev20250319" ; \
        uv pip install --system \
            --index-url ${PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') \
            --pre pytorch_triton==3.3.0+gitab727c40 ; \
    fi

# Install vllm wheel first, so that torch etc will be installed.
RUN --mount=type=bind,from=build,src=/workspace/dist,target=/vllm-workspace/dist \
    --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system dist/*.whl --verbose \
        --extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.')

# Install FlashInfer from source
ARG FLASHINFER_GIT_REPO="https://github.com/flashinfer-ai/flashinfer.git"
ARG FLASHINFER_GIT_REF="v0.2.8rc1"
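# The quoted 'BASH' delimiter stops the outer shell from expanding variables,
# but ARG/ENV values (FLASHINFER_GIT_REF, CUDA_VERSION, ...) are exported into
# the RUN environment, so the inner bash still resolves them at build time.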
RUN --mount=type=cache,target=/root/.cache/uv bash - <<'BASH'
    . /etc/environment
    git clone --depth 1 --recursive --shallow-submodules \
        --branch ${FLASHINFER_GIT_REF} \
        ${FLASHINFER_GIT_REPO} flashinfer
    # Exclude CUDA arches for older versions (11.x and 12.0-12.7)
    # TODO: Update this to allow setting TORCH_CUDA_ARCH_LIST as a build arg.
    if [[ "${CUDA_VERSION}" == 11.* ]]; then
        FI_TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 8.9"
    elif [[ "${CUDA_VERSION}" == 12.[0-7]* ]]; then
        FI_TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 8.9 9.0a"
    else
        # CUDA 12.8+ supports 10.0a and 12.0
        FI_TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6 8.9 9.0a 10.0a 12.0"
    fi
    echo "🏗️ Building FlashInfer for arches: ${FI_TORCH_CUDA_ARCH_LIST}"
    # Needed to build AOT kernels
    pushd flashinfer
        TORCH_CUDA_ARCH_LIST="${FI_TORCH_CUDA_ARCH_LIST}" \
            python3 -m flashinfer.aot
        TORCH_CUDA_ARCH_LIST="${FI_TORCH_CUDA_ARCH_LIST}" \
            uv pip install --system --no-build-isolation .
    popd
    rm -rf flashinfer ~/.cache/flashinfer/aot/*/unused*
BASH

COPY ./vllm_v0.10.0/examples examples
COPY ./vllm_v0.10.0/benchmarks benchmarks
COPY ./vllm_v0.10.0/vllm/collect_env.py .

RUN --mount=type=cache,target=/root/.cache/uv \
    . /etc/environment && \
    uv pip list

COPY ./vllm_v0.10.0/requirements/build.txt requirements/build.txt
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -r requirements/build.txt \
        --extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.')

#################### vLLM installation IMAGE ####################

#################### OPENAI API SERVER ####################
# base openai image with additional requirements, for any subsequent openai-style images
FROM vllm-base AS vllm-openai-base
ARG TARGETPLATFORM
ARG INSTALL_KV_CONNECTORS=false

# ---- Add Tini as the container init process ----
ENV TINI_VERSION=v0.19.0
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /tini
RUN chmod +x /tini
ENTRYPOINT ["/tini", "--"]
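# Tini runs as PID 1: it reaps zombie processes and forwards signals (e.g.
# SIGTERM from `docker stop`) to supervisord and its children.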

ARG PIP_INDEX_URL UV_INDEX_URL
ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL

# This timeout (in seconds) is necessary when installing some dependencies via uv since it's likely to time out
# Reference: https://github.com/astral-sh/uv/pull/1694
ENV UV_HTTP_TIMEOUT=500

COPY ./vllm_v0.10.0/requirements/kv_connectors.txt requirements/kv_connectors.txt

# install additional dependencies for openai api server
RUN --mount=type=cache,target=/root/.cache/uv \
    if [ "$INSTALL_KV_CONNECTORS" = "true" ]; then \
        uv pip install --system -r requirements/kv_connectors.txt; \
    fi; \
    if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
        BITSANDBYTES_VERSION="0.42.0"; \
    else \
        BITSANDBYTES_VERSION="0.46.1"; \
    fi; \
    uv pip install --system accelerate hf_transfer modelscope "bitsandbytes>=${BITSANDBYTES_VERSION}" 'timm==0.9.10' boto3 runai-model-streamer runai-model-streamer[s3]

ENV VLLM_USAGE_SOURCE production-docker-image

# ---- Install supervisord ----
RUN apt-get update && apt-get install -y supervisor && mkdir -p /etc/supervisor/conf.d

# ---- Copy UI and supervisor config ----
COPY ./meta_ui.py /app/meta_ui.py
COPY ./supervisord.conf /etc/supervisor/supervisord.conf
COPY ./Alibaba/Qwen3-30B-A3B /root/.cradle/Alibaba/Qwen3-30B-A3B

FROM vllm-openai-base AS vllm-openai

CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
#################### OPENAI API SERVER ####################