hailin 2025-08-04 12:40:45 +08:00
parent 38d813617c
commit fc62ba578c
1 changed file with 438 additions and 522 deletions


@@ -1,525 +1,441 @@
# The vLLM Dockerfile is used to construct a vLLM image that can be directly
# used to run the OpenAI-compatible server.
# Please update any changes made here to
# docs/contributing/dockerfile/dockerfile.md and
# docs/assets/contributing/dockerfile-stages-dependency.png

ARG CUDA_VERSION=12.8.1
ARG PYTHON_VERSION=3.12
# By parameterizing the base images, we allow third parties to use their own
# base images. One use case is hermetic builds with base images stored in
# private registries that use different repository naming conventions.
#
# Example:
# docker build --build-arg BUILD_BASE_IMAGE=registry.acme.org/mirror/nvidia/cuda:${CUDA_VERSION}-devel-ubuntu20.04
ARG BUILD_BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu20.04
ARG FINAL_BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04

# By parameterizing the Deadsnakes repository URL, we allow third parties to
# use their own mirror. When doing so, we don't benefit from the transparent
# installation of the GPG key of the PPA, as done by add-apt-repository, so we
# also need a URL for the GPG key.
ARG DEADSNAKES_MIRROR_URL
ARG DEADSNAKES_GPGKEY_URL
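
# For example, a build against a private Deadsnakes mirror might look like the
# following (both URLs are placeholders; the key URL must serve the mirror's
# signing key):
# docker build \
#   --build-arg DEADSNAKES_MIRROR_URL=https://mirror.example.com/deadsnakes/ubuntu \
#   --build-arg DEADSNAKES_GPGKEY_URL=https://mirror.example.com/deadsnakes.gpg .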

# The PyPA get-pip.py script is a self-contained script+zip file that provides
# both the installer script and the pip base85-encoded zip archive. This allows
# bootstrapping pip in environments where a distribution package does not exist.
#
# By parameterizing the URL of the get-pip.py installation script, we allow
# third parties to use their own copy of the script stored in a private mirror.
# We set the default value to the PyPA-owned get-pip.py script.
#
# Reference: https://pip.pypa.io/en/stable/installation/#get-pip-py
ARG GET_PIP_URL="https://bootstrap.pypa.io/get-pip.py"
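
# For example, to bootstrap pip from a private copy of the script (the URL is
# a placeholder):
# docker build --build-arg GET_PIP_URL=https://mirror.example.com/get-pip.py .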

# PIP supports fetching packages from custom indexes, allowing third parties
# to host packages in private mirrors. PIP_INDEX_URL and PIP_EXTRA_INDEX_URL
# are standard PIP environment variables that override the default indexes.
# By leaving them empty by default, PIP will use its default indexes if the
# build process doesn't override them.
#
# Uv uses different variables. We set them by default to the same values as
# PIP, but they can be overridden.
ARG PIP_INDEX_URL
ARG PIP_EXTRA_INDEX_URL
ARG UV_INDEX_URL=${PIP_INDEX_URL}
ARG UV_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}
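
# For example, to resolve all Python packages through a private mirror (URLs
# are placeholders; the UV_* variables inherit these values automatically):
# docker build \
#   --build-arg PIP_INDEX_URL=https://pypi.example.com/simple \
#   --build-arg PIP_EXTRA_INDEX_URL=https://pypi.org/simple .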

# PyTorch provides its own indexes for standard and nightly builds
ARG PYTORCH_CUDA_INDEX_BASE_URL=https://download.pytorch.org/whl
ARG PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL=https://download.pytorch.org/whl/nightly
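
# The per-CUDA-version index is selected later by appending a cu<major><minor>
# suffix derived from CUDA_VERSION, e.g. with the default CUDA_VERSION=12.8.1:
#   echo 12.8.1 | cut -d. -f1,2 | tr -d '.'   # prints 128 -> .../whl/cu128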

# PIP supports multiple authentication schemes, including keyring.
# By parameterizing the PIP_KEYRING_PROVIDER variable and setting it to
# disabled by default, we allow third parties to use keyring authentication
# for their private Python indexes, while not changing the default behavior,
# which is no authentication.
#
# Reference: https://pip.pypa.io/en/stable/topics/authentication/#keyring-support
ARG PIP_KEYRING_PROVIDER=disabled
ARG UV_KEYRING_PROVIDER=${PIP_KEYRING_PROVIDER}
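
# For example, to let pip call out to the keyring CLI for private-index
# credentials (assumes a keyring backend is available inside the build
# environment):
# docker build --build-arg PIP_KEYRING_PROVIDER=subprocess .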

# This flag enables bundling the built-in KV-connector dependency libraries
# into the docker images.
ARG INSTALL_KV_CONNECTORS=false

#################### BASE BUILD IMAGE ####################
# prepare basic build environment
FROM ${BUILD_BASE_IMAGE} AS base
ARG CUDA_VERSION
ARG PYTHON_VERSION
ARG TARGETPLATFORM
ARG INSTALL_KV_CONNECTORS=false
ENV DEBIAN_FRONTEND=noninteractive
ARG DEADSNAKES_MIRROR_URL
ARG DEADSNAKES_GPGKEY_URL
ARG GET_PIP_URL

# Install Python and other dependencies
RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \
    && echo 'tzdata tzdata/Zones/America select Los_Angeles' | debconf-set-selections \
    && apt-get update -y \
    && apt-get install -y ccache software-properties-common git curl sudo \
    && if [ ! -z ${DEADSNAKES_MIRROR_URL} ] ; then \
        if [ ! -z "${DEADSNAKES_GPGKEY_URL}" ] ; then \
            mkdir -p -m 0755 /etc/apt/keyrings ; \
            curl -L ${DEADSNAKES_GPGKEY_URL} | gpg --dearmor > /etc/apt/keyrings/deadsnakes.gpg ; \
            sudo chmod 644 /etc/apt/keyrings/deadsnakes.gpg ; \
            echo "deb [signed-by=/etc/apt/keyrings/deadsnakes.gpg] ${DEADSNAKES_MIRROR_URL} $(lsb_release -cs) main" > /etc/apt/sources.list.d/deadsnakes.list ; \
        fi ; \
    else \
        for i in 1 2 3; do \
            add-apt-repository -y ppa:deadsnakes/ppa && break || \
            { echo "Attempt $i failed, retrying in 5s..."; sleep 5; }; \
        done ; \
    fi \
    && apt-get update -y \
    && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \
    && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \
    && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \
    && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \
    && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION} \
    && python3 --version && python3 -m pip --version

ARG PIP_INDEX_URL UV_INDEX_URL
ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL
ARG PYTORCH_CUDA_INDEX_BASE_URL
ARG PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL
ARG PIP_KEYRING_PROVIDER UV_KEYRING_PROVIDER

# Install uv for faster pip installs
RUN --mount=type=cache,target=/root/.cache/uv \
    python3 -m pip install uv

# This timeout (in seconds) is necessary when installing some dependencies
# via uv, since the install is otherwise likely to time out.
# Reference: https://github.com/astral-sh/uv/pull/1694
ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"

# Upgrade to GCC 10 to avoid https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92519
# as it was causing spam when compiling the CUTLASS kernels
RUN apt-get install -y gcc-10 g++-10
RUN update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 110 --slave /usr/bin/g++ g++ /usr/bin/g++-10
RUN <<EOF
gcc --version
EOF

# Workaround for https://github.com/openai/triton/issues/2507 and
# https://github.com/pytorch/pytorch/issues/107960 -- hopefully
# this won't be needed for future versions of this docker image
# or future versions of triton.
RUN ldconfig /usr/local/cuda-$(echo $CUDA_VERSION | cut -d. -f1,2)/compat/

WORKDIR /workspace

# install build and runtime dependencies
#
# The arm64 (GH200) build follows the "use existing pytorch" practice: we
# need to install torch and torchvision from the nightly builds first, so
# pytorch will not appear as a vLLM dependency in any of the following steps.
RUN --mount=type=cache,target=/root/.cache/uv \
    if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
        uv pip install --system \
            --index-url ${PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') \
            "torch==2.8.0.dev20250318+cu128" "torchvision==0.22.0.dev20250319"; \
        uv pip install --system \
            --index-url ${PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') \
            --pre pytorch_triton==3.3.0+gitab727c40; \
    fi

COPY requirements/common.txt requirements/common.txt
COPY requirements/cuda.txt requirements/cuda.txt
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -r requirements/cuda.txt \
        --extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.')
# echo "Using precompiled wheels"; \
# cuda arch list used by torch # else \
# can be useful for both `dev` and `test` # unset VLLM_USE_PRECOMPILED && \
# explicitly set the list to avoid issues with torch 2.2 # echo "Leaving VLLM_USE_PRECOMPILED unset to build wheels from source"; \
# see https://github.com/pytorch/pytorch/pull/123243 # fi
ARG torch_cuda_arch_list='7.0 7.5 8.0 8.9 9.0 10.0 12.0'
ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list} # # if USE_SCCACHE is set, use sccache to speed up compilation
# Override the arch list for flash-attn to reduce the binary size # RUN --mount=type=cache,target=/root/.cache/uv \
ARG vllm_fa_cmake_gpu_arches='80-real;90-real' # --mount=type=bind,source=.git,target=.git \
ENV VLLM_FA_CMAKE_GPU_ARCHES=${vllm_fa_cmake_gpu_arches} # if [ "$USE_SCCACHE" = "1" ]; then \
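
# For example, a build targeting only Hopper GPUs could shrink compile time
# and binary size (the value is illustrative):
# docker build --build-arg torch_cuda_arch_list='9.0' .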
#################### BASE BUILD IMAGE ####################

#################### WHEEL BUILD IMAGE ####################
FROM base AS build
ARG TARGETPLATFORM

ARG PIP_INDEX_URL UV_INDEX_URL
ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL
ARG PYTORCH_CUDA_INDEX_BASE_URL

# install build dependencies
COPY requirements/build.txt requirements/build.txt

# This timeout (in seconds) is necessary when installing some dependencies
# via uv, since the install is otherwise likely to time out.
# Reference: https://github.com/astral-sh/uv/pull/1694
ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"

RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -r requirements/build.txt \
        --extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.')

COPY . .
ARG GIT_REPO_CHECK=0
RUN --mount=type=bind,source=.git,target=.git \
    if [ "$GIT_REPO_CHECK" != "0" ]; then bash tools/check_repo.sh ; fi

# max jobs used by Ninja to build extensions
ARG max_jobs=2
ENV MAX_JOBS=${max_jobs}
# number of threads used by nvcc
ARG nvcc_threads=8
ENV NVCC_THREADS=$nvcc_threads
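
# For example, these could be raised on a large builder (values are
# illustrative; MAX_JOBS * NVCC_THREADS should stay within the available
# cores and RAM):
# docker build --build-arg max_jobs=16 --build-arg nvcc_threads=4 .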

ARG USE_SCCACHE
ARG SCCACHE_DOWNLOAD_URL=https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz
ARG SCCACHE_ENDPOINT
ARG SCCACHE_BUCKET_NAME=vllm-build-sccache
ARG SCCACHE_REGION_NAME=us-west-2
ARG SCCACHE_S3_NO_CREDENTIALS=0

# Flag to control whether to use pre-built vLLM wheels
ARG VLLM_USE_PRECOMPILED
# TODO: in setup.py, VLLM_USE_PRECOMPILED is sensitive to truthiness: it treats =0 as "true". This should be fixed.
ENV VLLM_USE_PRECOMPILED=""
RUN if [ "${VLLM_USE_PRECOMPILED}" = "1" ]; then \
        export VLLM_USE_PRECOMPILED=1 && \
        echo "Using precompiled wheels"; \
    else \
        unset VLLM_USE_PRECOMPILED && \
        echo "Leaving VLLM_USE_PRECOMPILED unset to build wheels from source"; \
    fi

# if USE_SCCACHE is set, use sccache to speed up compilation
RUN --mount=type=cache,target=/root/.cache/uv \
    --mount=type=bind,source=.git,target=.git \
    if [ "$USE_SCCACHE" = "1" ]; then \
        echo "Installing sccache..." \
        && curl -L -o sccache.tar.gz ${SCCACHE_DOWNLOAD_URL} \
        && tar -xzf sccache.tar.gz \
        && sudo mv sccache-v0.8.1-x86_64-unknown-linux-musl/sccache /usr/bin/sccache \
        && rm -rf sccache.tar.gz sccache-v0.8.1-x86_64-unknown-linux-musl \
        && if [ ! -z ${SCCACHE_ENDPOINT} ] ; then export SCCACHE_ENDPOINT=${SCCACHE_ENDPOINT} ; fi \
        && export SCCACHE_BUCKET=${SCCACHE_BUCKET_NAME} \
        && export SCCACHE_REGION=${SCCACHE_REGION_NAME} \
        && export SCCACHE_S3_NO_CREDENTIALS=${SCCACHE_S3_NO_CREDENTIALS} \
        && export SCCACHE_IDLE_TIMEOUT=0 \
        && export CMAKE_BUILD_TYPE=Release \
        && sccache --show-stats \
        && python3 setup.py bdist_wheel --dist-dir=dist --py-limited-api=cp38 \
        && sccache --show-stats; \
    fi
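
# For example, a CI build sharing a compilation cache through S3 might use
# the following (bucket and region are placeholders; credentials must come
# from the build environment unless SCCACHE_S3_NO_CREDENTIALS=1):
# docker build --build-arg USE_SCCACHE=1 \
#   --build-arg SCCACHE_BUCKET_NAME=my-sccache-bucket \
#   --build-arg SCCACHE_REGION_NAME=us-east-1 .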

ENV CCACHE_DIR=/root/.cache/ccache
RUN --mount=type=cache,target=/root/.cache/ccache \
    --mount=type=cache,target=/root/.cache/uv \
    --mount=type=bind,source=.git,target=.git \
    if [ "$USE_SCCACHE" != "1" ]; then \
        # Clean any existing CMake artifacts
        rm -rf .deps && \
        mkdir -p .deps && \
        python3 setup.py bdist_wheel --dist-dir=dist --py-limited-api=cp38; \
    fi

# Check the size of the wheel if RUN_WHEEL_CHECK is true
COPY .buildkite/check-wheel-size.py check-wheel-size.py
# sync the default value with .buildkite/check-wheel-size.py
ARG VLLM_MAX_SIZE_MB=400
ENV VLLM_MAX_SIZE_MB=$VLLM_MAX_SIZE_MB
ARG RUN_WHEEL_CHECK=true
RUN if [ "$RUN_WHEEL_CHECK" = "true" ]; then \
        python3 check-wheel-size.py dist; \
    else \
        echo "Skipping wheel size check."; \
    fi
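
# For example, to let an oversized debug wheel through, either raise the
# limit or skip the check entirely:
# docker build --build-arg VLLM_MAX_SIZE_MB=800 .
# docker build --build-arg RUN_WHEEL_CHECK=false .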
#################### EXTENSION Build IMAGE ####################
# { echo "Attempt $i failed, retrying in 5s..."; sleep 5; }; \
#################### DEV IMAGE #################### # done ; \
FROM base AS dev # fi \
# && apt-get update -y \
ARG PIP_INDEX_URL UV_INDEX_URL # && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv libibverbs-dev \
ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL # && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \
ARG PYTORCH_CUDA_INDEX_BASE_URL # && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \
# && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \
# This timeout (in seconds) is necessary when installing some dependencies via uv since it's likely to time out # && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION} \
# Reference: https://github.com/astral-sh/uv/pull/1694 # && python3 --version && python3 -m pip --version
ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match" # ARG PIP_INDEX_URL UV_INDEX_URL
# ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL
# Workaround for #17068 # ARG PYTORCH_CUDA_INDEX_BASE_URL
RUN --mount=type=cache,target=/root/.cache/uv \ # ARG PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL
uv pip install --system --no-build-isolation "git+https://github.com/state-spaces/mamba@v2.2.4" # ARG PIP_KEYRING_PROVIDER UV_KEYRING_PROVIDER
COPY requirements/lint.txt requirements/lint.txt # # Install uv for faster pip installs
COPY requirements/test.txt requirements/test.txt # RUN --mount=type=cache,target=/root/.cache/uv \
COPY requirements/dev.txt requirements/dev.txt # python3 -m pip install uv
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --system -r requirements/dev.txt \ # # This timeout (in seconds) is necessary when installing some dependencies via uv since it's likely to time out
--extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') # # Reference: https://github.com/astral-sh/uv/pull/1694
#################### DEV IMAGE #################### # ENV UV_HTTP_TIMEOUT=500

#################### vLLM installation IMAGE ####################
# image with vLLM installed
# TODO: Restore to base image after the FlashInfer AOT wheel is fixed
FROM ${FINAL_BASE_IMAGE} AS vllm-base
ARG CUDA_VERSION
ARG PYTHON_VERSION
ARG INSTALL_KV_CONNECTORS=false
WORKDIR /vllm-workspace
ENV DEBIAN_FRONTEND=noninteractive
ARG TARGETPLATFORM

SHELL ["/bin/bash", "-c"]

ARG DEADSNAKES_MIRROR_URL
ARG DEADSNAKES_GPGKEY_URL
ARG GET_PIP_URL

RUN PYTHON_VERSION_STR=$(echo ${PYTHON_VERSION} | sed 's/\.//g') && \
    echo "export PYTHON_VERSION_STR=${PYTHON_VERSION_STR}" >> /etc/environment

# Install Python and other dependencies
RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \
    && echo 'tzdata tzdata/Zones/America select Los_Angeles' | debconf-set-selections \
    && apt-get update -y \
    && apt-get install -y ccache software-properties-common git curl wget sudo vim python3-pip \
    && apt-get install -y ffmpeg libsm6 libxext6 libgl1 \
    && if [ ! -z ${DEADSNAKES_MIRROR_URL} ] ; then \
        if [ ! -z "${DEADSNAKES_GPGKEY_URL}" ] ; then \
            mkdir -p -m 0755 /etc/apt/keyrings ; \
            curl -L ${DEADSNAKES_GPGKEY_URL} | gpg --dearmor > /etc/apt/keyrings/deadsnakes.gpg ; \
            sudo chmod 644 /etc/apt/keyrings/deadsnakes.gpg ; \
            echo "deb [signed-by=/etc/apt/keyrings/deadsnakes.gpg] ${DEADSNAKES_MIRROR_URL} $(lsb_release -cs) main" > /etc/apt/sources.list.d/deadsnakes.list ; \
        fi ; \
    else \
        for i in 1 2 3; do \
            add-apt-repository -y ppa:deadsnakes/ppa && break || \
            { echo "Attempt $i failed, retrying in 5s..."; sleep 5; }; \
        done ; \
    fi \
    && apt-get update -y \
    && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv libibverbs-dev \
    && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \
    && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \
    && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \
    && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION} \
    && python3 --version && python3 -m pip --version

ARG PIP_INDEX_URL UV_INDEX_URL
ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL
ARG PYTORCH_CUDA_INDEX_BASE_URL
ARG PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL
ARG PIP_KEYRING_PROVIDER UV_KEYRING_PROVIDER

# Install uv for faster pip installs
RUN --mount=type=cache,target=/root/.cache/uv \
    python3 -m pip install uv

# This timeout (in seconds) is necessary when installing some dependencies
# via uv, since the install is otherwise likely to time out.
# Reference: https://github.com/astral-sh/uv/pull/1694
ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"

# Workaround for https://github.com/openai/triton/issues/2507 and
# https://github.com/pytorch/pytorch/issues/107960 -- hopefully
# this won't be needed for future versions of this docker image
# or future versions of triton.
RUN ldconfig /usr/local/cuda-$(echo $CUDA_VERSION | cut -d. -f1,2)/compat/

# The arm64 (GH200) build follows the "use existing pytorch" practice: we
# need to install torch and torchvision from the nightly builds first, so
# pytorch will not appear as a vLLM dependency in any of the following steps.
RUN --mount=type=cache,target=/root/.cache/uv \
    if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
        uv pip install --system \
            --index-url ${PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') \
            "torch==2.8.0.dev20250318+cu128" "torchvision==0.22.0.dev20250319" ; \
        uv pip install --system \
            --index-url ${PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') \
            --pre pytorch_triton==3.3.0+gitab727c40 ; \
    fi

# Install the vLLM wheel first, so that torch etc. will be installed.
RUN --mount=type=bind,from=build,src=/workspace/dist,target=/vllm-workspace/dist \
    --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system dist/*.whl --verbose \
        --extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.')
# if [ "$CUDA_MAJOR" -ge 12 ]; then \
# If we need to build FlashInfer wheel before its release: # uv pip install --system -r requirements/dev.txt; \
# $ # Note we remove 7.0 from the arch list compared to the list below, since FlashInfer only supports sm75+ # fi
# $ export TORCH_CUDA_ARCH_LIST='7.5 8.0 8.9 9.0a 10.0a 12.0'
# $ git clone https://github.com/flashinfer-ai/flashinfer.git --recursive # # install development dependencies (for testing)
# $ cd flashinfer # RUN --mount=type=cache,target=/root/.cache/uv \
# $ git checkout v0.2.6.post1 # uv pip install --system -e tests/vllm_test_utils
# $ python -m flashinfer.aot
# $ python -m build --no-isolation --wheel # # enable fast downloads from hf (for testing)
# $ ls -la dist # RUN --mount=type=cache,target=/root/.cache/uv \
# -rw-rw-r-- 1 mgoin mgoin 205M Jun 9 18:03 flashinfer_python-0.2.6.post1-cp39-abi3-linux_x86_64.whl # uv pip install --system hf_transfer
# $ # upload the wheel to a public location, e.g. https://wheels.vllm.ai/flashinfer/v0.2.6.post1/flashinfer_python-0.2.6.post1-cp39-abi3-linux_x86_64.whl # ENV HF_HUB_ENABLE_HF_TRANSFER 1
# Install FlashInfer from source # # Copy in the v1 package for testing (it isn't distributed yet)
ARG FLASHINFER_GIT_REPO="https://github.com/flashinfer-ai/flashinfer.git" # COPY ./vllm_v0.10.0/vllm/v1 /usr/local/lib/python${PYTHON_VERSION}/dist-packages/vllm/v1
ARG FLASHINFER_GIT_REF="v0.2.8rc1"
RUN --mount=type=cache,target=/root/.cache/uv bash - <<'BASH' # # doc requires source code
. /etc/environment # # we hide them inside `test_docs/` , so that this source code
git clone --depth 1 --recursive --shallow-submodules \ # # will not be imported by other tests
--branch ${FLASHINFER_GIT_REF} \ # RUN mkdir test_docs
${FLASHINFER_GIT_REPO} flashinfer # RUN mv docs test_docs/
# Exclude CUDA arches for older versions (11.x and 12.0-12.7) # RUN cp -r ./vllm_v0.10.0/examples test_docs/
# TODO: Update this to allow setting TORCH_CUDA_ARCH_LIST as a build arg. # RUN mv vllm test_docs/
if [[ "${CUDA_VERSION}" == 11.* ]]; then # RUN mv mkdocs.yaml test_docs/
FI_TORCH_CUDA_ARCH_LIST="7.5 8.0 8.9" # #################### TEST IMAGE ####################
elif [[ "${CUDA_VERSION}" == 12.[0-7]* ]]; then
FI_TORCH_CUDA_ARCH_LIST="7.5 8.0 8.9 9.0a" # #################### OPENAI API SERVER ####################
else # # base openai image with additional requirements, for any subsequent openai-style images
# CUDA 12.8+ supports 10.0a and 12.0 # FROM vllm-base AS vllm-openai-base
FI_TORCH_CUDA_ARCH_LIST="7.5 8.0 8.9 9.0a 10.0a 12.0" # ARG TARGETPLATFORM
fi # ARG INSTALL_KV_CONNECTORS=false
echo "🏗️ Building FlashInfer for arches: ${FI_TORCH_CUDA_ARCH_LIST}"
# Needed to build AOT kernels # ARG PIP_INDEX_URL UV_INDEX_URL
pushd flashinfer # ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL
TORCH_CUDA_ARCH_LIST="${FI_TORCH_CUDA_ARCH_LIST}" \
python3 -m flashinfer.aot # # This timeout (in seconds) is necessary when installing some dependencies via uv since it's likely to time out
TORCH_CUDA_ARCH_LIST="${FI_TORCH_CUDA_ARCH_LIST}" \ # # Reference: https://github.com/astral-sh/uv/pull/1694
uv pip install --system --no-build-isolation . # ENV UV_HTTP_TIMEOUT=500
popd
rm -rf flashinfer # COPY ./vllm_v0.10.0/requirements/kv_connectors.txt requirements/kv_connectors.txt
BASH
COPY examples examples
COPY benchmarks benchmarks
COPY ./vllm/collect_env.py .

RUN --mount=type=cache,target=/root/.cache/uv \
    . /etc/environment && \
    uv pip list

# Even when we build FlashInfer in AOT mode, there are still some issues with
# JIT compilation, so we need to install the build dependencies it uses.
# TODO: Remove this once the FlashInfer AOT wheel is fixed
COPY requirements/build.txt requirements/build.txt
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -r requirements/build.txt \
        --extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.')
#################### vLLM installation IMAGE ####################

#################### TEST IMAGE ####################
# image to run the unit testing suite
# note that this uses the vllm installed by `pip`
FROM vllm-base AS test
ADD . /vllm-workspace/

ARG PYTHON_VERSION

ARG PIP_INDEX_URL UV_INDEX_URL
ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL

# This timeout (in seconds) is necessary when installing some dependencies
# via uv, since the install is otherwise likely to time out.
# Reference: https://github.com/astral-sh/uv/pull/1694
ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"

# Workaround for #17068
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system --no-build-isolation "git+https://github.com/state-spaces/mamba@v2.2.4"

# install development dependencies (for testing)
RUN --mount=type=cache,target=/root/.cache/uv \
    CUDA_MAJOR="${CUDA_VERSION%%.*}"; \
    if [ "$CUDA_MAJOR" -ge 12 ]; then \
        uv pip install --system -r requirements/dev.txt; \
    fi

# install the test-utils package (for testing)
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system -e tests/vllm_test_utils

# enable fast downloads from hf (for testing)
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --system hf_transfer
ENV HF_HUB_ENABLE_HF_TRANSFER=1

# Copy in the v1 package for testing (it isn't distributed yet)
COPY vllm/v1 /usr/local/lib/python${PYTHON_VERSION}/dist-packages/vllm/v1

# The docs build requires the source code, so we hide it inside `test_docs/`
# where it will not be imported by other tests.
RUN mkdir test_docs
RUN mv docs test_docs/
RUN cp -r examples test_docs/
RUN mv vllm test_docs/
RUN mv mkdocs.yaml test_docs/
#################### TEST IMAGE ####################

#################### OPENAI API SERVER ####################
# base openai image with additional requirements, for any subsequent openai-style images
FROM vllm-base AS vllm-openai-base
ARG TARGETPLATFORM
ARG INSTALL_KV_CONNECTORS=false

ARG PIP_INDEX_URL UV_INDEX_URL
ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL

# This timeout (in seconds) is necessary when installing some dependencies
# via uv, since the install is otherwise likely to time out.
# Reference: https://github.com/astral-sh/uv/pull/1694
ENV UV_HTTP_TIMEOUT=500

COPY requirements/kv_connectors.txt requirements/kv_connectors.txt

# install additional dependencies for openai api server
RUN --mount=type=cache,target=/root/.cache/uv \
    if [ "$INSTALL_KV_CONNECTORS" = "true" ]; then \
        uv pip install --system -r requirements/kv_connectors.txt; \
    fi; \
    if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
        BITSANDBYTES_VERSION="0.42.0"; \
    else \
        BITSANDBYTES_VERSION="0.46.1"; \
    fi; \
    uv pip install --system accelerate hf_transfer modelscope "bitsandbytes>=${BITSANDBYTES_VERSION}" 'timm==0.9.10' boto3 runai-model-streamer runai-model-streamer[s3]

ENV VLLM_USAGE_SOURCE=production-docker-image
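
# For example, to bundle the optional KV-connector libraries into the server
# image:
# docker build --build-arg INSTALL_KV_CONNECTORS=true --target vllm-openai .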

# define sagemaker first, so it is not the default target of `docker build`
FROM vllm-openai-base AS vllm-sagemaker

COPY examples/online_serving/sagemaker-entrypoint.sh .
RUN chmod +x sagemaker-entrypoint.sh
ENTRYPOINT ["./sagemaker-entrypoint.sh"]

FROM vllm-openai-base AS vllm-openai
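
# For example, a specific stage can be built directly (image tags are
# illustrative):
# docker build --target vllm-openai -t vllm-openai:latest .
# docker build --target vllm-sagemaker -t vllm-sagemaker:latest .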