hailin 2025-08-04 13:01:50 +08:00
parent 62ee2cade3
commit 5e1a8abfb4
1 changed file with 110 additions and 110 deletions


@@ -1,9 +1,9 @@
# ARG CUDA_VERSION=12.8.1
# ARG PYTHON_VERSION=3.12
ARG CUDA_VERSION=12.8.1
ARG PYTHON_VERSION=3.12
# ARG BUILD_BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu20.04
# ARG FINAL_BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04
ARG FINAL_BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04
# ARG DEADSNAKES_MIRROR_URL
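Note on the first hunk: CUDA_VERSION, PYTHON_VERSION and FINAL_BASE_IMAGE are now uncommented, so the image builds against CUDA 12.8.1, Python 3.12 and nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04 by default while staying overridable at build time. A minimal sketch of overriding them; the Dockerfile name, image tag and arg values below are placeholders, not taken from this commit:

    # Illustrative build invocation only; adjust file name, tag and args as needed.
    docker build \
        --build-arg CUDA_VERSION=12.8.1 \
        --build-arg PYTHON_VERSION=3.12 \
        --target vllm-base \
        -t vllm-base:cu128 \
        -f Dockerfile .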
@@ -229,127 +229,127 @@
# --extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.')
# #################### DEV IMAGE ####################
# #################### vLLM installation IMAGE ####################
# FROM ${FINAL_BASE_IMAGE} AS vllm-base
# ARG CUDA_VERSION
# ARG PYTHON_VERSION
# ARG INSTALL_KV_CONNECTORS=false
# WORKDIR /vllm-workspace
# ENV DEBIAN_FRONTEND=noninteractive
# ARG TARGETPLATFORM
#################### vLLM installation IMAGE ####################
FROM ${FINAL_BASE_IMAGE} AS vllm-base
ARG CUDA_VERSION
ARG PYTHON_VERSION
ARG INSTALL_KV_CONNECTORS=false
WORKDIR /vllm-workspace
ENV DEBIAN_FRONTEND=noninteractive
ARG TARGETPLATFORM
# SHELL ["/bin/bash", "-c"]
SHELL ["/bin/bash", "-c"]
# ARG DEADSNAKES_MIRROR_URL
# ARG DEADSNAKES_GPGKEY_URL
# ARG GET_PIP_URL
ARG DEADSNAKES_MIRROR_URL
ARG DEADSNAKES_GPGKEY_URL
ARG GET_PIP_URL
# RUN PYTHON_VERSION_STR=$(echo ${PYTHON_VERSION} | sed 's/\.//g') && \
# echo "export PYTHON_VERSION_STR=${PYTHON_VERSION_STR}" >> /etc/environment
RUN PYTHON_VERSION_STR=$(echo ${PYTHON_VERSION} | sed 's/\.//g') && \
echo "export PYTHON_VERSION_STR=${PYTHON_VERSION_STR}" >> /etc/environment
# # Install Python and other dependencies
# RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \
# && echo 'tzdata tzdata/Zones/America select Los_Angeles' | debconf-set-selections \
# && apt-get update -y \
# && apt-get install -y ccache software-properties-common git curl wget sudo vim python3-pip \
# && apt-get install -y ffmpeg libsm6 libxext6 libgl1 \
# && if [ ! -z ${DEADSNAKES_MIRROR_URL} ] ; then \
# if [ ! -z "${DEADSNAKES_GPGKEY_URL}" ] ; then \
# mkdir -p -m 0755 /etc/apt/keyrings ; \
# curl -L ${DEADSNAKES_GPGKEY_URL} | gpg --dearmor > /etc/apt/keyrings/deadsnakes.gpg ; \
# sudo chmod 644 /etc/apt/keyrings/deadsnakes.gpg ; \
# echo "deb [signed-by=/etc/apt/keyrings/deadsnakes.gpg] ${DEADSNAKES_MIRROR_URL} $(lsb_release -cs) main" > /etc/apt/sources.list.d/deadsnakes.list ; \
# fi ; \
# else \
# for i in 1 2 3; do \
# add-apt-repository -y ppa:deadsnakes/ppa && break || \
# { echo "Attempt $i failed, retrying in 5s..."; sleep 5; }; \
# done ; \
# fi \
# && apt-get update -y \
# && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv libibverbs-dev \
# && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \
# && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \
# && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \
# && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION} \
# && python3 --version && python3 -m pip --version
# Install Python and other dependencies
RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \
&& echo 'tzdata tzdata/Zones/America select Los_Angeles' | debconf-set-selections \
&& apt-get update -y \
&& apt-get install -y ccache software-properties-common git curl wget sudo vim python3-pip \
&& apt-get install -y ffmpeg libsm6 libxext6 libgl1 \
&& if [ ! -z ${DEADSNAKES_MIRROR_URL} ] ; then \
if [ ! -z "${DEADSNAKES_GPGKEY_URL}" ] ; then \
mkdir -p -m 0755 /etc/apt/keyrings ; \
curl -L ${DEADSNAKES_GPGKEY_URL} | gpg --dearmor > /etc/apt/keyrings/deadsnakes.gpg ; \
sudo chmod 644 /etc/apt/keyrings/deadsnakes.gpg ; \
echo "deb [signed-by=/etc/apt/keyrings/deadsnakes.gpg] ${DEADSNAKES_MIRROR_URL} $(lsb_release -cs) main" > /etc/apt/sources.list.d/deadsnakes.list ; \
fi ; \
else \
for i in 1 2 3; do \
add-apt-repository -y ppa:deadsnakes/ppa && break || \
{ echo "Attempt $i failed, retrying in 5s..."; sleep 5; }; \
done ; \
fi \
&& apt-get update -y \
&& apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv libibverbs-dev \
&& update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \
&& update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \
&& ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \
&& curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION} \
&& python3 --version && python3 -m pip --version
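This RUN either configures a deadsnakes mirror (when DEADSNAKES_MIRROR_URL is set, optionally with a signing key from DEADSNAKES_GPGKEY_URL) or falls back to the deadsnakes PPA with three retries, then makes python${PYTHON_VERSION} the system python3 via update-alternatives and bootstraps pip from GET_PIP_URL. A quick sanity check inside the resulting image could look like the sketch below; the expected outputs assume the default PYTHON_VERSION=3.12 and are illustrative only:

    python3 --version                      # expected: Python 3.12.x
    readlink -f "$(command -v python3)"    # expected: /usr/bin/python3.12 (via update-alternatives)
    python3 -m pip --version               # pip comes from the GET_PIP_URL bootstrap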
# ARG PIP_INDEX_URL UV_INDEX_URL
# ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL
# ARG PYTORCH_CUDA_INDEX_BASE_URL
# ARG PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL
# ARG PIP_KEYRING_PROVIDER UV_KEYRING_PROVIDER
ARG PIP_INDEX_URL UV_INDEX_URL
ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL
ARG PYTORCH_CUDA_INDEX_BASE_URL
ARG PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL
ARG PIP_KEYRING_PROVIDER UV_KEYRING_PROVIDER
# # Install uv for faster pip installs
# RUN --mount=type=cache,target=/root/.cache/uv \
# python3 -m pip install uv
# Install uv for faster pip installs
RUN --mount=type=cache,target=/root/.cache/uv \
python3 -m pip install uv
# # This timeout (in seconds) is necessary when installing some dependencies via uv since it's likely to time out
# # Reference: https://github.com/astral-sh/uv/pull/1694
# ENV UV_HTTP_TIMEOUT=500
# ENV UV_INDEX_STRATEGY="unsafe-best-match"
# This timeout (in seconds) is necessary when installing some dependencies via uv since it's likely to time out
# Reference: https://github.com/astral-sh/uv/pull/1694
ENV UV_HTTP_TIMEOUT=500
ENV UV_INDEX_STRATEGY="unsafe-best-match"
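uv is installed once with pip and then used for every later install, with the BuildKit cache mount keeping downloaded wheels across builds. The two ENV lines raise uv's HTTP timeout to 500 seconds (large CUDA wheels) and set the index strategy to "unsafe-best-match", which lets uv consider all configured indexes rather than stopping at the first one that carries a package. A rough standalone equivalent of that setup; the package name is a placeholder:

    python3 -m pip install uv
    export UV_HTTP_TIMEOUT=500
    export UV_INDEX_STRATEGY="unsafe-best-match"
    uv pip install --system some-package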
# RUN ldconfig /usr/local/cuda-$(echo $CUDA_VERSION | cut -d. -f1,2)/compat/
RUN ldconfig /usr/local/cuda-$(echo $CUDA_VERSION | cut -d. -f1,2)/compat/
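The ldconfig call registers the CUDA forward-compatibility libraries shipped under /usr/local/cuda-<major.minor>/compat/ so the runtime linker can find them. How that path is derived from CUDA_VERSION, shown with the default value:

    CUDA_VERSION=12.8.1
    echo "/usr/local/cuda-$(echo $CUDA_VERSION | cut -d. -f1,2)/compat/"
    # prints: /usr/local/cuda-12.8/compat/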
# RUN --mount=type=cache,target=/root/.cache/uv \
# if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
# uv pip install --system \
# --index-url ${PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') \
# "torch==2.8.0.dev20250318+cu128" "torchvision==0.22.0.dev20250319" ; \
# uv pip install --system \
# --index-url ${PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') \
# --pre pytorch_triton==3.3.0+gitab727c40 ; \
# fi
RUN --mount=type=cache,target=/root/.cache/uv \
if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
uv pip install --system \
--index-url ${PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') \
"torch==2.8.0.dev20250318+cu128" "torchvision==0.22.0.dev20250319" ; \
uv pip install --system \
--index-url ${PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') \
--pre pytorch_triton==3.3.0+gitab727c40 ; \
fi
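On linux/arm64 this step pulls PyTorch and torchvision nightly wheels plus a pre-release pytorch_triton from the cu-suffixed nightly index; on other platforms it is a no-op. The suffix is CUDA_VERSION reduced to major.minor with the dot removed. A sketch of the URL expansion, where the base URL is an assumed example (the build arg's default is not visible in this hunk):

    # Base URL below is a placeholder modeled on the public PyTorch nightly index.
    PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL=https://download.pytorch.org/whl/nightly
    CUDA_VERSION=12.8.1
    echo "${PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.')"
    # prints: https://download.pytorch.org/whl/nightly/cu128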
# # Install vllm wheel first, so that torch etc will be installed.
# RUN --mount=type=bind,from=build,src=/workspace/dist,target=/vllm-workspace/dist \
# --mount=type=cache,target=/root/.cache/uv \
# uv pip install --system dist/*.whl --verbose \
# --extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.')
# Install vllm wheel first, so that torch etc will be installed.
RUN --mount=type=bind,from=build,src=/workspace/dist,target=/vllm-workspace/dist \
--mount=type=cache,target=/root/.cache/uv \
uv pip install --system dist/*.whl --verbose \
--extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.')
# # Install FlashInfer from source
# ARG FLASHINFER_GIT_REPO="https://github.com/flashinfer-ai/flashinfer.git"
# ARG FLASHINFER_GIT_REF="v0.2.8rc1"
# RUN --mount=type=cache,target=/root/.cache/uv bash - <<'BASH'
# . /etc/environment
# git clone --depth 1 --recursive --shallow-submodules \
# --branch ${FLASHINFER_GIT_REF} \
# ${FLASHINFER_GIT_REPO} flashinfer
# # Exclude CUDA arches for older versions (11.x and 12.0-12.7)
# # TODO: Update this to allow setting TORCH_CUDA_ARCH_LIST as a build arg.
# if [[ "${CUDA_VERSION}" == 11.* ]]; then
# FI_TORCH_CUDA_ARCH_LIST="7.5 8.0 8.9"
# elif [[ "${CUDA_VERSION}" == 12.[0-7]* ]]; then
# FI_TORCH_CUDA_ARCH_LIST="7.5 8.0 8.9 9.0a"
# else
# # CUDA 12.8+ supports 10.0a and 12.0
# FI_TORCH_CUDA_ARCH_LIST="7.5 8.0 8.9 9.0a 10.0a 12.0"
# fi
# echo "🏗️ Building FlashInfer for arches: ${FI_TORCH_CUDA_ARCH_LIST}"
# # Needed to build AOT kernels
# pushd flashinfer
# TORCH_CUDA_ARCH_LIST="${FI_TORCH_CUDA_ARCH_LIST}" \
# python3 -m flashinfer.aot
# TORCH_CUDA_ARCH_LIST="${FI_TORCH_CUDA_ARCH_LIST}" \
# uv pip install --system --no-build-isolation .
# popd
# rm -rf flashinfer
# BASH
# COPY examples examples
# COPY benchmarks benchmarks
# COPY ./vllm_v0.10.0/vllm/collect_env.py .
# Install FlashInfer from source
ARG FLASHINFER_GIT_REPO="https://github.com/flashinfer-ai/flashinfer.git"
ARG FLASHINFER_GIT_REF="v0.2.8rc1"
RUN --mount=type=cache,target=/root/.cache/uv bash - <<'BASH'
. /etc/environment
git clone --depth 1 --recursive --shallow-submodules \
--branch ${FLASHINFER_GIT_REF} \
${FLASHINFER_GIT_REPO} flashinfer
# Exclude CUDA arches for older versions (11.x and 12.0-12.7)
# TODO: Update this to allow setting TORCH_CUDA_ARCH_LIST as a build arg.
if [[ "${CUDA_VERSION}" == 11.* ]]; then
FI_TORCH_CUDA_ARCH_LIST="7.5 8.0 8.9"
elif [[ "${CUDA_VERSION}" == 12.[0-7]* ]]; then
FI_TORCH_CUDA_ARCH_LIST="7.5 8.0 8.9 9.0a"
else
# CUDA 12.8+ supports 10.0a and 12.0
FI_TORCH_CUDA_ARCH_LIST="7.5 8.0 8.9 9.0a 10.0a 12.0"
fi
echo "🏗️ Building FlashInfer for arches: ${FI_TORCH_CUDA_ARCH_LIST}"
# Needed to build AOT kernels
pushd flashinfer
TORCH_CUDA_ARCH_LIST="${FI_TORCH_CUDA_ARCH_LIST}" \
python3 -m flashinfer.aot
TORCH_CUDA_ARCH_LIST="${FI_TORCH_CUDA_ARCH_LIST}" \
uv pip install --system --no-build-isolation .
popd
rm -rf flashinfer
BASH
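The FlashInfer step clones the tagged release, picks a TORCH_CUDA_ARCH_LIST from the CUDA major/minor (11.x stops at 8.9, 12.0 through 12.7 add 9.0a, and 12.8+ also gets 10.0a and 12.0), pre-builds the AOT kernels, then installs the package without build isolation and removes the checkout. With the default CUDA_VERSION=12.8.1 the selection resolves as in this sketch:

    CUDA_VERSION=12.8.1                                    # matches the final else-branch above
    FI_TORCH_CUDA_ARCH_LIST="7.5 8.0 8.9 9.0a 10.0a 12.0"
    TORCH_CUDA_ARCH_LIST="${FI_TORCH_CUDA_ARCH_LIST}" python3 -m flashinfer.aot   # AOT kernel build, as in the RUN step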
COPY ./vllm_v0.10.0/examples examples
COPY ./vllm_v0.10.0/benchmarks benchmarks
COPY ./vllm_v0.10.0/vllm/collect_env.py .
# RUN --mount=type=cache,target=/root/.cache/uv \
# . /etc/environment && \
# uv pip list
RUN --mount=type=cache,target=/root/.cache/uv \
. /etc/environment && \
uv pip list
# COPY ./vllm_v0.10.0/requirements/build.txt requirements/build.txt
# RUN --mount=type=cache,target=/root/.cache/uv \
# uv pip install --system -r ./vllm_v0.10.0/requirements/build.txt \
# --extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.')
COPY ./vllm_v0.10.0/requirements/build.txt requirements/build.txt
RUN --mount=type=cache,target=/root/.cache/uv \
uv pip install --system -r ./vllm_v0.10.0/requirements/build.txt \
--extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.')
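Taken together, the changed COPY and RUN lines assume the vLLM sources are vendored under ./vllm_v0.10.0 in the build context instead of sitting at the repository root. An illustrative check of the files those paths expect, reconstructed from this diff and not exhaustive:

    ls vllm_v0.10.0/examples \
       vllm_v0.10.0/benchmarks \
       vllm_v0.10.0/vllm/collect_env.py \
       vllm_v0.10.0/requirements/build.txt \
       vllm_v0.10.0/requirements/kv_connectors.txt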
# #################### vLLM installation IMAGE ####################
#################### vLLM installation IMAGE ####################
# #################### TEST IMAGE ####################
# # image to run unit testing suite
@@ -419,7 +419,7 @@ COPY ./vllm_v0.10.0/requirements/kv_connectors.txt requirements/kv_connectors.txt
# install additional dependencies for openai api server
RUN --mount=type=cache,target=/root/.cache/uv \
if [ "$INSTALL_KV_CONNECTORS" = "true" ]; then \
uv pip install --system -r requirements/kv_connectors.txt; \
uv pip install --system -r ./vllm_v0.10.0/requirements/kv_connectors.txt; \
fi; \
if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
BITSANDBYTES_VERSION="0.42.0"; \