@@ -1,3 +1,4 @@
+
 # The vLLM Dockerfile is used to construct vLLM image that can be directly used
 # to run the OpenAI compatible server.
 
@@ -63,15 +64,15 @@ ARG PIP_KEYRING_PROVIDER=disabled
 ARG UV_KEYRING_PROVIDER=${PIP_KEYRING_PROVIDER}
 
 # Flag enables build-in KV-connector dependency libs into docker images
-ARG INSTALL_KV_CONNECTOR=false
+ARG INSTALL_KV_CONNECTORS=false
 
 #################### BASE BUILD IMAGE ####################
 # prepare basic build environment
 FROM ${BUILD_BASE_IMAGE} AS base
 ARG CUDA_VERSION
 ARG PYTHON_VERSION
 ARG TARGETPLATFORM
-ARG INSTALL_KV_CONNECTOR=false
+ARG INSTALL_KV_CONNECTORS=false
 ENV DEBIAN_FRONTEND=noninteractive
 
 ARG DEADSNAKES_MIRROR_URL
@@ -280,7 +281,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \
 FROM ${FINAL_BASE_IMAGE} AS vllm-base
 ARG CUDA_VERSION
 ARG PYTHON_VERSION
-ARG INSTALL_KV_CONNECTOR=false
+ARG INSTALL_KV_CONNECTORS=false
 WORKDIR /vllm-workspace
 ENV DEBIAN_FRONTEND=noninteractive
 ARG TARGETPLATFORM
@@ -490,7 +491,7 @@ RUN mv mkdocs.yaml test_docs/
 # base openai image with additional requirements, for any subsequent openai-style images
 FROM vllm-base AS vllm-openai-base
 ARG TARGETPLATFORM
-ARG INSTALL_KV_CONNECTOR=false
+ARG INSTALL_KV_CONNECTORS=false
 
 ARG PIP_INDEX_URL UV_INDEX_URL
 ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL
@@ -503,7 +504,7 @@ COPY requirements/kv_connectors.txt requirements/kv_connectors.txt
 
 # install additional dependencies for openai api server
 RUN --mount=type=cache,target=/root/.cache/uv \
-    if [ "$INSTALL_KV_CONNECTOR" = "true" ]; then \
+    if [ "$INSTALL_KV_CONNECTORS" = "true" ]; then \
         uv pip install --system -r requirements/kv_connectors.txt; \
     fi; \
     if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \