diff --git a/backends/python/server/requirements-intel.txt b/backends/python/server/requirements-intel.txt
index 5accea69..8ff67fe4 100644
--- a/backends/python/server/requirements-intel.txt
+++ b/backends/python/server/requirements-intel.txt
@@ -42,4 +42,4 @@ win32-setctime==1.1.0 ; python_version >= "3.9" and python_version < "3.13" and
 wrapt==1.15.0 ; python_version >= "3.9" and python_version < "3.13"
 transformers==4.40.0 ; python_version >= "3.9" and python_version < "3.13"
 pyrsistent==0.20.0 ; python_version >= "3.9" and python_version < "3.13"
-einops==0.8.0 ; python_version >= "3.9" and python_version < "3.13"
\ No newline at end of file
+einops==0.8.0 ; python_version >= "3.9" and python_version < "3.13"
diff --git a/docs/source/en/custom_container.md b/docs/source/en/custom_container.md
index ab2913c4..c670026c 100644
--- a/docs/source/en/custom_container.md
+++ b/docs/source/en/custom_container.md
@@ -37,6 +37,9 @@ Once you have determined the compute capability is determined, set it as the `ru
 the container as shown in the example below:
 
 ```shell
+# Get submodule dependencies
+git submodule update --init
+
 runtime_compute_cap=80
 
 docker build . -f Dockerfile-cuda --build-arg CUDA_COMPUTE_CAP=$runtime_compute_cap
diff --git a/docs/source/en/intel_container.md b/docs/source/en/intel_container.md
index f260fb4e..f0fae218 100644
--- a/docs/source/en/intel_container.md
+++ b/docs/source/en/intel_container.md
@@ -107,4 +107,4 @@ docker pull ghcr.io/huggingface/text-embeddings-inference:xpu-ipex-latest
 To use the prebuilt image optimized for Intel® HPUs (Gaudi), run:
 ```shell
 docker pull ghcr.io/huggingface/text-embeddings-inference:hpu-latest
-```
\ No newline at end of file
+```