diff --git a/Cargo.lock b/Cargo.lock
index 01e4703e..afbba0de 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -326,7 +326,7 @@ dependencies = [
 
 [[package]]
 name = "backend-grpc-client"
-version = "1.6.1"
+version = "1.7.0"
 dependencies = [
  "grpc-metadata",
  "prost 0.11.9",
@@ -4436,7 +4436,7 @@ dependencies = [
 
 [[package]]
 name = "text-embeddings-backend"
-version = "1.6.1"
+version = "1.7.0"
 dependencies = [
  "clap",
  "hf-hub",
@@ -4452,7 +4452,7 @@ dependencies = [
 
 [[package]]
 name = "text-embeddings-backend-candle"
-version = "1.6.1"
+version = "1.7.0"
 dependencies = [
  "accelerate-src",
  "anyhow",
@@ -4482,7 +4482,7 @@ dependencies = [
 
 [[package]]
 name = "text-embeddings-backend-core"
-version = "1.6.1"
+version = "1.7.0"
 dependencies = [
  "clap",
  "nohash-hasher",
@@ -4491,7 +4491,7 @@ dependencies = [
 
 [[package]]
 name = "text-embeddings-backend-ort"
-version = "1.6.1"
+version = "1.7.0"
 dependencies = [
  "anyhow",
  "ndarray",
@@ -4507,7 +4507,7 @@ dependencies = [
 
 [[package]]
 name = "text-embeddings-backend-python"
-version = "1.6.1"
+version = "1.7.0"
 dependencies = [
  "backend-grpc-client",
  "nohash-hasher",
@@ -4521,7 +4521,7 @@ dependencies = [
 
 [[package]]
 name = "text-embeddings-core"
-version = "1.6.1"
+version = "1.7.0"
 dependencies = [
  "async-channel",
  "hf-hub",
@@ -4536,7 +4536,7 @@ dependencies = [
 
 [[package]]
 name = "text-embeddings-router"
-version = "1.6.1"
+version = "1.7.0"
 dependencies = [
  "anyhow",
  "async-stream",
diff --git a/docs/source/en/quick_tour.md b/docs/source/en/quick_tour.md
index 5c021ee8..16973b5c 100644
--- a/docs/source/en/quick_tour.md
+++ b/docs/source/en/quick_tour.md
@@ -23,7 +23,7 @@ The easiest way to get started with TEI is to use one of the official Docker con
 
 Hence one needs to install Docker following their [installation instructions](https://docs.docker.com/get-docker/).
 
-TEI supports inference both on GPU and CPU. If you plan on using a GPU, make sure to check that your hardware is supported by checking [this table](https://github.com/huggingface/text-embeddings-inference?tab=readme-ov-file#docker-images).
+TEI supports inference both on GPU and CPU. If you plan on using a GPU, make sure to check that your hardware is supported by checking [this table](https://github.com/huggingface/text-embeddings-inference?tab=readme-ov-file#docker-images). Next, install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). NVIDIA drivers on your device need to be compatible with CUDA version 12.2 or higher.
 
 ## Deploy
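
The docs hunk above adds the NVIDIA Container Toolkit and CUDA 12.2+ driver prerequisites for GPU deployment. As a rough sanity check of those prerequisites, the sketch below first verifies that the toolkit can expose the GPU inside a container, then launches the TEI container. The CUDA base-image tag, the `1.7` image tag, and the model id are illustrative assumptions, not values taken from this diff.

```shell
# Check that the NVIDIA Container Toolkit passes the GPU through to containers.
# (The CUDA base-image tag is an assumption; any image built for CUDA >= 12.2 works.)
docker run --rm --gpus all nvidia/cuda:12.2.0-base-ubuntu22.04 nvidia-smi

# Launch TEI on GPU (image tag and model id are assumptions for illustration).
model=BAAI/bge-large-en-v1.5
volume=$PWD/data
docker run --gpus all -p 8080:80 -v $volume:/data --pull always \
    ghcr.io/huggingface/text-embeddings-inference:1.7 --model-id $model
```

If `nvidia-smi` fails inside the container, the driver/toolkit setup needs fixing before the TEI image will see the GPU.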