diff --git a/elasticsearch/_async/client/cat.py b/elasticsearch/_async/client/cat.py index ba9795103..80120a0c0 100644 --- a/elasticsearch/_async/client/cat.py +++ b/elasticsearch/_async/client/cat.py @@ -1774,7 +1774,200 @@ async def nodes( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, full_id: t.Optional[t.Union[bool, str]] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "build", + "completion.size", + "cpu", + "disk.avail", + "disk.total", + "disk.used", + "disk.used_percent", + "fielddata.evictions", + "fielddata.memory_size", + "file_desc.current", + "file_desc.max", + "file_desc.percent", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "heap.current", + "heap.max", + "heap.percent", + "http_address", + "id", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "jdk", + "load_15m", + "load_1m", + "load_5m", + "mappings.total_count", + "mappings.total_estimated_overhead_in_bytes", + "master", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "name", + "node.role", + "pid", + "port", + "query_cache.evictions", + "query_cache.hit_count", + "query_cache.memory_size", + "query_cache.miss_count", + "ram.current", + "ram.max", + "ram.percent", + "refresh.time", + "refresh.total", + "request_cache.evictions", + "request_cache.hit_count", + "request_cache.memory_size", + "request_cache.miss_count", + "script.cache_evictions", + "script.compilations", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "shard_stats.total_count", + "suggest.current", + "suggest.time", + "suggest.total", + "uptime", + "version", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "build", + "completion.size", + "cpu", + "disk.avail", + "disk.total", + "disk.used", + "disk.used_percent", + "fielddata.evictions", + "fielddata.memory_size", + "file_desc.current", + "file_desc.max", + "file_desc.percent", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "heap.current", + "heap.max", + "heap.percent", + "http_address", + "id", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "jdk", + "load_15m", + "load_1m", + "load_5m", + "mappings.total_count", + "mappings.total_estimated_overhead_in_bytes", + "master", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "name", + "node.role", + "pid", + "port", + 
"query_cache.evictions", + "query_cache.hit_count", + "query_cache.memory_size", + "query_cache.miss_count", + "ram.current", + "ram.max", + "ram.percent", + "refresh.time", + "refresh.total", + "request_cache.evictions", + "request_cache.hit_count", + "request_cache.memory_size", + "request_cache.miss_count", + "script.cache_evictions", + "script.compilations", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "shard_stats.total_count", + "suggest.current", + "suggest.time", + "suggest.total", + "uptime", + "version", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, @@ -1801,16 +1994,17 @@ async def nodes( to `text`, `json`, `cbor`, `yaml`, or `smile`. :param full_id: If `true`, return the full node ID. If `false`, return the shortened node ID. - :param h: List of columns to appear in the response. Supports simple wildcards. + :param h: A comma-separated list of columns names to display. It supports simple + wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param include_unloaded_segments: If true, the response includes information from segments that are not loaded into memory. - :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. - :param time: Unit used to display time values. + :param master_timeout: The period to wait for a connection to the master node. + :param s: A comma-separated list of column names or aliases that determines the + sort order. Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. + :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index 7d90246e5..eef722b8e 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -370,12 +370,7 @@ async def put( """ .. raw:: html -
Create an inference endpoint.
- When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
Create an inference endpoint.
IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
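To ground this, here is a minimal, hedged sketch of creating an endpoint with the async Python client. The endpoint ID, service, and settings are illustrative placeholders rather than values from this diff; the request body is passed through the client's inference_config body parameter.

import asyncio
from elasticsearch import AsyncElasticsearch

async def main():
    client = AsyncElasticsearch("http://localhost:9200")  # placeholder URL
    # Create a generic inference endpoint; service and settings are illustrative.
    resp = await client.inference.put(
        task_type="text_embedding",            # illustrative task type
        inference_id="my-embedding-endpoint",  # placeholder endpoint ID
        inference_config={
            "service": "elasticsearch",
            "service_settings": {
                "model_id": ".multilingual-e5-small",
                "num_allocations": 1,
                "num_threads": 1,
            },
        },
    )
    print(resp)
    await client.close()

asyncio.run(main())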
@@ -458,11 +453,6 @@ async def put_alibabacloud(
Create an AlibabaCloud AI Search inference endpoint.
Create an inference endpoint to perform an inference task with the alibabacloud-ai-search service.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
INFO: You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.
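A hedged sketch of the corresponding client call, assuming the generated put_alibabacloud signature (a service-specific ID parameter plus service and service_settings) and reusing the client from the first sketch inside an async context; every ID, host, and key below is a placeholder.

# Placeholder values throughout; per the note above, the keys cannot be retrieved or changed later.
resp = await client.inference.put_alibabacloud(
    task_type="text_embedding",
    alibabacloud_inference_id="my-alibabacloud-endpoint",
    service="alibabacloud-ai-search",
    service_settings={
        "api_key": "<api-key>",
        "service_id": "ops-text-embedding-001",  # placeholder service ID
        "host": "<workspace-host>",
        "workspace": "default",
    },
)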
- When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
Create an Anthropic inference endpoint.
Create an inference endpoint to perform an inference task with the anthropic service.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
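Sketched usage under the same assumptions (generated put_anthropic signature, async context, placeholder credentials); Anthropic is a completion-style service, so a max_tokens task setting is shown.

resp = await client.inference.put_anthropic(
    task_type="completion",
    anthropic_inference_id="my-anthropic-endpoint",
    service="anthropic",
    service_settings={"api_key": "<api-key>", "model_id": "claude-3-5-sonnet-20240620"},
    task_settings={"max_tokens": 1024},  # illustrative token budget
)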
Create an Azure AI Studio inference endpoint.
Create an inference endpoint to perform an inference task with the azureaistudio service.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
The list of embeddings models that you can choose from in your deployment can be found in the Azure models documentation.
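A hedged sketch for put_azureaistudio under the same assumptions; the target URI, provider, and endpoint type are placeholders that would come from your Azure AI Studio deployment.

resp = await client.inference.put_azureaistudio(
    task_type="text_embedding",
    azureaistudio_inference_id="my-azure-ai-studio-endpoint",
    service="azureaistudio",
    service_settings={
        "api_key": "<api-key>",
        "target": "https://<deployment>.<region>.inference.ai.azure.com",  # placeholder target URI
        "provider": "openai",
        "endpoint_type": "token",
    },
)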
- When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
Create a Cohere inference endpoint.
Create an inference endpoint to perform an inference task with the cohere service.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
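A sketch for put_cohere under the same signature assumption; the model ID is a placeholder taken as an example, not from this diff.

resp = await client.inference.put_cohere(
    task_type="text_embedding",
    cohere_inference_id="my-cohere-endpoint",
    service="cohere",
    service_settings={"api_key": "<api-key>", "model_id": "embed-english-v3.0"},
)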
Create a Google AI Studio inference endpoint.
Create an inference endpoint to perform an inference task with the googleaistudio service.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
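Sketched under the same assumptions for put_googleaistudio; the model ID is a placeholder.

resp = await client.inference.put_googleaistudio(
    task_type="completion",
    googleaistudio_inference_id="my-google-ai-studio-endpoint",
    service="googleaistudio",
    service_settings={"api_key": "<api-key>", "model_id": "gemini-1.5-flash"},
)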
Create a Google Vertex AI inference endpoint.
Create an inference endpoint to perform an inference task with the googlevertexai service.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
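A hedged sketch for put_googlevertexai; Vertex AI authenticates with a service-account JSON rather than a bare API key, and every value here is a placeholder.

resp = await client.inference.put_googlevertexai(
    task_type="text_embedding",
    googlevertexai_inference_id="my-google-vertex-ai-endpoint",
    service="googlevertexai",
    service_settings={
        "service_account_json": "<service-account-json>",
        "model_id": "text-embedding-005",  # placeholder model
        "location": "us-central1",         # placeholder region
        "project_id": "<project-id>",
    },
)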
multilingual-e5-base
multilingual-e5-small
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
Create an inference endpoint to perform an inference task with the jinaai service.
To review the available rerank models, refer to https://jina.ai/reranker.
To review the available text_embedding models, refer to https://jina.ai/embeddings/.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
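A sketch for put_jinaai under the same signature assumption; the model ID is one of the embedding models listed at the URL above, used here as a placeholder.

resp = await client.inference.put_jinaai(
    task_type="text_embedding",
    jinaai_inference_id="my-jinaai-endpoint",
    service="jinaai",
    service_settings={"api_key": "<api-key>", "model_id": "jina-embeddings-v3"},
)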
Create a Mistral inference endpoint.
Create an inference endpoint to perform an inference task with the mistral service.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
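A hedged sketch for put_mistral; note the settings field name "model" (rather than "model_id") is my assumption about this service's schema, and all values are placeholders.

resp = await client.inference.put_mistral(
    task_type="text_embedding",
    mistral_inference_id="my-mistral-endpoint",
    service="mistral",
    service_settings={"api_key": "<api-key>", "model": "mistral-embed"},  # field name assumed
)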
Create an OpenAI inference endpoint.
Create an inference endpoint to perform an inference task with the openai service or openai compatible APIs.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
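Sketched usage for put_openai under the same assumptions; the model ID is a placeholder.

resp = await client.inference.put_openai(
    task_type="text_embedding",
    openai_inference_id="my-openai-endpoint",
    service="openai",
    service_settings={"api_key": "<api-key>", "model_id": "text-embedding-3-small"},
)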
Create an inference endpoint to perform an inference task with the watsonxai service.
You need an IBM Cloud Databases for Elasticsearch deployment to use the watsonxai inference service.
You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
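A hedged sketch for put_watsonx; the URL, project ID, model, and date-based API version are all placeholders that would come from your watsonx.ai deployment.

resp = await client.inference.put_watsonx(
    task_type="text_embedding",
    watsonx_inference_id="my-watsonx-endpoint",
    service="watsonxai",
    service_settings={
        "api_key": "<api-key>",
        "url": "<watsonx-url>",
        "model_id": "ibm/slate-30m-english-rtrvr",  # placeholder model
        "project_id": "<project-id>",
        "api_version": "2024-05-02",                # placeholder API version
    },
)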
Create an inference endpoint.
- When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
Create an inference endpoint.
IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
@@ -458,11 +453,6 @@ def put_alibabacloud(
Create an AlibabaCloud AI Search inference endpoint.
Create an inference endpoint to perform an inference task with the alibabacloud-ai-search service.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
INFO: You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.
- When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
Create an Anthropic inference endpoint.
Create an inference endpoint to perform an inference task with the anthropic service.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
Create an Azure AI Studio inference endpoint.
Create an inference endpoint to perform an inference task with the azureaistudio service.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
The list of embeddings models that you can choose from in your deployment can be found in the Azure models documentation.
- When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
Create a Cohere inference endpoint.
Create an inference endpoint to perform an inference task with the cohere service.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
Create a Google AI Studio inference endpoint.
Create an inference endpoint to perform an inference task with the googleaistudio service.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
Create a Google Vertex AI inference endpoint.
Create an inference endpoint to perform an inference task with the googlevertexai service.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
multilingual-e5-base
multilingual-e5-small
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
Create an inference endpoint to perform an inference task with the jinaai service.
To review the available rerank models, refer to https://jina.ai/reranker.
To review the available text_embedding models, refer to https://jina.ai/embeddings/.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
Create a Mistral inference endpoint.
Create an inference endpoint to perform an inference task with the mistral service.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
Create an OpenAI inference endpoint.
Create an inference endpoint to perform an inference task with the openai service or openai compatible APIs.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
Create an inference endpoint to perform an inference task with the watsonxai service.
You need an IBM Cloud Databases for Elasticsearch deployment to use the watsonxai inference service.
You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.
When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
- After creating the endpoint, wait for the model deployment to complete before using it.
- To verify the deployment status, use the get trained model statistics API.
- Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
- Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
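The synchronous client mirrors these signatures without await; a minimal sketch under the same placeholder assumptions:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder URL
resp = client.inference.put_openai(
    task_type="text_embedding",
    openai_inference_id="my-openai-endpoint",
    service="openai",
    service_settings={"api_key": "<api-key>", "model_id": "text-embedding-3-small"},
)
client.close()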