diff --git a/src/llama_stack_client/lib/cli/providers/providers.py b/src/llama_stack_client/lib/cli/providers/providers.py
index bd07628d..3ae40dc3 100644
--- a/src/llama_stack_client/lib/cli/providers/providers.py
+++ b/src/llama_stack_client/lib/cli/providers/providers.py
@@ -2,6 +2,7 @@
 
 from .list import list_providers
 from .inspect import inspect_provider
+from .update import update_provider
 
 
 @click.group()
@@ -13,3 +14,4 @@ def providers():
 # Register subcommands
 providers.add_command(list_providers)
 providers.add_command(inspect_provider)
+providers.add_command(update_provider)
diff --git a/src/llama_stack_client/lib/cli/providers/update.py b/src/llama_stack_client/lib/cli/providers/update.py
new file mode 100644
index 00000000..f9927b3f
--- /dev/null
+++ b/src/llama_stack_client/lib/cli/providers/update.py
@@ -0,0 +1,37 @@
+import ast
+
+import click
+import yaml
+from rich.console import Console
+
+from ..common.utils import handle_client_errors
+
+
+@click.command(name="update")
+@click.argument("api")
+@click.argument("provider_id")
+@click.argument("provider_type")
+@click.argument("config")
+@click.pass_context
+@handle_client_errors("update providers")
+def update_provider(ctx, api, provider_id, provider_type, config):
+    """Update a provider's configuration on the distribution endpoint."""
+    client = ctx.obj["client"]
+    console = Console()
+
+    # CONFIG is a Python dict literal; literal_eval rejects arbitrary expressions.
+    config = ast.literal_eval(config)
+
+    providers_response = client.providers.update(
+        provider_id=provider_id, provider_type=provider_type, api=api, config=config
+    )
+
+    if not providers_response:
+        click.secho("Provider not found", fg="red")
+        raise click.exceptions.Exit(1)
+
+    console.print(f"provider_id={providers_response.provider_id}")
+    console.print(f"provider_type={providers_response.provider_type}")
+    console.print("config:")
+    for line in yaml.dump(providers_response.config, indent=2).split("\n"):
+        console.print(line)
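A quick usage sketch for the new subcommand (the provider id, type, and config values below are hypothetical, and the entry-point name assumes the package's standard llama-stack-client script). Because CONFIG is parsed with ast.literal_eval, it must be a Python dict literal:

    llama-stack-client providers update inference ollama remote::ollama "{'url': 'http://localhost:11434'}"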
diff --git a/src/llama_stack_client/resources/providers.py b/src/llama_stack_client/resources/providers.py
index d87960ac..61257251 100644
--- a/src/llama_stack_client/resources/providers.py
+++ b/src/llama_stack_client/resources/providers.py
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import Type, cast
+from typing import Type, cast, Dict, Any
 
 import httpx
 
@@ -15,10 +15,15 @@
     async_to_raw_response_wrapper,
     async_to_streamed_response_wrapper,
 )
+from .._utils import (
+    maybe_transform,
+    async_maybe_transform,
+)
 from .._wrappers import DataWrapper
 from .._base_client import make_request_options
 from ..types.provider_info import ProviderInfo
 from ..types.provider_list_response import ProviderListResponse
+from ..types import provider_update_params
 
 __all__ = ["ProvidersResource", "AsyncProvidersResource"]
 
@@ -42,6 +47,44 @@ def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse:
         For more information, see https://www.github.com/stainless-sdks/llama-stack-python#with_streaming_response
         """
         return ProvidersResourceWithStreamingResponse(self)
+
+    def update(
+        self,
+        api: str,
+        provider_id: str,
+        provider_type: str,
+        *,
+        config: Dict[str, Any],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ProviderInfo:
+        """
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._put(
+            f"/v1/providers/{api}/{provider_id}/{provider_type}",
+            body=maybe_transform(
+                {
+                    "config": config,
+                },
+                provider_update_params.UpdateProviderRequest,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ProviderInfo,
+        )
 
     def retrieve(
         self,
@@ -116,6 +159,45 @@ def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse:
         For more information, see https://www.github.com/stainless-sdks/llama-stack-python#with_streaming_response
         """
         return AsyncProvidersResourceWithStreamingResponse(self)
+
+
+    async def update(
+        self,
+        api: str,
+        provider_id: str,
+        provider_type: str,
+        *,
+        config: Dict[str, Any],
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ProviderInfo:
+        """
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._put(
+            f"/v1/providers/{api}/{provider_id}/{provider_type}",
+            body=await async_maybe_transform(
+                {
+                    "config": config,
+                },
+                provider_update_params.UpdateProviderRequest,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=ProviderInfo,
+        )
 
     async def retrieve(
         self,
@@ -175,6 +257,9 @@ class ProvidersResourceWithRawResponse:
     def __init__(self, providers: ProvidersResource) -> None:
         self._providers = providers
 
+        self.update = to_raw_response_wrapper(
+            providers.update,
+        )
         self.retrieve = to_raw_response_wrapper(
             providers.retrieve,
         )
@@ -187,6 +272,9 @@ class AsyncProvidersResourceWithRawResponse:
     def __init__(self, providers: AsyncProvidersResource) -> None:
         self._providers = providers
 
+        self.update = async_to_raw_response_wrapper(
+            providers.update,
+        )
         self.retrieve = async_to_raw_response_wrapper(
             providers.retrieve,
         )
@@ -199,6 +287,9 @@ class ProvidersResourceWithStreamingResponse:
     def __init__(self, providers: ProvidersResource) -> None:
         self._providers = providers
 
+        self.update = to_streamed_response_wrapper(
+            providers.update,
+        )
         self.retrieve = to_streamed_response_wrapper(
             providers.retrieve,
         )
@@ -211,6 +302,9 @@ class AsyncProvidersResourceWithStreamingResponse:
     def __init__(self, providers: AsyncProvidersResource) -> None:
         self._providers = providers
 
+        self.update = async_to_streamed_response_wrapper(
+            providers.update,
+        )
         self.retrieve = async_to_streamed_response_wrapper(
             providers.retrieve,
         )
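For reference, a minimal sketch of calling the new SDK method directly; the base URL and provider values are illustrative assumptions, not part of this diff:

    from llama_stack_client import LlamaStackClient

    # Assumes a distribution is reachable at this address.
    client = LlamaStackClient(base_url="http://localhost:8321")

    # Issues PUT /v1/providers/inference/ollama/remote::ollama
    # with {"config": {...}} as the JSON body.
    provider = client.providers.update(
        api="inference",
        provider_id="ollama",
        provider_type="remote::ollama",
        config={"url": "http://localhost:11434"},
    )
    print(provider.provider_id, provider.provider_type, provider.config)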
diff --git a/src/llama_stack_client/types/provider_update_params.py b/src/llama_stack_client/types/provider_update_params.py
new file mode 100644
index 00000000..07e3ac84
--- /dev/null
+++ b/src/llama_stack_client/types/provider_update_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Any
+from typing_extensions import Required, TypedDict
+
+__all__ = ["UpdateProviderRequest"]
+
+
+class UpdateProviderRequest(TypedDict, total=False):
+    config: Required[Dict[str, Any]]
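The async client mirrors the sync call; a sketch under the same illustrative assumptions as above:

    import asyncio

    from llama_stack_client import AsyncLlamaStackClient


    async def main() -> None:
        # Hypothetical endpoint, as in the sync example.
        client = AsyncLlamaStackClient(base_url="http://localhost:8321")
        # The request body is shaped by UpdateProviderRequest, i.e. {"config": {...}}.
        provider = await client.providers.update(
            api="inference",
            provider_id="ollama",
            provider_type="remote::ollama",
            config={"url": "http://localhost:11434"},
        )
        print(provider.config)


    asyncio.run(main())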