diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json
index 897e9c3b2646..8cf9a43c6f29 100644
--- a/litellm/model_prices_and_context_window_backup.json
+++ b/litellm/model_prices_and_context_window_backup.json
@@ -401,6 +401,20 @@
         "litellm_provider": "azure",
         "mode": "embedding"
     },
+    "azure/text-embedding-3-large": {
+        "max_tokens": 8191,
+        "input_cost_per_token": 0.00000013,
+        "output_cost_per_token": 0.000000,
+        "litellm_provider": "azure",
+        "mode": "embedding"
+    },
+    "azure/text-embedding-3-small": {
+        "max_tokens": 8191,
+        "input_cost_per_token": 0.00000002,
+        "output_cost_per_token": 0.000000,
+        "litellm_provider": "azure",
+        "mode": "embedding"
+    },
     "azure/standard/1024-x-1024/dall-e-3": {
         "input_cost_per_pixel": 0.0000000381469,
         "output_cost_per_token": 0.0,
@@ -657,6 +671,15 @@
         "litellm_provider": "vertex_ai-language-models",
         "mode": "chat"
     },
+    "gemini-1.5-pro": {
+        "max_tokens": 8192,
+        "max_input_tokens": 1000000,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 0,
+        "output_cost_per_token": 0,
+        "litellm_provider": "vertex_ai-language-models",
+        "mode": "chat"
+    },
     "gemini-pro-vision": {
         "max_tokens": 16384,
         "max_output_tokens": 2048,
@@ -676,6 +699,18 @@
         "litellm_provider": "vertex_ai-vision-models",
         "mode": "chat"
     },
+    "gemini-1.5-pro-vision": {
+        "max_tokens": 8192,
+        "max_input_tokens": 1000000,
+        "max_output_tokens": 8192,
+        "max_images_per_prompt": 16,
+        "max_videos_per_prompt": 1,
+        "max_video_length": 2,
+        "input_cost_per_token": 0,
+        "output_cost_per_token": 0,
+        "litellm_provider": "vertex_ai-vision-models",
+        "mode": "chat"
+    },
     "textembedding-gecko": {
         "max_tokens": 3072,
         "max_input_tokens": 3072,
@@ -771,6 +806,15 @@
         "litellm_provider": "gemini",
         "mode": "chat"
     },
+    "gemini/gemini-1.5-pro": {
+        "max_tokens": 8192,
+        "max_input_tokens": 1000000,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 0,
+        "output_cost_per_token": 0,
+        "litellm_provider": "gemini",
+        "mode": "chat"
+    },
     "gemini/gemini-pro-vision": {
         "max_tokens": 30720,
         "max_output_tokens": 2048,
@@ -778,6 +822,15 @@
         "output_cost_per_token": 0.0,
         "litellm_provider": "gemini",
         "mode": "chat"
+    },
+    "gemini/gemini-1.5-pro-vision": {
+        "max_tokens": 8192,
+        "max_input_tokens": 1000000,
+        "max_output_tokens": 8192,
+        "input_cost_per_token": 0,
+        "output_cost_per_token": 0,
+        "litellm_provider": "gemini",
+        "mode": "chat"
     },
     "command-nightly": {
         "max_tokens": 4096,
diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json
index c3abbbf7dcfe..8cf9a43c6f29 100644
--- a/model_prices_and_context_window.json
+++ b/model_prices_and_context_window.json
@@ -401,6 +401,20 @@
         "litellm_provider": "azure",
         "mode": "embedding"
     },
+    "azure/text-embedding-3-large": {
+        "max_tokens": 8191,
+        "input_cost_per_token": 0.00000013,
+        "output_cost_per_token": 0.000000,
+        "litellm_provider": "azure",
+        "mode": "embedding"
+    },
+    "azure/text-embedding-3-small": {
+        "max_tokens": 8191,
+        "input_cost_per_token": 0.00000002,
+        "output_cost_per_token": 0.000000,
+        "litellm_provider": "azure",
+        "mode": "embedding"
+    },
     "azure/standard/1024-x-1024/dall-e-3": {
         "input_cost_per_pixel": 0.0000000381469,
         "output_cost_per_token": 0.0,
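As a quick sanity check of how these entries surface at runtime (a sketch, not part of the diff itself): litellm loads this JSON into its `litellm.model_cost` dictionary, keyed by the model names above, so the new entries can be read back directly once the change is in.

```python
import litellm

# Sketch: the JSON entries added above are exposed through litellm's cost map,
# keyed by model name. Field values mirror this diff.
small = litellm.model_cost["azure/text-embedding-3-small"]
print(small["input_cost_per_token"])   # 2e-08
print(small["max_tokens"])             # 8191

# The Gemini 1.5 Pro entries are added with zero cost per token in this diff.
gemini = litellm.model_cost["gemini/gemini-1.5-pro"]
print(gemini["max_input_tokens"])      # 1000000
```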