From 48fd60d088af44e42590e69f6e2467de42d9d5f4 Mon Sep 17 00:00:00 2001
From: CyanideByte
Date: Sat, 2 Dec 2023 21:39:19 -0800
Subject: [PATCH 1/2] Inserts dummy api key if missing for python scripts using local

---
 interpreter/core/llm/setup_text_llm.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/interpreter/core/llm/setup_text_llm.py b/interpreter/core/llm/setup_text_llm.py
index 0d63fc5cf1..4662b25746 100644
--- a/interpreter/core/llm/setup_text_llm.py
+++ b/interpreter/core/llm/setup_text_llm.py
@@ -129,6 +129,9 @@ def base_llm(messages):
         else:
             params["temperature"] = 0.0
 
+        if interpreter.local and not "api_key" in params:
+            params["api_key"] = "sk-dummykey"
+
         if interpreter.model == "gpt-4-vision-preview":
             # We need to go straight to OpenAI for this, LiteLLM doesn't work
             if interpreter.api_base:

From 05a86fadd53570c0a5be72cf1b3027ccb996e34e Mon Sep 17 00:00:00 2001
From: CyanideByte
Date: Sat, 2 Dec 2023 21:56:42 -0800
Subject: [PATCH 2/2] Removed local check, people also use cloud hosted open source models

---
 interpreter/core/llm/setup_text_llm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/interpreter/core/llm/setup_text_llm.py b/interpreter/core/llm/setup_text_llm.py
index 4662b25746..0401ddbed5 100644
--- a/interpreter/core/llm/setup_text_llm.py
+++ b/interpreter/core/llm/setup_text_llm.py
@@ -129,7 +129,7 @@ def base_llm(messages):
         else:
             params["temperature"] = 0.0
 
-        if interpreter.local and not "api_key" in params:
+        if not "api_key" in params:
             params["api_key"] = "sk-dummykey"
 
         if interpreter.model == "gpt-4-vision-preview":
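
For reference, a minimal sketch of the fallback these two patches converge on. This is a hypothetical, simplified helper (ensure_api_key is not the project's function; the real logic mutates params inline inside base_llm in interpreter/core/llm/setup_text_llm.py before handing them to LiteLLM): any request whose params lack an api_key gets a placeholder value, so local or cloud-hosted open-source endpoints that ignore the key work without an OPENAI_API_KEY being set.

    # Hypothetical, simplified sketch of the behaviour after PATCH 2/2;
    # not the actual open-interpreter code.
    def ensure_api_key(params: dict) -> dict:
        """Insert a dummy api_key when none was supplied."""
        if "api_key" not in params:
            # Placeholder value; endpoints that do not check keys accept it.
            params["api_key"] = "sk-dummykey"
        return params

    print(ensure_api_key({"model": "ollama/mistral", "temperature": 0.0}))
    # -> {'model': 'ollama/mistral', 'temperature': 0.0, 'api_key': 'sk-dummykey'}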