32 changes: 17 additions & 15 deletions interpreter/terminal_interface/profiles/defaults/local.py
@@ -2,18 +2,19 @@
 import platform
 import subprocess
 import time
+import wget
 
 from interpreter import interpreter
 
-if platform.system() == "Darwin": # Check if the system is MacOS
+if platform.system() == "Darwin":  # Check if the system is MacOS
     result = subprocess.run(
         ["xcode-select", "-p"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
     )
     if result.returncode != 0:
         interpreter.display_message(
             "To use the new, fully-managed `interpreter --local` (powered by Llamafile) Open Interpreter requires Mac users to have Xcode installed. You can install Xcode from https://developer.apple.com/xcode/ .\n\nAlternatively, you can use `LM Studio`, `Jan.ai`, or `Ollama` to manage local language models. Learn more at https://docs.openinterpreter.com/guides/running-locally ."
         )
-        time.sleep(7)
+        time.sleep(3)
         raise Exception("Xcode is not installed. Please install Xcode and try again.")
 
 # Define the path to the models directory
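Note: this hunk replaces the external `wget` binary with the `wget` Python package, so the new top-level import must resolve when the profile loads. A minimal sketch of a guarded import, assuming the PyPI `wget` package is the intended dependency:

```python
# Sketch, not part of the diff: fail early with a clear message if the
# assumed PyPI "wget" package is missing from the environment.
try:
    import wget
except ImportError as exc:
    raise ImportError(
        "The 'wget' package is required to download the llamafile. "
        "Install it with `pip install wget`."
    ) from exc
```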
@@ -29,24 +30,24 @@
 # Check if the new llamafile exists, if not download it
 if not os.path.exists(llamafile_path):
     interpreter.display_message(
-        "Open Interpreter will attempt to download and run the `Phi-2` language model. This should take ~10 minutes."
-    )
-    time.sleep(7)
-    subprocess.run(
-        [
-            "wget",
-            "-O",
-            llamafile_path,
-            "https://huggingface.co/jartine/phi-2-llamafile/resolve/main/phi-2.Q4_K_M.llamafile",
-        ],
-        check=True,
+        "Attempting to download the `Phi-2` language model. This may take a few minutes."
     )
+    time.sleep(3)
+
+    url = "https://huggingface.co/jartine/phi-2-llamafile/resolve/main/phi-2.Q4_K_M.llamafile"
+    wget.download(url, llamafile_path)
 
 # Make the new llamafile executable
-subprocess.run(["chmod", "+x", llamafile_path], check=True)
+if platform.system() != "Windows":
+    subprocess.run(["chmod", "+x", llamafile_path], check=True)
 
 # Run the new llamafile in the background
-subprocess.Popen([llamafile_path])
+if os.path.exists(llamafile_path):
+    subprocess.Popen([llamafile_path, "-ngl", "9999"])
+else:
+    error_message = "The llamafile does not exist or is corrupted. Please ensure it has been downloaded correctly or try again."
+    print(error_message)
+    interpreter.display_message(error_message)
 
 interpreter.system_message = "You are Open Interpreter, a world-class programmer that can execute code on the user's machine."
 interpreter.offline = True
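Note: `wget.download(url, out)` blocks until the file is fetched and prints a progress bar to stdout, which removes the dependency on a system `wget` binary (the old `subprocess.run` call failed on machines without one). The `chmod` guard is needed because the `chmod` command does not exist on Windows. A small usage sketch of the download call; the output path below is hypothetical:

```python
import wget

url = "https://huggingface.co/jartine/phi-2-llamafile/resolve/main/phi-2.Q4_K_M.llamafile"
# Blocks until the download completes; renders a progress bar on stdout
# and returns the path the file was saved to.
saved = wget.download(url, "models/phi-2.Q4_K_M.llamafile")
print(f"\nSaved to {saved}")
```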
Expand All @@ -56,3 +57,4 @@
interpreter.llm.api_base = "https://localhost:8080/v1"
interpreter.llm.max_tokens = 1000
interpreter.llm.context_window = 3000
interpreter.llm.supports_functions = False