diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 85853997..5639bddc 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -6,7 +6,7 @@ Our vision is to build the defacto CLI for quickly spinning up an AI Agent proje
 
 ### Exclusive Contributor Sticker
 AgentStack contributors all receive a free sticker pack including an exclusive holographic sticker only available to contributors to the project :)
-Once your PR is merge, fill out [this form](https://docs.google.com/forms/d/e/1FAIpQLSfvBEnsT8nsQleonJHoWQtHuhbsgUJ0a9IjOqeZbMGkga2NtA/viewform?usp=sf_link) and I'll send your sticker pack out ASAP! <3
+Once your PR is merged, fill out [this form](https://docs.google.com/forms/d/e/1FAIpQLSfvBEnsT8nsQleonJHoWQtHuhbsgUJ0a9IjOqeZbMGkga2NtA/viewform?usp=sf_link) and I'll send your sticker pack out ASAP! <3
 
 ## How to Help
 
@@ -16,14 +16,15 @@ The best place to engage in conversation about your contribution is in the Issue
 
 ## Setup
 
-1. Clone the repo
-   `git clone https://github.com/AgentOps-AI/AgentStack.git`
+1. Fork the repo from the GitHub website or with [gh repo fork AgentOps-AI/AgentStack](https://cli.github.com/manual/gh_repo_fork)
+2. Clone the forked repo and get in there!
+   For example, over SSH:
+   `git clone git@github.com:<your-username>/AgentStack.git`
   `cd AgentStack`
-2. Install agentstack as an edtiable project and set it up for development and testing
+3. Install agentstack as an editable project and set it up for development and testing
   `pip install -e .[dev,test]`
   This will install the CLI locally and in editable mode so you can use `agentstack ` to test your latest changes
-
 ## Adding Tools
 
 If you're reading this section, you probably have a product that AI agents can use as a tool. We're glad you're here!
 
diff --git a/agentstack/cli/cli.py b/agentstack/cli/cli.py
index 70e7c3ba..2c08ca19 100644
--- a/agentstack/cli/cli.py
+++ b/agentstack/cli/cli.py
@@ -1,5 +1,4 @@
 from typing import Optional
-import os, sys
 from art import text2art
 import inquirer
 from agentstack import conf, log
@@ -16,10 +15,10 @@
     'deepseek/deepseek-coder',
     'deepseek/deepseek-reasoner',
     'openai/gpt-4o',
-    'anthropic/claude-3-5-sonnet',
     'openai/o1-preview',
     'openai/gpt-4-turbo',
-    'anthropic/claude-3-opus',
+    'anthropic/claude-3-opus-latest',
+    'anthropic/claude-3-5-sonnet-20240620',
 ]
 
 
@@ -38,7 +37,7 @@ def welcome_message():
 def undo() -> None:
     """Undo the last committed changes."""
     conf.assert_project()
-    
+
     changed_files = repo.get_uncommitted_files()
     if changed_files:
         log.warning("There are uncommitted changes that may be overwritten.")
@@ -113,4 +112,3 @@ def parse_insertion_point(position: Optional[str] = None) -> Optional[InsertionP
         raise ValueError(f"Position must be one of {','.join(valid_positions)}.")
 
     return next(x for x in InsertionPoint if x.value == position)
-
diff --git a/tests/test_preferred_models.py b/tests/test_preferred_models.py
new file mode 100644
index 00000000..d0b8bee6
--- /dev/null
+++ b/tests/test_preferred_models.py
@@ -0,0 +1,84 @@
+from importlib.util import find_spec
+import pytest
+import subprocess
+import sys
+from difflib import get_close_matches
+from agentstack.cli.cli import PREFERRED_MODELS
+
+
+@pytest.fixture(scope="session", autouse=True)
+def install_litellm():
+    """Install litellm if not already installed."""
+    print("\nChecking for litellm installation...")
+    spec = find_spec("litellm")
+    if spec is None:
+        print("litellm not found, installing...")
+        subprocess.check_call([sys.executable, "-m", "pip", "install", "litellm"])
+        print("litellm installation complete")
+
+
+def clean_model_name(provider: str, model: str) -> str:
+    """
+    Clean up a model name by removing a duplicated provider prefix; in litellm,
+    some providers (e.g. groq and deepseek) repeat the provider in the model name.
+    """
+    if model.startswith(f"{provider}/"):
+        return f"{provider}/{model[len(provider) + 1 :]}"
+    return f"{provider}/{model}"
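+
+
+# A quick illustration of the normalization above (the groq model name here is
+# hypothetical; "gpt-4o" is taken from PREFERRED_MODELS):
+#   clean_model_name("openai", "gpt-4o")        -> "openai/gpt-4o"
+#   clean_model_name("groq", "groq/llama3-70b") -> "groq/llama3-70b"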
+
+
+def find_similar_models(model: str, all_models: set, num_suggestions: int = 3) -> list[str]:
+    """
+    Find similar model names using fuzzy string matching.
+    When the test fails, this suggests likely replacements for a broken name.
+    """
+    try:
+        provider, model_name = model.split('/')
+    except ValueError:
+        return get_close_matches(model, all_models, n=num_suggestions, cutoff=0.3)
+
+    provider_models = [m for m in all_models if m.startswith(f"{provider}/")]
+    if provider_models:
+        matches = get_close_matches(model, provider_models, n=num_suggestions, cutoff=0.3)
+        if matches:
+            return matches
+    return get_close_matches(model, all_models, n=num_suggestions, cutoff=0.3)
+
+
+def test_preferred_models_validity():
+    """Test that all PREFERRED_MODELS are valid LiteLLM models."""
+    from litellm import models_by_provider
+
+    all_litellm_models = set()
+    for provider, models in models_by_provider.items():
+        for model in models:
+            full_model_name = clean_model_name(provider, model)
+            all_litellm_models.add(full_model_name)
+
+    invalid_models_with_suggestions = {}
+    for model in PREFERRED_MODELS:
+        if model not in all_litellm_models:
+            suggestions = find_similar_models(model, all_litellm_models)
+            invalid_models_with_suggestions[model] = suggestions
+
+    if invalid_models_with_suggestions:
+        error_message = (
+            "The following models are not in LiteLLM's supported models:\n"
+            "\nFor a complete list of supported models, visit: https://docs.litellm.ai/docs/providers\n"
+        )
+        for model, suggestions in invalid_models_with_suggestions.items():
+            error_message += f"\n- {model}"
+            if suggestions:
+                error_message += "\n  Similar available models:"
+                for suggestion in suggestions:
+                    error_message += f"\n  * {suggestion}"
+            else:
+                error_message += "\n  No similar models found."
+
+        assert False, error_message
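
---
To exercise the new check locally (assuming an editable install with the
`[dev,test]` extras per CONTRIBUTING.md), something like:

    python -m pytest tests/test_preferred_models.py -v

should work; the session-scoped autouse fixture will pip-install litellm first
if it is not already present.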