diff --git a/openai/api_requestor.py b/openai/api_requestor.py
index 64e55e82ef..ea0c7ba957 100644
--- a/openai/api_requestor.py
+++ b/openai/api_requestor.py
@@ -340,6 +340,10 @@ def handle_error_response(self, rbody, rcode, resp, rheaders, stream_error=False
 
         if "internal_message" in error_data:
             error_data["message"] += "\n\n" + error_data["internal_message"]
 
+        if "Did you mean to use v1/chat/completions?" in error_data["message"]:
+            error_data["message"] = "Please use ChatCompletion instead of Completion when using the "\
+                "'gpt-3.5-turbo' models. For more information see "\
+                "https://platform.openai.com/docs/guides/chat/introduction."
         util.log_info(
             "OpenAI API error received",
diff --git a/openai/tests/test_endpoints.py b/openai/tests/test_endpoints.py
index c3fc1094bb..b023afcb85 100644
--- a/openai/tests/test_endpoints.py
+++ b/openai/tests/test_endpoints.py
@@ -78,6 +78,13 @@ def test_timeout_raises_error():
         request_timeout=0.01,
     )
 
+def test_calling_chat_completion_incorrectly_raises_helpful_error():
+    with pytest.raises(error.InvalidRequestError) as excinfo:
+        openai.Completion.create(
+            prompt="test",
+            model="gpt-3.5-turbo",
+        )
+    assert "ChatCompletion" in str(excinfo.value)
 
 def test_timeout_does_not_error():
     # A query that should be fast