From f41f09a50ad680d2664b703a2685de28fd3a32d9 Mon Sep 17 00:00:00 2001 From: jwj7140 Date: Tue, 27 Jun 2023 02:05:46 +0900 Subject: [PATCH 01/11] add api_like_OAI.py --- examples/server/api_like_OAI.py | 194 ++++++++++++++++++++++++++++++++ 1 file changed, 194 insertions(+) create mode 100755 examples/server/api_like_OAI.py diff --git a/examples/server/api_like_OAI.py b/examples/server/api_like_OAI.py new file mode 100755 index 0000000000000..8995adc0345dd --- /dev/null +++ b/examples/server/api_like_OAI.py @@ -0,0 +1,194 @@ +import argparse +from flask import Flask, jsonify, request, Response +import urllib.parse +import requests +import time +import json + + +app = Flask(__name__) + +parser = argparse.ArgumentParser(description="An example of using server.cpp with a similar API to OAI. It must be used together with server.cpp.") +parser.add_argument("--chat-prompt", type=str, help="the top prompt in chat completions(default: A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.)", default='A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.') +parser.add_argument("--user-name", type=str, help="USER name in chat completions(default: USER:)", default="USER:") +parser.add_argument("--ai-name", type=str, help="ASSISTANT name in chat completions(default: ASSISTANT:)", default="ASSISTANT:") +parser.add_argument("--system-name", type=str, help="SYSTEM name in chat completions(default: ASSISTANT's RULE:)", default="ASSISTANT's RULE:") +parser.add_argument("--stop", type=str, help="the end of response in chat completions(default: )", default="") +parser.add_argument("--llama-api", type=str, help="Set the address of server.cpp in llama.cpp(default: http://127.0.0.1:8080)", default='http://127.0.0.1:8080') +parser.add_argument("--api-key", type=str, help="Set the api key to allow only few user(default: NULL)", default="") +parser.add_argument("--host", type=str, help="Set the ip address to listen.(default: 127.0.0.1)", default='127.0.0.1') +parser.add_argument("--port", type=int, help="Set the port to listen.(default: 8081)", default=8081) + +args = parser.parse_args() + +def is_present(json, key): + try: + buf = json[key] + except KeyError: + return False + return True + +#convert chat to prompt +def convert_chat(messages): + prompt = "" + args.chat_prompt + "\n\n" + + for line in messages: + if (line["role"] == "system"): + prompt += f"{args.system_name} {line['content']}\n" + if (line["role"] == "user"): + prompt += f"{args.user_name} {line['content']}\n" + if (line["role"] == "assistant"): + prompt += f"{args.ai_name} {line['content']}{args.stop}\n" + prompt += args.ai_name + + return prompt + +def make_postData(body, chat=False, stream=False): + postData = {} + if (chat): + postData["prompt"] = convert_chat(body["messages"]) + else: + postData["prompt"] = body["prompt"] + if(is_present(body, "temperature")): postData["temperature"] = body["temperature"] + if(is_present(body, "top_k")): postData["top_k"] = body["top_k"] + if(is_present(body, "top_p")): postData["top_p"] = body["top_p"] + if(is_present(body, "max_tokens")): postData["n_predict"] = body["max_tokens"] + if(is_present(body, "presence_penalty")): postData["presence_penalty"] = body["presence_penalty"] + if(is_present(body, "frequency_penalty")): postData["frequency_penalty"] = body["frequency_penalty"] + if(is_present(body, "repeat_penalty")): postData["repeat_penalty"] = 
body["repeat_penalty"] + if(is_present(body, "mirostat")): postData["mirostat"] = body["mirostat"] + if(is_present(body, "mirostat_tau")): postData["mirostat_tau"] = body["mirostat_tau"] + if(is_present(body, "mirostat_eta")): postData["mirostat_eta"] = body["mirostat_eta"] + if(is_present(body, "seed")): postData["seed"] = body["seed"] + if(is_present(body, "logit_bias")): postData["logit_bias"] = [[int(token), body["logit_bias"][token]] for token in body["logit_bias"].keys()].append(args.stop) + if(is_present(body, "stop")): postData["stop"] = body["stop"] + + postData["stream"] = stream + + return postData + +def make_resData(data, chat=False, promptToken=[]): + resData = { + "id": "chatcmpl" if (chat) else "cmpl", + "object": "chat.completion" if (chat) else "text_completion", + "created": int(time.time()), + "model": "LLaMA_CPP", + "promptToken": promptToken, + "usage": { + "prompt_tokens": len(promptToken), + "completion_tokens": data["tokens_predicted"], + "total_tokens": len(promptToken) + data["tokens_predicted"] + } + } + if (chat): + #only one choice is supported + resData["choices"] = [{ + "index": 0, + "message": { + "role": "assistant", + "content": data["content"], + }, + "finish_reason": "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length" + }] + else: + #only one choice is supported + resData["choices"] = [{ + "text": data["content"], + "index": 0, + "logprobs": None, + "finish_reason": "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length" + }] + return resData + +def make_resData_stream(data, chat=False, time_now = 0, start=False): + resData = { + "id": "chatcmpl" if (chat) else "cmpl", + "object": "chat.completion.chunk" if (chat) else "text_completion.chunk", + "created": time_now, + "model": "LLaMA_CPP", + "choices": [ + { + "finish_reason": None, + "index": 0 + } + ] + } + if (chat): + if (start): + resData["choices"][0]["delta"] = { + "role": "assistant" + } + else: + resData["choices"][0]["delta"] = { + "content": data["content"] + } + if (data["stop"]): + resData["choices"][0]["finish_reason"] = "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length" + else: + resData["choices"][0]["text"] = data["content"] + if (data["stop"]): + resData["choices"][0]["finish_reason"] = "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length" + + return resData + + +@app.route('/chat/completions', methods=['POST']) +def chat_completions(): + if (args.api_key != "" and request.headers["Authorization"].split()[1] != args.api_key): + return Response(status=403) + body = request.get_json() + stream = False + if(is_present(body, "stream")): stream = body["stream"] + postData = make_postData(body, chat=True, stream=stream) + + tokenData = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/tokenize"), data=json.dumps({"content": postData["prompt"]})).json() + promptToken = tokenData["tokens"] + + if (not stream): + data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData)) + resData = make_resData(data.json(), chat=True, promptToken=promptToken) + return jsonify(resData) + else: + def generate(): + data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData), stream=True) + time_now = int(time.time()) + resData = make_resData_stream({}, chat=True, time_now=time_now, start=True) + yield 'data: {}\n'.format(json.dumps(resData)) + for line in data.iter_lines(): + if line: + decoded_line = line.decode('utf-8') + resData 
= make_resData_stream(json.loads(decoded_line[6:]), chat=True, time_now=time_now) + yield 'data: {}\n'.format(json.dumps(resData)) + return Response(generate(), mimetype='text/event-stream') + + +@app.route('/completions', methods=['POST']) +def completion(): + if (args.api_key != "" and request.headers["Authorization"].split()[1] != args.api_key): + return Response(status=403) + body = request.get_json() + stream = False + if(is_present(body, "stream")): stream = body["stream"] + postData = make_postData(body, chat=False, stream=stream) + + tokenData = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/tokenize"), data=json.dumps({"content": postData["prompt"]})).json() + promptToken = tokenData["tokens"] + + if (not stream): + data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData)) + resData = make_resData(data.json(), chat=False, promptToken=promptToken) + return jsonify(resData) + else: + def generate(): + data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData), stream=True) + time_now = int(time.time()) + for line in data.iter_lines(): + if line: + decoded_line = line.decode('utf-8') + resData = make_resData_stream(json.loads(decoded_line[6:]), chat=False, time_now=time_now) + yield 'data: {}\n'.format(json.dumps(resData)) + return Response(generate(), mimetype='text/event-stream') + + +if __name__ == '__main__': + app.run(args.host, port=args.port) \ No newline at end of file From cc5de812087472d81360dfe2d7695f3aa240a652 Mon Sep 17 00:00:00 2001 From: jwj7140 Date: Wed, 28 Jun 2023 02:16:00 +0900 Subject: [PATCH 02/11] fix bugs, remove chat format using \n --- examples/server/api_like_OAI.py | 37 +++++++++++++++++++++------------ 1 file changed, 24 insertions(+), 13 deletions(-) diff --git a/examples/server/api_like_OAI.py b/examples/server/api_like_OAI.py index 8995adc0345dd..53afc010d4a76 100755 --- a/examples/server/api_like_OAI.py +++ b/examples/server/api_like_OAI.py @@ -8,12 +8,12 @@ app = Flask(__name__) -parser = argparse.ArgumentParser(description="An example of using server.cpp with a similar API to OAI. It must be used together with server.cpp.") -parser.add_argument("--chat-prompt", type=str, help="the top prompt in chat completions(default: A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.)", default='A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.') -parser.add_argument("--user-name", type=str, help="USER name in chat completions(default: USER:)", default="USER:") -parser.add_argument("--ai-name", type=str, help="ASSISTANT name in chat completions(default: ASSISTANT:)", default="ASSISTANT:") -parser.add_argument("--system-name", type=str, help="SYSTEM name in chat completions(default: ASSISTANT's RULE:)", default="ASSISTANT's RULE:") -parser.add_argument("--stop", type=str, help="the end of response in chat completions(default: )", default="") +parser = argparse.ArgumentParser(description="Convert a LLaMa model to a GGML compatible file") +parser.add_argument("--chat-prompt", type=str, help="the top prompt in chat completions(default: 'A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.\\n')", default='A chat between a curious user and an artificial intelligence assistant. 
The assistant follows the given rules no matter what.\\n') +parser.add_argument("--user-name", type=str, help="USER name in chat completions(default: '\\nUSER: ')", default="\\nUSER: ") +parser.add_argument("--ai-name", type=str, help="ASSISTANT name in chat completions(default: '\\nASSISTANT: ')", default="\\nASSISTANT: ") +parser.add_argument("--system-name", type=str, help="SYSTEM name in chat completions(default: '\\nASSISTANT's RULE: ')", default="\\nASSISTANT's RULE: ") +parser.add_argument("--stop", type=str, help="the end of response in chat completions(default: '')", default="") parser.add_argument("--llama-api", type=str, help="Set the address of server.cpp in llama.cpp(default: http://127.0.0.1:8080)", default='http://127.0.0.1:8080') parser.add_argument("--api-key", type=str, help="Set the api key to allow only few user(default: NULL)", default="") parser.add_argument("--host", type=str, help="Set the ip address to listen.(default: 127.0.0.1)", default='127.0.0.1') @@ -21,6 +21,9 @@ args = parser.parse_args() + +print(len(args.system_name)) + def is_present(json, key): try: buf = json[key] @@ -28,18 +31,25 @@ def is_present(json, key): return False return True + + #convert chat to prompt def convert_chat(messages): - prompt = "" + args.chat_prompt + "\n\n" + prompt = "" + args.chat_prompt.replace("\\n", "\n") + + system_n = args.system_name.replace("\\n", "\n") + user_n = args.user_name.replace("\\n", "\n") + ai_n = args.ai_name.replace("\\n", "\n") + for line in messages: if (line["role"] == "system"): - prompt += f"{args.system_name} {line['content']}\n" + prompt += f"{system_n}{line['content']}" if (line["role"] == "user"): - prompt += f"{args.user_name} {line['content']}\n" + prompt += f"{user_n}{line['content']}" if (line["role"] == "assistant"): - prompt += f"{args.ai_name} {line['content']}{args.stop}\n" - prompt += args.ai_name + prompt += f"{ai_n}{line['content']}{args.stop}" + prompt += ai_n return prompt @@ -60,8 +70,9 @@ def make_postData(body, chat=False, stream=False): if(is_present(body, "mirostat_tau")): postData["mirostat_tau"] = body["mirostat_tau"] if(is_present(body, "mirostat_eta")): postData["mirostat_eta"] = body["mirostat_eta"] if(is_present(body, "seed")): postData["seed"] = body["seed"] - if(is_present(body, "logit_bias")): postData["logit_bias"] = [[int(token), body["logit_bias"][token]] for token in body["logit_bias"].keys()].append(args.stop) - if(is_present(body, "stop")): postData["stop"] = body["stop"] + if(is_present(body, "logit_bias")): postData["logit_bias"] = [[int(token), body["logit_bias"][token]] for token in body["logit_bias"].keys()] + postData["stop"] = [args.stop] + if(is_present(body, "stop")): postData["stop"] += body["stop"] postData["stream"] = stream From e1abf636a40d4c0b97d3cc8235cef0feb8764473 Mon Sep 17 00:00:00 2001 From: jwj7140 Date: Wed, 28 Jun 2023 02:18:03 +0900 Subject: [PATCH 03/11] fix mistakes --- examples/server/api_like_OAI.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/examples/server/api_like_OAI.py b/examples/server/api_like_OAI.py index 53afc010d4a76..5adcb1c5d972a 100755 --- a/examples/server/api_like_OAI.py +++ b/examples/server/api_like_OAI.py @@ -8,7 +8,7 @@ app = Flask(__name__) -parser = argparse.ArgumentParser(description="Convert a LLaMa model to a GGML compatible file") +parser = argparse.ArgumentParser(description="An example of using server.cpp with a similar API to OAI. 
It must be used together with server.cpp.") parser.add_argument("--chat-prompt", type=str, help="the top prompt in chat completions(default: 'A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.\\n')", default='A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.\\n') parser.add_argument("--user-name", type=str, help="USER name in chat completions(default: '\\nUSER: ')", default="\\nUSER: ") parser.add_argument("--ai-name", type=str, help="ASSISTANT name in chat completions(default: '\\nASSISTANT: ')", default="\\nASSISTANT: ") @@ -21,9 +21,6 @@ args = parser.parse_args() - -print(len(args.system_name)) - def is_present(json, key): try: buf = json[key] From a4149aa0c84d6489bd34983f5c578625ef373793 Mon Sep 17 00:00:00 2001 From: jwj7140 Date: Thu, 29 Jun 2023 02:04:37 +0900 Subject: [PATCH 04/11] change token count method --- examples/server/api_like_OAI.py | 23 ++++++++++++++++------- examples/server/server.cpp | 14 +++++++++----- 2 files changed, 25 insertions(+), 12 deletions(-) diff --git a/examples/server/api_like_OAI.py b/examples/server/api_like_OAI.py index 5adcb1c5d972a..02a846320fa4c 100755 --- a/examples/server/api_like_OAI.py +++ b/examples/server/api_like_OAI.py @@ -81,13 +81,14 @@ def make_resData(data, chat=False, promptToken=[]): "object": "chat.completion" if (chat) else "text_completion", "created": int(time.time()), "model": "LLaMA_CPP", - "promptToken": promptToken, "usage": { - "prompt_tokens": len(promptToken), + "prompt_tokens": data["tokens_evaluated"], "completion_tokens": data["tokens_predicted"], - "total_tokens": len(promptToken) + data["tokens_predicted"] + "total_tokens": data["tokens_evaluated"] + data["tokens_predicted"] } } + if (len(promptToken) != 0): + resData["promptToken"] = promptToken if (chat): #only one choice is supported resData["choices"] = [{ @@ -146,11 +147,15 @@ def chat_completions(): return Response(status=403) body = request.get_json() stream = False + tokenize = False if(is_present(body, "stream")): stream = body["stream"] + if(is_present(body, "tokenize")): tokenize = body["tokenize"] postData = make_postData(body, chat=True, stream=stream) - tokenData = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/tokenize"), data=json.dumps({"content": postData["prompt"]})).json() - promptToken = tokenData["tokens"] + promptToken = [] + if (tokenize): + tokenData = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/tokenize"), data=json.dumps({"content": postData["prompt"]})).json() + promptToken = tokenData["tokens"] if (not stream): data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData)) @@ -176,11 +181,15 @@ def completion(): return Response(status=403) body = request.get_json() stream = False + tokenize = False if(is_present(body, "stream")): stream = body["stream"] + if(is_present(body, "tokenize")): tokenize = body["tokenize"] postData = make_postData(body, chat=False, stream=stream) - tokenData = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/tokenize"), data=json.dumps({"content": postData["prompt"]})).json() - promptToken = tokenData["tokens"] + promptToken = [] + if (tokenize): + tokenData = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/tokenize"), data=json.dumps({"content": postData["prompt"]})).json() + promptToken = tokenData["tokens"] if (not stream): data = 
requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData)) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 79df5e84762cd..6ae2e432017db 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -108,6 +108,7 @@ struct llama_server_context { bool has_next_token = false; std::string generated_text; + size_t num_prompt_tokens = 0; size_t num_tokens_predicted = 0; size_t n_past = 0; size_t n_remain = 0; @@ -139,6 +140,7 @@ struct llama_server_context { void rewind() { params.antiprompt.clear(); + num_prompt_tokens = 0; num_tokens_predicted = 0; generated_text = ""; generated_text.reserve(params.n_ctx); @@ -169,17 +171,18 @@ struct llama_server_context { void loadPrompt() { params.prompt.insert(0, 1, ' '); // always add a first space std::vector prompt_tokens = ::llama_tokenize(ctx, params.prompt, true); + num_prompt_tokens = prompt_tokens.size(); if (params.n_keep < 0) { - params.n_keep = (int)prompt_tokens.size(); + params.n_keep = (int)num_prompt_tokens; } params.n_keep = std::min(params.n_ctx - 4, params.n_keep); // if input prompt is too big, truncate like normal - if (prompt_tokens.size() >= (size_t)params.n_ctx) { + if (num_prompt_tokens>= (size_t)params.n_ctx) { const int n_left = (params.n_ctx - params.n_keep) / 2; std::vector new_tokens(prompt_tokens.begin(), prompt_tokens.begin() + params.n_keep); - const int erased_blocks = (prompt_tokens.size() - params.n_keep - n_left - 1) / n_left; + const int erased_blocks = (num_prompt_tokens - params.n_keep - n_left - 1) / n_left; new_tokens.insert(new_tokens.end(), prompt_tokens.begin() + params.n_keep + erased_blocks * n_left, prompt_tokens.end()); std::copy(prompt_tokens.end() - params.n_ctx, prompt_tokens.end(), last_n_tokens.begin()); @@ -193,7 +196,7 @@ struct llama_server_context { truncated = true; prompt_tokens = new_tokens; } else { - const size_t ps = prompt_tokens.size(); + const size_t ps = num_prompt_tokens; std::fill(last_n_tokens.begin(), last_n_tokens.end() - ps, 0); std::copy(prompt_tokens.begin(), prompt_tokens.end(), last_n_tokens.end() - ps); } @@ -201,7 +204,7 @@ struct llama_server_context { // compare the evaluated prompt with the new prompt n_past = common_part(embd, prompt_tokens); embd = prompt_tokens; - if (n_past == prompt_tokens.size()) { + if (n_past == num_prompt_tokens) { // we have to evaluate at least 1 token to generate logits. 
n_past--; } @@ -684,6 +687,7 @@ static json format_final_response(llama_server_context & llama, const std::strin { "stop", true }, { "model", llama.params.model_alias }, { "tokens_predicted", llama.num_tokens_predicted }, + { "tokens_evaluated", llama.num_prompt_tokens }, { "generation_settings", format_generation_settings(llama) }, { "prompt", llama.params.prompt }, { "truncated", llama.truncated }, From d7435fe320ad177f1897f40fcb25bfff8852f6ba Mon Sep 17 00:00:00 2001 From: jwj7140 Date: Fri, 30 Jun 2023 00:03:02 +0900 Subject: [PATCH 05/11] fix whitespace, edit README.md --- examples/server/README.md | 16 ++++++++++++++++ examples/server/api_like_OAI.py | 2 +- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/examples/server/README.md b/examples/server/README.md index fa95c00441bc2..fdd973e233d81 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -190,3 +190,19 @@ Run with bash: ```sh bash chat.sh ``` + +### API like OAI + +API example using Python Flask: [api_like_OAI.py](api_like_OAI.py) +This example must be used with server.cpp + +```sh +python api_like_OAI.py +``` + +After running the API server, you can use it in Python by setting the API base URL. +```python +openai.api_base = "http://:port" +``` + +Then you can utilize llama.cpp as an OpenAI's **chat.completion** or **text_completion** API \ No newline at end of file diff --git a/examples/server/api_like_OAI.py b/examples/server/api_like_OAI.py index 02a846320fa4c..93c9bb49d3b4b 100755 --- a/examples/server/api_like_OAI.py +++ b/examples/server/api_like_OAI.py @@ -70,7 +70,7 @@ def make_postData(body, chat=False, stream=False): if(is_present(body, "logit_bias")): postData["logit_bias"] = [[int(token), body["logit_bias"][token]] for token in body["logit_bias"].keys()] postData["stop"] = [args.stop] if(is_present(body, "stop")): postData["stop"] += body["stop"] - + postData["stream"] = stream return postData From b95016c19bc201631069dcf6581f198538902dda Mon Sep 17 00:00:00 2001 From: jwj7140 Date: Fri, 30 Jun 2023 01:08:34 +0900 Subject: [PATCH 06/11] add newline --- examples/server/README.md | 2 +- examples/server/api_like_OAI.py | 422 ++++++++++++++++---------------- 2 files changed, 212 insertions(+), 212 deletions(-) diff --git a/examples/server/README.md b/examples/server/README.md index fdd973e233d81..848719bc34f8f 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -205,4 +205,4 @@ After running the API server, you can use it in Python by setting the API base U openai.api_base = "http://:port" ``` -Then you can utilize llama.cpp as an OpenAI's **chat.completion** or **text_completion** API \ No newline at end of file +Then you can utilize llama.cpp as an OpenAI's **chat.completion** or **text_completion** API diff --git a/examples/server/api_like_OAI.py b/examples/server/api_like_OAI.py index 93c9bb49d3b4b..01e4bf52828ba 100755 --- a/examples/server/api_like_OAI.py +++ b/examples/server/api_like_OAI.py @@ -1,211 +1,211 @@ -import argparse -from flask import Flask, jsonify, request, Response -import urllib.parse -import requests -import time -import json - - -app = Flask(__name__) - -parser = argparse.ArgumentParser(description="An example of using server.cpp with a similar API to OAI. It must be used together with server.cpp.") -parser.add_argument("--chat-prompt", type=str, help="the top prompt in chat completions(default: 'A chat between a curious user and an artificial intelligence assistant. 
The assistant follows the given rules no matter what.\\n')", default='A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.\\n') -parser.add_argument("--user-name", type=str, help="USER name in chat completions(default: '\\nUSER: ')", default="\\nUSER: ") -parser.add_argument("--ai-name", type=str, help="ASSISTANT name in chat completions(default: '\\nASSISTANT: ')", default="\\nASSISTANT: ") -parser.add_argument("--system-name", type=str, help="SYSTEM name in chat completions(default: '\\nASSISTANT's RULE: ')", default="\\nASSISTANT's RULE: ") -parser.add_argument("--stop", type=str, help="the end of response in chat completions(default: '')", default="") -parser.add_argument("--llama-api", type=str, help="Set the address of server.cpp in llama.cpp(default: http://127.0.0.1:8080)", default='http://127.0.0.1:8080') -parser.add_argument("--api-key", type=str, help="Set the api key to allow only few user(default: NULL)", default="") -parser.add_argument("--host", type=str, help="Set the ip address to listen.(default: 127.0.0.1)", default='127.0.0.1') -parser.add_argument("--port", type=int, help="Set the port to listen.(default: 8081)", default=8081) - -args = parser.parse_args() - -def is_present(json, key): - try: - buf = json[key] - except KeyError: - return False - return True - - - -#convert chat to prompt -def convert_chat(messages): - prompt = "" + args.chat_prompt.replace("\\n", "\n") - - system_n = args.system_name.replace("\\n", "\n") - user_n = args.user_name.replace("\\n", "\n") - ai_n = args.ai_name.replace("\\n", "\n") - - - for line in messages: - if (line["role"] == "system"): - prompt += f"{system_n}{line['content']}" - if (line["role"] == "user"): - prompt += f"{user_n}{line['content']}" - if (line["role"] == "assistant"): - prompt += f"{ai_n}{line['content']}{args.stop}" - prompt += ai_n - - return prompt - -def make_postData(body, chat=False, stream=False): - postData = {} - if (chat): - postData["prompt"] = convert_chat(body["messages"]) - else: - postData["prompt"] = body["prompt"] - if(is_present(body, "temperature")): postData["temperature"] = body["temperature"] - if(is_present(body, "top_k")): postData["top_k"] = body["top_k"] - if(is_present(body, "top_p")): postData["top_p"] = body["top_p"] - if(is_present(body, "max_tokens")): postData["n_predict"] = body["max_tokens"] - if(is_present(body, "presence_penalty")): postData["presence_penalty"] = body["presence_penalty"] - if(is_present(body, "frequency_penalty")): postData["frequency_penalty"] = body["frequency_penalty"] - if(is_present(body, "repeat_penalty")): postData["repeat_penalty"] = body["repeat_penalty"] - if(is_present(body, "mirostat")): postData["mirostat"] = body["mirostat"] - if(is_present(body, "mirostat_tau")): postData["mirostat_tau"] = body["mirostat_tau"] - if(is_present(body, "mirostat_eta")): postData["mirostat_eta"] = body["mirostat_eta"] - if(is_present(body, "seed")): postData["seed"] = body["seed"] - if(is_present(body, "logit_bias")): postData["logit_bias"] = [[int(token), body["logit_bias"][token]] for token in body["logit_bias"].keys()] - postData["stop"] = [args.stop] - if(is_present(body, "stop")): postData["stop"] += body["stop"] - - postData["stream"] = stream - - return postData - -def make_resData(data, chat=False, promptToken=[]): - resData = { - "id": "chatcmpl" if (chat) else "cmpl", - "object": "chat.completion" if (chat) else "text_completion", - "created": int(time.time()), - "model": "LLaMA_CPP", - 
"usage": { - "prompt_tokens": data["tokens_evaluated"], - "completion_tokens": data["tokens_predicted"], - "total_tokens": data["tokens_evaluated"] + data["tokens_predicted"] - } - } - if (len(promptToken) != 0): - resData["promptToken"] = promptToken - if (chat): - #only one choice is supported - resData["choices"] = [{ - "index": 0, - "message": { - "role": "assistant", - "content": data["content"], - }, - "finish_reason": "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length" - }] - else: - #only one choice is supported - resData["choices"] = [{ - "text": data["content"], - "index": 0, - "logprobs": None, - "finish_reason": "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length" - }] - return resData - -def make_resData_stream(data, chat=False, time_now = 0, start=False): - resData = { - "id": "chatcmpl" if (chat) else "cmpl", - "object": "chat.completion.chunk" if (chat) else "text_completion.chunk", - "created": time_now, - "model": "LLaMA_CPP", - "choices": [ - { - "finish_reason": None, - "index": 0 - } - ] - } - if (chat): - if (start): - resData["choices"][0]["delta"] = { - "role": "assistant" - } - else: - resData["choices"][0]["delta"] = { - "content": data["content"] - } - if (data["stop"]): - resData["choices"][0]["finish_reason"] = "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length" - else: - resData["choices"][0]["text"] = data["content"] - if (data["stop"]): - resData["choices"][0]["finish_reason"] = "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length" - - return resData - - -@app.route('/chat/completions', methods=['POST']) -def chat_completions(): - if (args.api_key != "" and request.headers["Authorization"].split()[1] != args.api_key): - return Response(status=403) - body = request.get_json() - stream = False - tokenize = False - if(is_present(body, "stream")): stream = body["stream"] - if(is_present(body, "tokenize")): tokenize = body["tokenize"] - postData = make_postData(body, chat=True, stream=stream) - - promptToken = [] - if (tokenize): - tokenData = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/tokenize"), data=json.dumps({"content": postData["prompt"]})).json() - promptToken = tokenData["tokens"] - - if (not stream): - data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData)) - resData = make_resData(data.json(), chat=True, promptToken=promptToken) - return jsonify(resData) - else: - def generate(): - data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData), stream=True) - time_now = int(time.time()) - resData = make_resData_stream({}, chat=True, time_now=time_now, start=True) - yield 'data: {}\n'.format(json.dumps(resData)) - for line in data.iter_lines(): - if line: - decoded_line = line.decode('utf-8') - resData = make_resData_stream(json.loads(decoded_line[6:]), chat=True, time_now=time_now) - yield 'data: {}\n'.format(json.dumps(resData)) - return Response(generate(), mimetype='text/event-stream') - - -@app.route('/completions', methods=['POST']) -def completion(): - if (args.api_key != "" and request.headers["Authorization"].split()[1] != args.api_key): - return Response(status=403) - body = request.get_json() - stream = False - tokenize = False - if(is_present(body, "stream")): stream = body["stream"] - if(is_present(body, "tokenize")): tokenize = body["tokenize"] - postData = make_postData(body, chat=False, stream=stream) - - promptToken = [] - if (tokenize): - 
tokenData = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/tokenize"), data=json.dumps({"content": postData["prompt"]})).json() - promptToken = tokenData["tokens"] - - if (not stream): - data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData)) - resData = make_resData(data.json(), chat=False, promptToken=promptToken) - return jsonify(resData) - else: - def generate(): - data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData), stream=True) - time_now = int(time.time()) - for line in data.iter_lines(): - if line: - decoded_line = line.decode('utf-8') - resData = make_resData_stream(json.loads(decoded_line[6:]), chat=False, time_now=time_now) - yield 'data: {}\n'.format(json.dumps(resData)) - return Response(generate(), mimetype='text/event-stream') - - -if __name__ == '__main__': - app.run(args.host, port=args.port) \ No newline at end of file +import argparse +from flask import Flask, jsonify, request, Response +import urllib.parse +import requests +import time +import json + + +app = Flask(__name__) + +parser = argparse.ArgumentParser(description="An example of using server.cpp with a similar API to OAI. It must be used together with server.cpp.") +parser.add_argument("--chat-prompt", type=str, help="the top prompt in chat completions(default: 'A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.\\n')", default='A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.\\n') +parser.add_argument("--user-name", type=str, help="USER name in chat completions(default: '\\nUSER: ')", default="\\nUSER: ") +parser.add_argument("--ai-name", type=str, help="ASSISTANT name in chat completions(default: '\\nASSISTANT: ')", default="\\nASSISTANT: ") +parser.add_argument("--system-name", type=str, help="SYSTEM name in chat completions(default: '\\nASSISTANT's RULE: ')", default="\\nASSISTANT's RULE: ") +parser.add_argument("--stop", type=str, help="the end of response in chat completions(default: '')", default="") +parser.add_argument("--llama-api", type=str, help="Set the address of server.cpp in llama.cpp(default: http://127.0.0.1:8080)", default='http://127.0.0.1:8080') +parser.add_argument("--api-key", type=str, help="Set the api key to allow only few user(default: NULL)", default="") +parser.add_argument("--host", type=str, help="Set the ip address to listen.(default: 127.0.0.1)", default='127.0.0.1') +parser.add_argument("--port", type=int, help="Set the port to listen.(default: 8081)", default=8081) + +args = parser.parse_args() + +def is_present(json, key): + try: + buf = json[key] + except KeyError: + return False + return True + + + +#convert chat to prompt +def convert_chat(messages): + prompt = "" + args.chat_prompt.replace("\\n", "\n") + + system_n = args.system_name.replace("\\n", "\n") + user_n = args.user_name.replace("\\n", "\n") + ai_n = args.ai_name.replace("\\n", "\n") + + + for line in messages: + if (line["role"] == "system"): + prompt += f"{system_n}{line['content']}" + if (line["role"] == "user"): + prompt += f"{user_n}{line['content']}" + if (line["role"] == "assistant"): + prompt += f"{ai_n}{line['content']}{args.stop}" + prompt += ai_n + + return prompt + +def make_postData(body, chat=False, stream=False): + postData = {} + if (chat): + postData["prompt"] = convert_chat(body["messages"]) + else: + 
postData["prompt"] = body["prompt"] + if(is_present(body, "temperature")): postData["temperature"] = body["temperature"] + if(is_present(body, "top_k")): postData["top_k"] = body["top_k"] + if(is_present(body, "top_p")): postData["top_p"] = body["top_p"] + if(is_present(body, "max_tokens")): postData["n_predict"] = body["max_tokens"] + if(is_present(body, "presence_penalty")): postData["presence_penalty"] = body["presence_penalty"] + if(is_present(body, "frequency_penalty")): postData["frequency_penalty"] = body["frequency_penalty"] + if(is_present(body, "repeat_penalty")): postData["repeat_penalty"] = body["repeat_penalty"] + if(is_present(body, "mirostat")): postData["mirostat"] = body["mirostat"] + if(is_present(body, "mirostat_tau")): postData["mirostat_tau"] = body["mirostat_tau"] + if(is_present(body, "mirostat_eta")): postData["mirostat_eta"] = body["mirostat_eta"] + if(is_present(body, "seed")): postData["seed"] = body["seed"] + if(is_present(body, "logit_bias")): postData["logit_bias"] = [[int(token), body["logit_bias"][token]] for token in body["logit_bias"].keys()] + postData["stop"] = [args.stop] + if(is_present(body, "stop")): postData["stop"] += body["stop"] + + postData["stream"] = stream + + return postData + +def make_resData(data, chat=False, promptToken=[]): + resData = { + "id": "chatcmpl" if (chat) else "cmpl", + "object": "chat.completion" if (chat) else "text_completion", + "created": int(time.time()), + "model": "LLaMA_CPP", + "usage": { + "prompt_tokens": data["tokens_evaluated"], + "completion_tokens": data["tokens_predicted"], + "total_tokens": data["tokens_evaluated"] + data["tokens_predicted"] + } + } + if (len(promptToken) != 0): + resData["promptToken"] = promptToken + if (chat): + #only one choice is supported + resData["choices"] = [{ + "index": 0, + "message": { + "role": "assistant", + "content": data["content"], + }, + "finish_reason": "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length" + }] + else: + #only one choice is supported + resData["choices"] = [{ + "text": data["content"], + "index": 0, + "logprobs": None, + "finish_reason": "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length" + }] + return resData + +def make_resData_stream(data, chat=False, time_now = 0, start=False): + resData = { + "id": "chatcmpl" if (chat) else "cmpl", + "object": "chat.completion.chunk" if (chat) else "text_completion.chunk", + "created": time_now, + "model": "LLaMA_CPP", + "choices": [ + { + "finish_reason": None, + "index": 0 + } + ] + } + if (chat): + if (start): + resData["choices"][0]["delta"] = { + "role": "assistant" + } + else: + resData["choices"][0]["delta"] = { + "content": data["content"] + } + if (data["stop"]): + resData["choices"][0]["finish_reason"] = "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length" + else: + resData["choices"][0]["text"] = data["content"] + if (data["stop"]): + resData["choices"][0]["finish_reason"] = "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length" + + return resData + + +@app.route('/chat/completions', methods=['POST']) +def chat_completions(): + if (args.api_key != "" and request.headers["Authorization"].split()[1] != args.api_key): + return Response(status=403) + body = request.get_json() + stream = False + tokenize = False + if(is_present(body, "stream")): stream = body["stream"] + if(is_present(body, "tokenize")): tokenize = body["tokenize"] + postData = make_postData(body, chat=True, stream=stream) + + promptToken = [] + if (tokenize): + tokenData = 
requests.request("POST", urllib.parse.urljoin(args.llama_api, "/tokenize"), data=json.dumps({"content": postData["prompt"]})).json() + promptToken = tokenData["tokens"] + + if (not stream): + data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData)) + resData = make_resData(data.json(), chat=True, promptToken=promptToken) + return jsonify(resData) + else: + def generate(): + data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData), stream=True) + time_now = int(time.time()) + resData = make_resData_stream({}, chat=True, time_now=time_now, start=True) + yield 'data: {}\n'.format(json.dumps(resData)) + for line in data.iter_lines(): + if line: + decoded_line = line.decode('utf-8') + resData = make_resData_stream(json.loads(decoded_line[6:]), chat=True, time_now=time_now) + yield 'data: {}\n'.format(json.dumps(resData)) + return Response(generate(), mimetype='text/event-stream') + + +@app.route('/completions', methods=['POST']) +def completion(): + if (args.api_key != "" and request.headers["Authorization"].split()[1] != args.api_key): + return Response(status=403) + body = request.get_json() + stream = False + tokenize = False + if(is_present(body, "stream")): stream = body["stream"] + if(is_present(body, "tokenize")): tokenize = body["tokenize"] + postData = make_postData(body, chat=False, stream=stream) + + promptToken = [] + if (tokenize): + tokenData = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/tokenize"), data=json.dumps({"content": postData["prompt"]})).json() + promptToken = tokenData["tokens"] + + if (not stream): + data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData)) + resData = make_resData(data.json(), chat=False, promptToken=promptToken) + return jsonify(resData) + else: + def generate(): + data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData), stream=True) + time_now = int(time.time()) + for line in data.iter_lines(): + if line: + decoded_line = line.decode('utf-8') + resData = make_resData_stream(json.loads(decoded_line[6:]), chat=False, time_now=time_now) + yield 'data: {}\n'.format(json.dumps(resData)) + return Response(generate(), mimetype='text/event-stream') + + +if __name__ == '__main__': + app.run(args.host, port=args.port) From 377ecf9e9b86e30dde2919f32816d3ffb62c8ace Mon Sep 17 00:00:00 2001 From: jwj7140 Date: Sun, 2 Jul 2023 20:17:03 +0900 Subject: [PATCH 07/11] fix bugs --- examples/server/api_like_OAI.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/examples/server/api_like_OAI.py b/examples/server/api_like_OAI.py index 01e4bf52828ba..6eee550c6f35c 100755 --- a/examples/server/api_like_OAI.py +++ b/examples/server/api_like_OAI.py @@ -37,6 +37,7 @@ def convert_chat(messages): system_n = args.system_name.replace("\\n", "\n") user_n = args.user_name.replace("\\n", "\n") ai_n = args.ai_name.replace("\\n", "\n") + stop = args.stop.replace("\\n", "\n") for line in messages: @@ -45,8 +46,8 @@ def convert_chat(messages): if (line["role"] == "user"): prompt += f"{user_n}{line['content']}" if (line["role"] == "assistant"): - prompt += f"{ai_n}{line['content']}{args.stop}" - prompt += ai_n + prompt += f"{ai_n}{line['content']}{stop}" + prompt += ai_n.rstrip() return prompt From 7dcffd7a03305739823a5bbfa1f1c072cd9a39e9 Mon Sep 17 00:00:00 2001 From: jwj7140 Date: Sun, 2 Jul 2023 20:45:29 +0900 Subject: [PATCH 
08/11] set n_keep to -1 --- examples/server/api_like_OAI.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/server/api_like_OAI.py b/examples/server/api_like_OAI.py index 6eee550c6f35c..453e1ec042492 100755 --- a/examples/server/api_like_OAI.py +++ b/examples/server/api_like_OAI.py @@ -71,7 +71,7 @@ def make_postData(body, chat=False, stream=False): if(is_present(body, "logit_bias")): postData["logit_bias"] = [[int(token), body["logit_bias"][token]] for token in body["logit_bias"].keys()] postData["stop"] = [args.stop] if(is_present(body, "stop")): postData["stop"] += body["stop"] - + postData["n_keep"] = -1 postData["stream"] = stream return postData From f713dd515d3dc4b7bc58546b6188bb3ba105bad6 Mon Sep 17 00:00:00 2001 From: jwj7140 Date: Mon, 3 Jul 2023 00:50:46 +0900 Subject: [PATCH 09/11] add /v1/ endpoints binding --- examples/server/api_like_OAI.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/server/api_like_OAI.py b/examples/server/api_like_OAI.py index 453e1ec042492..4567f5eed6e50 100755 --- a/examples/server/api_like_OAI.py +++ b/examples/server/api_like_OAI.py @@ -143,6 +143,7 @@ def make_resData_stream(data, chat=False, time_now = 0, start=False): @app.route('/chat/completions', methods=['POST']) +@app.route('/v1/chat/completions', methods=['POST']) def chat_completions(): if (args.api_key != "" and request.headers["Authorization"].split()[1] != args.api_key): return Response(status=403) @@ -177,6 +178,7 @@ def generate(): @app.route('/completions', methods=['POST']) +@app.route('/v1/completions', methods=['POST']) def completion(): if (args.api_key != "" and request.headers["Authorization"].split()[1] != args.api_key): return Response(status=403) From 41f7a5004ac98aed6217f0f8549c902123011ce6 Mon Sep 17 00:00:00 2001 From: jwj7140 Date: Wed, 5 Jul 2023 01:02:05 +0900 Subject: [PATCH 10/11] fix bug & add truncation return --- examples/server/api_like_OAI.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/examples/server/api_like_OAI.py b/examples/server/api_like_OAI.py index 4567f5eed6e50..e07324ba70274 100755 --- a/examples/server/api_like_OAI.py +++ b/examples/server/api_like_OAI.py @@ -69,7 +69,10 @@ def make_postData(body, chat=False, stream=False): if(is_present(body, "mirostat_eta")): postData["mirostat_eta"] = body["mirostat_eta"] if(is_present(body, "seed")): postData["seed"] = body["seed"] if(is_present(body, "logit_bias")): postData["logit_bias"] = [[int(token), body["logit_bias"][token]] for token in body["logit_bias"].keys()] - postData["stop"] = [args.stop] + if (args.stop != ""): + postData["stop"] = [args.stop] + else: + postData["stop"] = [] if(is_present(body, "stop")): postData["stop"] += body["stop"] postData["n_keep"] = -1 postData["stream"] = stream @@ -81,6 +84,7 @@ def make_resData(data, chat=False, promptToken=[]): "id": "chatcmpl" if (chat) else "cmpl", "object": "chat.completion" if (chat) else "text_completion", "created": int(time.time()), + "truncated": data["truncated"], "model": "LLaMA_CPP", "usage": { "prompt_tokens": data["tokens_evaluated"], @@ -196,6 +200,7 @@ def completion(): if (not stream): data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData)) + print(data.json()) resData = make_resData(data.json(), chat=False, promptToken=promptToken) return jsonify(resData) else: @@ -210,5 +215,11 @@ def generate(): return Response(generate(), mimetype='text/event-stream') + + + + + + if __name__ == '__main__': 
app.run(args.host, port=args.port) From 93e69abe126c9a28bf4096682a36206c1cb327ea Mon Sep 17 00:00:00 2001 From: jwj7140 Date: Wed, 5 Jul 2023 01:08:25 +0900 Subject: [PATCH 11/11] print json --- examples/server/api_like_OAI.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/examples/server/api_like_OAI.py b/examples/server/api_like_OAI.py index e07324ba70274..aa325a03ee444 100755 --- a/examples/server/api_like_OAI.py +++ b/examples/server/api_like_OAI.py @@ -165,6 +165,7 @@ def chat_completions(): if (not stream): data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData)) + print(data.json()) resData = make_resData(data.json(), chat=True, promptToken=promptToken) return jsonify(resData) else: @@ -214,12 +215,5 @@ def generate(): yield 'data: {}\n'.format(json.dumps(resData)) return Response(generate(), mimetype='text/event-stream') - - - - - - - if __name__ == '__main__': app.run(args.host, port=args.port)
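
Usage notes on the finished series (sketches against the final state of the file, not part of the patches themselves):

Taken together, the series leaves `api_like_OAI.py` translating OpenAI-style requests into `server.cpp` `/completion` calls. With the default `--chat-prompt`, `--system-name`, `--user-name`, and `--ai-name` values, `convert_chat()` flattens a messages list into a single prompt string; for example, a system message "Answer briefly." followed by a user message "Hello!" renders as:

```
A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.

ASSISTANT's RULE: Answer briefly.
USER: Hello!
ASSISTANT:
```

The trailing assistant tag is `rstrip()`ed as of PATCH 07/11, so generation begins directly after `ASSISTANT:`.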
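To exercise the proxy the way the README section suggests, the pre-1.0 `openai` Python package (the module-level `api_base`/`ChatCompletion` interface) can be pointed at it. The snippet below is a minimal sketch assuming the defaults from the series: host 127.0.0.1, port 8081, no `--api-key`, and the `/v1/` route bindings added in PATCH 09/11. The `model` value is arbitrary; the proxy ignores it and always reports `LLaMA_CPP`.

```python
import openai  # pre-1.0 interface with module-level api_base/ChatCompletion

openai.api_base = "http://127.0.0.1:8081/v1"  # the bare /chat/completions route also works
openai.api_key = "dummy"  # only checked when the proxy is started with --api-key

# Non-streaming chat request; make_postData() maps max_tokens to n_predict
# before forwarding the flattened prompt to server.cpp's /completion.
resp = openai.ChatCompletion.create(
    model="anything",  # ignored; responses are always tagged "LLaMA_CPP"
    messages=[
        {"role": "system", "content": "Answer briefly."},
        {"role": "user", "content": "Hello!"},
    ],
    max_tokens=64,
)
print(resp["choices"][0]["message"]["content"])
```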
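Streaming can be checked with nothing more than the `requests` library the script itself uses; this sketch likewise assumes the default local address. Note that the proxy separates events with a single `\n` (rather than the blank line most SSE parsers expect), so the loop below reads the raw `data: {...}` lines directly, mirroring how the script consumes server.cpp's own stream.

```python
import json
import requests

# Stream a chat completion from the proxy. The first chunk carries the
# assistant role in "delta", later chunks carry "content", and the final
# chunk sets finish_reason to "stop" or "length".
with requests.post(
    "http://127.0.0.1:8081/v1/chat/completions",
    json={"messages": [{"role": "user", "content": "Hi"}], "stream": True},
    stream=True,
) as resp:
    for line in resp.iter_lines():
        if line:
            chunk = json.loads(line.decode("utf-8")[len("data: "):])
            print(chunk["choices"][0].get("delta", {}).get("content", ""),
                  end="", flush=True)
print()
```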