+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import time
+
 import streamlit as st
-from openai import OpenAI
-
-with st.sidebar:
-    openai_api_key = st.text_input(
-        "OpenAI API Key", key="chatbot_api_key", type="password"
-    )
-    "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
-    "[View the source code](https://github.com/streamlit/llm-examples/blob/main/Chatbot.py)"
-    "[](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
-
-st.title("💬 Chatbot")
-
-if "messages" not in st.session_state:
-    st.session_state["messages"] = [
-        {
-            "role": "system",
-            "content": "You're an assistant. Be brief, no yapping. Use as few words as possible to respond to the users' questions.",
-        },
-        {"role": "assistant", "content": "How can I help you?"},
-    ]
-
-for msg in st.session_state.messages:
-    st.chat_message(msg["role"]).write(msg["content"])
-
-if prompt := st.chat_input():
-    client = OpenAI(
-        # This is the default and can be omitted
-        base_url="http://127.0.0.1:5000/v1",
-        api_key="YOURMOTHER",
-    )
-
-    st.session_state.messages.append({"role": "user", "content": prompt})
-    st.chat_message("user").write(prompt)
-    response = client.chat.completions.create(
-        model="stories15m", messages=st.session_state.messages, max_tokens=64
-    )
-    msg = response.choices[0].message.content
-    st.session_state.messages.append({"role": "assistant", "content": msg})
-    st.chat_message("assistant").write(msg)
+from api.api import CompletionRequest, OpenAiApiGenerator
+
+from build.builder import BuilderArgs, TokenizerArgs
+
+from generate import GeneratorArgs
+
+
+def main(args):
+    builder_args = BuilderArgs.from_args(args)
+    speculative_builder_args = BuilderArgs.from_speculative_args(args)
+    tokenizer_args = TokenizerArgs.from_args(args)
+    generator_args = GeneratorArgs.from_args(args)
+    generator_args.chat_mode = False
+
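+    # st.cache_resource keeps one generator instance alive across Streamlit
+    # reruns, so the model is built once rather than on every interaction.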
+    @st.cache_resource
+    def initialize_generator() -> OpenAiApiGenerator:
+        return OpenAiApiGenerator(
+            builder_args,
+            speculative_builder_args,
+            tokenizer_args,
+            generator_args,
+            args.profile,
+            args.quantize,
+            args.draft_quantize,
+        )
+
+    gen = initialize_generator()
+
+    st.title("torchchat")
+
+    # Initialize chat history
+    if "messages" not in st.session_state:
+        st.session_state.messages = []
+
+    # Display chat messages from history on app rerun
+    for message in st.session_state.messages:
+        with st.chat_message(message["role"]):
+            st.markdown(message["content"])
+
+    # Accept user input
+    if prompt := st.chat_input("What is up?"):
+        # Add user message to chat history
+        st.session_state.messages.append({"role": "user", "content": prompt})
+        # Display user message in chat message container
+        with st.chat_message("user"):
+            st.markdown(prompt)
+
+        # Display assistant response in chat message container
+        with st.chat_message("assistant"), st.status(
+            "Generating... ", expanded=True
+        ) as status:
+
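+            # Build a single-turn completion request from the latest prompt;
+            # the accumulated chat history is not forwarded to the generator.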
+            req = CompletionRequest(
+                model=gen.builder_args.checkpoint_path,
+                prompt=prompt,
+                temperature=generator_args.temperature,
+                messages=[],
+            )
+
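+            # Adapt the OpenAI-style chunk stream into a plain text stream,
+            # skipping llama3 special tokens and counting chunks to report an
+            # average tokens/second figure when generation finishes.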
+            def unwrap(completion_generator):
+                start = time.time()
+                tokcount = 0
+                for chunk_response in completion_generator:
+                    content = chunk_response.choices[0].delta.content
+                    if not gen.is_llama3_model or content not in set(
+                        gen.tokenizer.special_tokens.keys()
+                    ):
+                        yield content
+                    if content == gen.tokenizer.eos_id():
+                        yield "."
+                    tokcount += 1
+                status.update(
+                    label="Done, averaged {:.2f} tokens/second".format(
+                        tokcount / (time.time() - start)
+                    ),
+                    state="complete",
+                )
+
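+            # st.write_stream renders the chunks incrementally and returns the
+            # concatenated text once the generator is exhausted.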
+            response = st.write_stream(unwrap(gen.completion(req)))
+
+        # Add assistant response to chat history
+        st.session_state.messages.append({"role": "assistant", "content": response})