Add ability to select models and edit system/assistant prompts #1051

Draft · wants to merge 1 commit into main
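In summary, the browser now builds its sidebar model list from the server's OpenAI-compatible /v1/models endpoint, lets the user switch the model used for chat completions, and makes the system and assistant prompts editable (enabled only when the selected model id looks like an instruct model). A minimal sketch of the model-listing call the sidebar relies on, assuming a torchchat server is already running on 127.0.0.1:5000 with the same values used in browser.py:

```python
from openai import OpenAI

# Values mirror browser.py; the api_key just needs to be any non-empty string.
client = OpenAI(base_url="http://127.0.0.1:5000/v1", api_key="813")

# GET /v1/models: each entry's id becomes an option in the sidebar selectbox.
for model in client.models.list().data:
    print(model.id)
```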
74 changes: 60 additions & 14 deletions browser/browser.py
@@ -1,36 +1,82 @@
 import time
 
 import streamlit as st
 from openai import OpenAI
 
 st.title("torchchat")
 
-start_state = [
-    {
-        "role": "system",
-        "content": "You're an assistant. Answer questions directly, be brief, and have fun.",
-    },
-    {"role": "assistant", "content": "How can I help you?"},
-]
+client = OpenAI(
+    base_url="http://127.0.0.1:5000/v1",
+    api_key="813",  # The OpenAI API requires an API key, but since we don't consume it, this can be any non-empty string.
+)
 
+
 with st.sidebar:
     response_max_tokens = st.slider(
         "Max Response Tokens", min_value=10, max_value=1000, value=250, step=10
     )
+    st.divider()
+
+    # Build the model list and allow the user to change the model running on the server.
+    try:
+        models = client.models.list().data
+        model_keys = [model.id for model in models]
+    except Exception:
+        # The server may be unreachable; fall back to an empty model list.
+        models = []
+        model_keys = []
+    selected_model = st.selectbox(
+        label="Model",
+        options=model_keys,
+    )
+    # st.selectbox returns None when the options list is empty.
+    is_instruct_model = selected_model is not None and "instruct" in selected_model.lower()
+
+    st.divider()
+
+    # Change the system prompt and the default chat message.
+    system_prompt = st.text_area(
+        label="System Prompt",
+        value=(
+            "You're an assistant. Answer questions directly, be brief, and have fun."
+            if is_instruct_model
+            else f'Selected model "{selected_model}" doesn\'t support chat.'
+        ),
+        disabled=not is_instruct_model,
+    )
+    assistant_prompt = st.text_area(
+        label="Assistant Prompt",
+        value=(
+            "How can I help you?"
+            if is_instruct_model
+            else f'Selected model "{selected_model}" doesn\'t support chat.'
+        ),
+        disabled=not is_instruct_model,
+    )
+
+    st.divider()
+
+    # Manage chat history and prompts.
+    start_state = (
+        [
+            {
+                "role": "system",
+                "content": system_prompt,
+            },
+            {"role": "assistant", "content": assistant_prompt},
+        ]
+        if is_instruct_model
+        else []
+    )
     if st.button("Reset Chat", type="primary"):
         st.session_state["messages"] = start_state
 
-if "messages" not in st.session_state:
-    st.session_state["messages"] = start_state
+st.session_state["messages"] = start_state
+
 
 for msg in st.session_state.messages:
     st.chat_message(msg["role"]).write(msg["content"])
 
 if prompt := st.chat_input():
-    client = OpenAI(
-        base_url="http://127.0.0.1:5000/v1",
-        api_key="813",  # The OpenAI API requires an API key, but since we don't consume it, this can be any non-empty string.
-    )
-
     st.session_state.messages.append({"role": "user", "content": prompt})
     st.chat_message("user").write(prompt)
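The remaining hunk only swaps the hardcoded model name inside the streaming call; the get_streamed_completion helper itself is collapsed in this view. For context, a generator of roughly this shape would adapt OpenAI streaming chunks for st.write_stream. This is a sketch, not the actual helper (which presumably also uses the time import for throughput bookkeeping):

```python
def get_streamed_completion(completion_generator):
    # Yield each incremental piece of assistant text from the stream;
    # chunks with no content delta (e.g. role-only chunks) are skipped.
    for chunk in completion_generator:
        delta = chunk.choices[0].delta.content
        if delta is not None:
            yield delta
```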
@@ -56,7 +102,7 @@ def get_streamed_completion(completion_generator):
         response = st.write_stream(
             get_streamed_completion(
                 client.chat.completions.create(
-                    model="llama3",
+                    model=selected_model,
                     messages=st.session_state.messages,
                     max_tokens=response_max_tokens,
                     stream=True,
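Taken together, the request the browser issues after this change looks roughly like the sketch below. The model id "llama3.1-instruct" is hypothetical, and the messages and max_tokens echo the sidebar defaults from the diff:

```python
from openai import OpenAI

client = OpenAI(base_url="http://127.0.0.1:5000/v1", api_key="813")

stream = client.chat.completions.create(
    model="llama3.1-instruct",  # hypothetical id picked from /v1/models
    messages=[
        {"role": "system", "content": "You're an assistant. Answer questions directly, be brief, and have fun."},
        {"role": "assistant", "content": "How can I help you?"},
        {"role": "user", "content": "What is torchchat?"},
    ],
    max_tokens=250,  # sidebar default for "Max Response Tokens"
    stream=True,
)
for chunk in stream:
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="", flush=True)
```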