Update app.py
app.py
CHANGED
```python
import streamlit as st
from gradio_client import Client

# Constants
APP_TITLE = "Llama2 70B Chatbot"
APP_DESCRIPTION = """
This application demonstrates the Llama-2-70b chatbot model by Meta,
fine-tuned for chat instructions. You can interact with the model and ask questions.
"""

# Initialize client
llama2_client = Client("https://ysharma-explore-llamav2-with-tgi.hf.space/")

with st.sidebar:
    system_prompt_input = st.text_input("Optional system prompt:")
    temperature_slider = st.slider("Temperature", min_value=0.0, max_value=1.0, value=0.9, step=0.05)
    max_new_tokens_slider = st.slider("Max new tokens", min_value=0.0, max_value=4096.0, value=4096.0, step=64.0)
    topp_slider = st.slider("Top-p (nucleus sampling)", min_value=0.0, max_value=1.0, value=0.6, step=0.05)
    repetition_penalty_slider = st.slider("Repetition penalty", min_value=0.0, max_value=2.0, value=1.2, step=0.05)

# Prediction function
def get_llama2_response(user_message, system_prompt, temperature, max_new_tokens, topp, repetition_penalty):
    with st.status("Requesting Llama-2"):
        st.write("Requesting API...")
        response = llama2_client.predict(
            user_message,
            system_prompt,
            temperature,
            max_new_tokens,
            topp,
            repetition_penalty,
            api_name="/chat"
        )
        st.write("Done")
    return response

# Streamlit UI
st.title(APP_TITLE)
st.write(APP_DESCRIPTION)

if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

# Display chat messages from history on app rerun
for message in st.session_state.chat_history:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# React to user input
if user_input := st.chat_input("Ask Llama-2-70B anything..."):
    # Display user message in chat message container
    st.chat_message("user", avatar="🧑‍💻").markdown(user_input)
    # Add user message to chat history
    st.session_state.chat_history.append({"role": "user", "content": user_input})

    response = get_llama2_response(
        user_input,
        system_prompt_input,
        temperature_slider,
        max_new_tokens_slider,
        topp_slider,
        repetition_penalty_slider
    )

    # Display assistant response in chat message container
    with st.chat_message("assistant", avatar="🦙"):
        st.markdown(response)
    # Add assistant response to chat history
    st.session_state.chat_history.append({"role": "assistant", "content": response})
```