Update app.py
app.py CHANGED
@@ -3,7 +3,7 @@ import requests
 import streamlit as st
 from llama_cpp import Llama
 
-# ✅
+# ✅ Streamlit Page Config (Must be first)
 st.set_page_config(page_title="Phi-3 Mini Chatbot", layout="centered")
 
 # ✅ Define model path
@@ -25,8 +25,13 @@ if not os.path.exists(MODEL_PATH):
     st.stop()
 
 # ✅ Load model
-
-
+try:
+    if "model" not in st.session_state:
+        st.session_state["model"] = Llama(model_path=MODEL_PATH, n_ctx=4096)
+    st.write("✅ Model loaded successfully!")
+except Exception as e:
+    st.error(f"🚨 Error loading model: {e}")
+    st.stop()
 
 # Streamlit UI setup
 st.title("🤖 Phi-3 Mini Chatbot")
@@ -52,13 +57,15 @@ if st.button("Send") and user_input:
     st.chat_message("user").write(user_input)
 
     # Generate response
-
-
-
-
-
-
-
+    try:
+        response = st.session_state["model"].create_completion(
+            prompt=user_input, max_tokens=1024, temperature=0.7, top_p=0.9
+        )
+        response_text = response["choices"][0]["text"].strip()
+        st.session_state["messages"].append(("assistant", response_text))
+        st.chat_message("assistant").write(response_text)
+    except Exception as e:
+        st.error(f"🚨 Error generating response: {e}")
 
 # Run the app with: streamlit run app.py
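
Note on the model-loading change: storing the Llama instance in st.session_state keeps one copy per browser session. A minimal sketch of an alternative, assuming a recent Streamlit release with the st.cache_resource decorator; the model path below is hypothetical, app.py defines its own MODEL_PATH:

# Sketch: cache the model with st.cache_resource instead of st.session_state;
# this shares a single instance across all sessions and reruns.
import streamlit as st
from llama_cpp import Llama

MODEL_PATH = "models/phi-3-mini.gguf"  # hypothetical path for illustration

@st.cache_resource
def load_model(path: str) -> Llama:
    # Runs once per process; later calls return the cached instance.
    return Llama(model_path=path, n_ctx=4096)

model = load_model(MODEL_PATH)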
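
Note on the response-generation change: create_completion sends the user text as a raw prompt. Instruct-tuned models such as Phi-3 Mini generally expect their chat template, and llama-cpp-python's create_chat_completion typically applies the template stored in the GGUF metadata. A minimal sketch under that assumption; the model path and prompt are hypothetical:

# Sketch: generate through the chat API so the model's chat template is applied.
from llama_cpp import Llama

model = Llama(model_path="models/phi-3-mini.gguf", n_ctx=4096)  # hypothetical path
user_input = "Explain what a GGUF file is in one sentence."

response = model.create_chat_completion(
    messages=[{"role": "user", "content": user_input}],
    max_tokens=1024,
    temperature=0.7,
    top_p=0.9,
)
# Chat completions return text under "message", not "text" as in create_completion.
response_text = response["choices"][0]["message"]["content"].strip()
print(response_text)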