Tanifh committed on
Commit
7ff24a3
·
verified ·
1 Parent(s): dbd53de

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -20
app.py CHANGED
@@ -21,7 +21,7 @@ if not os.path.exists(MODEL_PATH):
21
  f.write(chunk)
22
  st.success("Model downloaded successfully!")
23
  except requests.exceptions.HTTPError as e:
24
- st.error(f"🚨 Model download failed: {e}")
25
  st.stop()
26
 
27
  # ✅ Load optimized model with reduced context length
@@ -34,33 +34,30 @@ try:
34
  numa=True,
35
  n_batch=64 # ✅ Faster token processing
36
  )
37
- st.write("✅ Model loaded successfully!")
38
  except Exception as e:
39
- st.error(f"🚨 Error loading model: {e}")
40
  st.stop()
41
 
42
- # Streamlit UI setup
43
  st.title("🤖 Phi-3 Mini Chatbot")
44
- st.markdown("Enter a message and get responses from Phi-3 Mini!")
45
 
46
  # Chat history
47
  if "messages" not in st.session_state:
48
  st.session_state["messages"] = []
49
 
50
  # Display chat history
51
- for message in st.session_state["messages"]:
52
- role, text = message
53
- if role == "user":
54
- st.chat_message("user").write(text)
55
- else:
56
- st.chat_message("assistant").write(text)
57
 
58
  # Input field for user message
59
  user_input = st.text_input("Your Message:", "", key="user_input")
60
  if st.button("Send") and user_input:
61
  # Add user input to chat history
62
  st.session_state["messages"].append(("user", user_input))
63
- st.chat_message("user").write(user_input)
 
64
 
65
  # ✅ Use a minimal prompt format (no system message)
66
  formatted_messages = [{"role": "user", "content": user_input}]
@@ -68,24 +65,24 @@ if st.button("Send") and user_input:
68
  # ✅ Speed improvements: Increase max_tokens for full responses & ensure proper stopping
69
  response_data = st.session_state["model"].create_chat_completion(
70
  messages=formatted_messages,
71
- max_tokens=128, temperature=0.5, top_p=0.8,
72
  stop=["\n", "<|endoftext|>"], # ✅ Ensures responses end properly
73
- stream=False # ✅ No streaming for debugging
74
  )
75
 
76
- # ✅ Debugging output
77
- st.write("🔍 Debug: Raw Model Response:", response_data)
78
-
79
  if "choices" in response_data and len(response_data["choices"]) > 0:
80
  choice = response_data["choices"][0]
81
  if "message" in choice and "content" in choice["message"]:
82
  response_text = choice["message"]["content"].strip()
83
  st.session_state["messages"].append(("assistant", response_text))
84
- st.chat_message("assistant").write(response_text)
 
85
  else:
86
- st.error("⚠️ No valid response content found.")
87
  else:
88
- st.error("⚠️ Model did not return any choices.")
 
89
 
90
 
91
 
 
21
  f.write(chunk)
22
  st.success("Model downloaded successfully!")
23
  except requests.exceptions.HTTPError as e:
24
+ st.error("🚨 Model download failed. Please try again later.")
25
  st.stop()
26
 
27
  # ✅ Load optimized model with reduced context length
 
34
  numa=True,
35
  n_batch=64 # ✅ Faster token processing
36
  )
 
37
  except Exception as e:
38
+ st.error("🚨 Error loading model. Please restart the application.")
39
  st.stop()
40
 
41
+ # 🌟 User-Friendly Chat Interface
42
  st.title("🤖 Phi-3 Mini Chatbot")
43
+ st.markdown("### Ask me anything and I'll provide helpful responses!")
44
 
45
  # Chat history
46
  if "messages" not in st.session_state:
47
  st.session_state["messages"] = []
48
 
49
  # Display chat history
50
+ for role, text in st.session_state["messages"]:
51
+ with st.chat_message(role):
52
+ st.write(text)
 
 
 
53
 
54
  # Input field for user message
55
  user_input = st.text_input("Your Message:", "", key="user_input")
56
  if st.button("Send") and user_input:
57
  # Add user input to chat history
58
  st.session_state["messages"].append(("user", user_input))
59
+ with st.chat_message("user"):
60
+ st.write(user_input)
61
 
62
  # ✅ Use a minimal prompt format (no system message)
63
  formatted_messages = [{"role": "user", "content": user_input}]
 
65
  # ✅ Speed improvements: Increase max_tokens for full responses & ensure proper stopping
66
  response_data = st.session_state["model"].create_chat_completion(
67
  messages=formatted_messages,
68
+ max_tokens=96, temperature=0.5, top_p=0.8,
69
  stop=["\n", "<|endoftext|>"], # ✅ Ensures responses end properly
70
+ stream=False
71
  )
72
 
73
+ # ✅ Extract and display response
 
 
74
  if "choices" in response_data and len(response_data["choices"]) > 0:
75
  choice = response_data["choices"][0]
76
  if "message" in choice and "content" in choice["message"]:
77
  response_text = choice["message"]["content"].strip()
78
  st.session_state["messages"].append(("assistant", response_text))
79
+ with st.chat_message("assistant"):
80
+ st.write(response_text)
81
  else:
82
+ st.error("⚠️ Unable to generate a response. Please try again.")
83
  else:
84
+ st.error("⚠️ No response received. Please ask again.")
85
+
86
 
87
 
88