karthikeyan-r committed on
Commit
f0b46d5
·
verified ·
1 Parent(s): 3736789

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -8
app.py CHANGED
@@ -3,7 +3,7 @@ from transformers import T5ForConditionalGeneration, T5Tokenizer, pipeline
3
  import torch
4
 
5
  # Streamlit app setup
6
- st.set_page_config(page_title="Hugging Face Chat", layout="wide")
7
 
8
  # Sidebar: Model controls
9
  st.sidebar.title("Model Controls")
@@ -52,12 +52,16 @@ if clear_model_button:
52
  st.success("Model cleared.")
53
 
54
  # Chat Conversation Display
55
- st.subheader("Conversation")
56
- for idx, (speaker, message) in enumerate(st.session_state["conversation"]):
57
- if speaker == "You":
58
- st.markdown(f"**You:** {message}")
59
- else:
60
- st.markdown(f"**Model:** {message}")
 
 
 
 
61
 
62
  # Input Area
63
  if st.session_state["qa_pipeline"]:
@@ -70,11 +74,19 @@ if st.session_state["qa_pipeline"]:
70
  if user_input:
71
  with st.spinner("Generating response..."):
72
  try:
 
73
  response = st.session_state["qa_pipeline"](f"Q: {user_input}", max_length=400)
74
  generated_text = response[0]["generated_text"]
 
 
75
  st.session_state["conversation"].append(("You", user_input))
76
  st.session_state["conversation"].append(("Model", generated_text))
77
- st.session_state["user_input"] = "" # Clear input after submission
 
 
 
 
 
78
  except Exception as e:
79
  st.error(f"Error generating response: {e}")
80
 
 
3
  import torch
4
 
5
  # Streamlit app setup
6
+ st.set_page_config(page_title="Chat", layout="wide")
7
 
8
  # Sidebar: Model controls
9
  st.sidebar.title("Model Controls")
 
52
  st.success("Model cleared.")
53
 
54
  # Chat Conversation Display
55
def display_conversation():
    """Render the running chat transcript stored in Streamlit session state.

    Reads ``st.session_state["conversation"]``, a list of
    ``(speaker, message)`` tuples, and writes one markdown line per turn
    under a "Conversation" subheader. Messages whose speaker is exactly
    ``"You"`` are labelled **You:**; every other speaker is labelled
    **Model:** (matches how entries are appended elsewhere in the file).

    No parameters, no return value; output goes to the Streamlit page.
    An empty conversation list renders only the subheader.
    """
    st.subheader("Conversation")
    # Iterate the (speaker, message) pairs directly — the original used
    # enumerate() but never consumed the index, leaving a dead variable.
    for speaker, message in st.session_state["conversation"]:
        if speaker == "You":
            st.markdown(f"**You:** {message}")
        else:
            st.markdown(f"**Model:** {message}")
63
+
64
+ display_conversation()
65
 
66
  # Input Area
67
  if st.session_state["qa_pipeline"]:
 
74
  if user_input:
75
  with st.spinner("Generating response..."):
76
  try:
77
+ # Generate the model response
78
  response = st.session_state["qa_pipeline"](f"Q: {user_input}", max_length=400)
79
  generated_text = response[0]["generated_text"]
80
+
81
+ # Update the conversation
82
  st.session_state["conversation"].append(("You", user_input))
83
  st.session_state["conversation"].append(("Model", generated_text))
84
+
85
+ # Clear the input field after submission
86
+ st.session_state["user_input"] = ""
87
+
88
+ # Rerender the conversation immediately
89
+ display_conversation()
90
  except Exception as e:
91
  st.error(f"Error generating response: {e}")
92