ashok2216 committed
Commit 4d72a21 · verified · 1 Parent(s): f8fb2df

Update app.py

Files changed (1):
  1. app.py +36 -7
app.py CHANGED
@@ -59,15 +59,44 @@ for message in st.session_state.messages:
         st.markdown(message["content"])
 
 # Function to generate responses
-def generate_response(messages):
-    # Convert the messages to a format compatible with Hugging Face Inference API
-    messages_str = "\n".join([f"{msg['role']}: {msg['content']}" for msg in messages if msg['role'] != 'system'])
+# def generate_response(messages):
+#     # Convert the messages to a format compatible with Hugging Face Inference API
+#     messages_str = "\n".join([f"{msg['role']}: {msg['content']}" for msg in messages if msg['role'] != 'system'])
 
-    # Make API call to generate response
-    output = client.chat_completion(inputs=messages_str, max_tokens, temperature, top_p)
+#     # Make API call to generate response
+#     output = client(inputs=messages_str, max_tokens, temperature, top_p)
 
-    return output['generated_text']
+#     return output['generated_text']
 
+def respond(message, history, max_tokens, temperature, top_p):
+    # Prepare the list of messages for the chat completion
+    messages = [{"role": "system", "content": st.session_state.messages[0]["content"]}]
+
+    for val in history:
+        if val["role"] == "user":
+            messages.append({"role": "user", "content": val["content"]})
+        elif val["role"] == "assistant":
+            messages.append({"role": "assistant", "content": val["content"]})
+
+    messages.append({"role": "user", "content": message})
+
+    # Generate response
+    response = ""
+    response_container = st.empty()  # Placeholder to update the response text dynamically
+
+    for message in client.chat_completion(
+        messages,
+        max_tokens=max_tokens,
+        stream=True,
+        temperature=temperature,
+        top_p=top_p,
+    ):
+        token = message.choices[0].delta.content
+        response += token
+        response_container.text(response)  # Stream the response
+
+    return response
+
 # User input
 if user_input := st.chat_input("Ask a health question..."):
     # Display user message
@@ -76,7 +105,7 @@ if user_input := st.chat_input("Ask a health question..."):
     st.session_state.messages.append({"role": "user", "content": user_input})
 
     # Generate and display assistant response
-    response = generate_response(st.session_state.messages)
+    response = respond(user_input, st.session_state.messages, max_tokens, temperature, top_p)
     with st.chat_message("assistant"):
         st.markdown(response)
     st.session_state.messages.append({"role": "assistant", "content": response})
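
For context, the new respond() helper relies on a few objects this diff does not show: a huggingface_hub InferenceClient bound to client, the max_tokens / temperature / top_p sampling values, and a system prompt stored at st.session_state.messages[0]. A minimal sketch of that earlier setup might look like the following (the model name, slider ranges, and system prompt are assumptions for illustration, not part of this commit):

import streamlit as st
from huggingface_hub import InferenceClient

# `client` used by respond(); the model name here is a placeholder assumption
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Sampling controls that the diff passes into respond(); ranges are assumptions
max_tokens = st.sidebar.slider("Max new tokens", 64, 2048, 512)
temperature = st.sidebar.slider("Temperature", 0.1, 2.0, 0.7)
top_p = st.sidebar.slider("Top-p", 0.1, 1.0, 0.95)

# Seed the chat history with a system prompt so that
# st.session_state.messages[0] is the system message respond() reads
if "messages" not in st.session_state:
    st.session_state.messages = [
        {"role": "system", "content": "You are a helpful health assistant."}
    ]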