Update app.py
app.py CHANGED
@@ -458,9 +458,9 @@ def main():
         st.write(f"You can ask up to {remaining} more question(s).")
 
         st.write("### Record Your Follow-Up Question:")
-        # Use streamlit-webrtc
         from streamlit_webrtc import webrtc_streamer, WebRtcMode, RTCConfiguration
         from qa import AudioBufferProcessor
+
         RTC_CONFIGURATION = {"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]}
 
         webrtc_ctx = webrtc_streamer(
@@ -474,18 +474,19 @@ def main():
         if "audio-processor" not in st.session_state:
             st.session_state["audio-processor"] = None
 
+        # If the stream is currently playing, store the processor
         if webrtc_ctx.state.playing and webrtc_ctx.audio_processor:
             st.session_state["audio-processor"] = webrtc_ctx.audio_processor
 
-        #
-        if
+        # Instead of checking .state.status, we check .state.playing
+        if not webrtc_ctx.state.playing:
             st.write("Recording Stopped. You may now submit your question.")
 
+        # Submit button
         if st.button("Submit Q&A"):
             if used_questions >= MAX_QA_QUESTIONS:
                 st.warning("You have reached the Q&A limit.")
             else:
-                # 1) Finalize WAV
                 processor = st.session_state.get("audio-processor")
                 if not processor or not getattr(processor, "frames", None):
                     st.warning("No recorded audio found. Please record your question first.")
@@ -494,7 +495,6 @@ def main():
                 if not local_wav_path:
                     st.warning("No audio frames found. Please record again.")
                 else:
-                    # 2) Transcribe with Deepgram (same logic as your old approach)
                     from qa import transcribe_audio_deepgram
                     st.write("Transcribing your voice question via Deepgram...")
                     question_text = transcribe_audio_deepgram(local_wav_path)
@@ -503,7 +503,6 @@ def main():
                     else:
                         st.write(f"**You asked**: {question_text}")
 
-                        # 3) Generate an LLM answer
                         conversation_so_far = st.session_state["conversation_history"]
                         ans_audio, ans_text = handle_qa_exchange(conversation_so_far, question_text)
                         if ans_audio: