import streamlit as st
from streamlit_webrtc import webrtc_streamer, AudioProcessorBase, WebRtcMode
import av
import numpy as np
import tempfile
import soundfile as sf
import os
import time
import re
from openai import OpenAI
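# Runtime dependencies (assumed to be listed in the Space's requirements.txt):
# streamlit, streamlit-webrtc, av, numpy, soundfile, openai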
# ------------------ Audio Processor ------------------
class AudioRecorder(AudioProcessorBase):
    """Buffers incoming audio frames so they can be saved once recording stops."""

    def __init__(self):
        self.recorded_frames = []

    def recv(self, frame: av.AudioFrame) -> av.AudioFrame:
        # Called by streamlit-webrtc for every incoming audio frame.
        self.recorded_frames.append(frame)
        return frame
# ------------------ App Configuration ------------------
st.set_page_config(page_title="Document AI Assistant", layout="wide")
st.title("π Document AI Assistant")
st.caption("Chat with an AI Assistant on your medical/pathology documents")
# ------------------ Load API Key and Assistant ID from Hugging Face Secrets ------------------
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
ASSISTANT_ID = os.environ.get("ASSISTANT_ID")
# ------------------ Error Handling for Missing Secrets ------------------
if not OPENAI_API_KEY or not ASSISTANT_ID:
    st.error("❌ Missing secrets. Please ensure both OPENAI_API_KEY and ASSISTANT_ID are set in your Hugging Face Space secrets.")
    st.stop()
client = OpenAI(api_key=OPENAI_API_KEY)
# ------------------ Session State Initialization ------------------
if "messages" not in st.session_state:
st.session_state.messages = []
if "thread_id" not in st.session_state:
st.session_state.thread_id = None
if "image_url" not in st.session_state:
st.session_state.image_url = None
if "image_updated" not in st.session_state:
st.session_state.image_updated = False
if "transcript_preview" not in st.session_state:
st.session_state.transcript_preview = None
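# st.session_state persists across Streamlit's script reruns, which is why the
# chat history, Assistant thread id, and transcript preview are stored there
# rather than in local variables.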
# ------------------ Sidebar Controls ------------------
st.sidebar.header("🔧 Settings")
if st.sidebar.button("🔄 Clear Chat"):
    st.session_state.messages = []
    st.session_state.thread_id = None
    st.session_state.image_url = None
    st.session_state.image_updated = False
    st.session_state.transcript_preview = None
    st.rerun()
show_image = st.sidebar.checkbox("🖼 Show Document Image", value=True)
preview_toggle = st.sidebar.checkbox("📝 Preview transcription before sending", value=True)
# ------------------ Split Layout ------------------
col1, col2 = st.columns([1, 2])
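# Left column: extracted document image. Right column: voice input and chat.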
# ------------------ Image Panel (Left) ------------------
with col1:
    if show_image and st.session_state.image_url:
        st.image(st.session_state.image_url, caption="📄 Extracted Page", use_container_width=True)
        st.session_state.image_updated = False
# ------------------ Voice Input Processing ------------------
with col2:
    st.markdown("### 🎙️ Voice Input (Optional)")
    webrtc_ctx = webrtc_streamer(
        key="voice-input",
        mode=WebRtcMode.SENDONLY,  # the browser only sends mic audio; nothing is played back
        audio_processor_factory=AudioRecorder,
        media_stream_constraints={"audio": True, "video": False},
        async_processing=True,
    )
    # Once the stream has stopped and frames were captured, transcribe them.
    if webrtc_ctx.audio_processor and not webrtc_ctx.state.playing and webrtc_ctx.audio_processor.recorded_frames:
        st.info("Transcribing your voice...")
        frames = webrtc_ctx.audio_processor.recorded_frames
        # Each frame decodes to (channels, samples); concatenate along the sample
        # axis, then transpose to (samples, channels) as soundfile expects.
        audio = np.concatenate([frame.to_ndarray() for frame in frames], axis=1)
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
            wav_path = tmp.name
            sf.write(tmp, audio.T, samplerate=frames[0].sample_rate, format="WAV")
        try:
            with open(wav_path, "rb") as audio_file:
                whisper_result = client.audio.transcriptions.create(model="whisper-1", file=audio_file, response_format="json")
            transcript = whisper_result.text.strip()
            # whisper-1's JSON response carries no confidence field, so this falls back to "N/A".
            confidence = getattr(whisper_result, "confidence", "N/A")
            if transcript:
                st.success(f"Recognized: {transcript}")
                st.caption(f"🧠 Confidence: {confidence}")
                if preview_toggle:
                    st.session_state.transcript_preview = transcript
                else:
                    st.session_state.messages.append({"role": "user", "content": transcript})
                    st.rerun()
        except Exception as e:
            st.error(f"❌ Transcription failed: {e}")
    if st.session_state.transcript_preview:
        st.markdown("---")
        st.markdown("### 📝 Transcription Preview")
        st.markdown(f"> {st.session_state.transcript_preview}")
        if st.button("✅ Send to Assistant"):
            st.session_state.messages.append({"role": "user", "content": st.session_state.transcript_preview})
            st.session_state.transcript_preview = None
            st.rerun()
        if st.button("❌ Discard"):
            st.session_state.transcript_preview = None
            st.rerun()
# ------------------ Chat Panel (Right) ------------------
with col2:
    if prompt := st.chat_input("Type your question about the document..."):
        st.session_state.messages.append({"role": "user", "content": prompt})
        st.chat_message("user").write(prompt)
        try:
            # Reuse one Assistant thread per session so the conversation keeps its context.
            if st.session_state.thread_id is None:
                thread = client.beta.threads.create()
                st.session_state.thread_id = thread.id
            thread_id = st.session_state.thread_id
            client.beta.threads.messages.create(thread_id=thread_id, role="user", content=prompt)
            run = client.beta.threads.runs.create(thread_id=thread_id, assistant_id=ASSISTANT_ID)
            with st.spinner("🤖 Assistant is thinking..."):
                # Poll until the run reaches a terminal state; raise on failure
                # instead of spinning forever.
                while True:
                    run_status = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run.id)
                    if run_status.status == "completed":
                        break
                    if run_status.status in ("failed", "cancelled", "expired"):
                        raise RuntimeError(f"Assistant run ended with status: {run_status.status}")
                    time.sleep(1)
            # messages.list returns newest first, so the first assistant entry is the latest reply.
            messages = client.beta.threads.messages.list(thread_id=thread_id)
            assistant_message = None
            for message in messages.data:
                if message.role == "assistant":
                    assistant_message = message.content[0].text.value
                    break
            st.chat_message("assistant").write(assistant_message)
            st.session_state.messages.append({"role": "assistant", "content": assistant_message})
            # If the reply references a page image from the manual, show it in the left panel.
            if assistant_message:
                image_match = re.search(
                    r'https://raw\.githubusercontent\.com/AndrewLORTech/surgical-pathology-manual/main/[\w\-/]*\.png',
                    assistant_message,
                )
                if image_match:
                    st.session_state.image_url = image_match.group(0)
                    st.session_state.image_updated = True
                    st.rerun()
        except Exception as e:
            st.error(f"❌ Error: {e}")
    # Render the stored history, newest message first.
    for message in reversed(st.session_state.messages):
        role, content = message["role"], message["content"]
        st.chat_message(role).write(content)
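# To run locally (assuming this file is saved as app.py and both secrets are
# exported as environment variables; the values below are placeholders):
#   export OPENAI_API_KEY=sk-...
#   export ASSISTANT_ID=asst_...
#   streamlit run app.py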