Spaces:
Running
Update app.py
app.py
CHANGED
@@ -1,92 +1,16 @@
+import base64
+import requests
+import tempfile
 import streamlit as st
 from openai import OpenAI
-import time
-import re
 
-
-
-
-
-
-
-
-
-if not OPENAI_API_KEY:
-    st.error("Please enter your C2 Group of Technologies Access Key to continue.")
-    st.stop()
-
-client = OpenAI(api_key=OPENAI_API_KEY)
-ASSISTANT_ID = "asst_PJjxQftfz2IJUUMvnldK58lB"
-
-# Session state setup
-if "messages" not in st.session_state:
-    st.session_state.messages = []
-if "thread_id" not in st.session_state:
-    st.session_state.thread_id = None
-
-# Clear button
-if st.button("Clear Chat", use_container_width=True):
-    st.session_state.messages = []
-    st.session_state.thread_id = None
-    st.rerun()
-
-# Display prior messages
-for message in st.session_state.messages:
-    role, content = message["role"], message["content"]
-    st.chat_message(role).write(content)
-
-# Handle input
-if prompt := st.chat_input():
-    st.session_state.messages.append({"role": "user", "content": prompt})
-    st.chat_message("user").write(prompt)
-
-    try:
-        if st.session_state.thread_id is None:
-            thread = client.beta.threads.create()
-            st.session_state.thread_id = thread.id
-
-        thread_id = st.session_state.thread_id
-
-        # Send prompt to assistant
-        client.beta.threads.messages.create(
-            thread_id=thread_id,
-            role="user",
-            content=prompt
-        )
-
-        # Run assistant
-        run = client.beta.threads.runs.create(
-            thread_id=thread_id,
-            assistant_id=ASSISTANT_ID
-        )
-
-        # Wait for assistant to finish
-        while True:
-            run_status = client.beta.threads.runs.retrieve(
-                thread_id=thread_id,
-                run_id=run.id
-            )
-            if run_status.status == "completed":
-                break
-            time.sleep(1)
-
-        # Get assistant response
-        messages = client.beta.threads.messages.list(thread_id=thread_id)
-        assistant_message = None
-        for message in reversed(messages.data):
-            if message.role == "assistant":
-                assistant_message = message.content[0].text.value
-                break
-
-        # Display message
-        st.chat_message("assistant").write(assistant_message)
-        st.session_state.messages.append({"role": "assistant", "content": assistant_message})
-
-        # 🔍 Extract and display GitHub image if present
-        image_match = re.search(r'https://raw\.githubusercontent\.com/AndrewLORTech/surgical-pathology-manual/main/[\w\-/]*\.png', assistant_message)
-        if image_match:
-            image_url = image_match.group(0)
-            st.image(image_url, caption="Page Image", use_container_width=True)
-
-    except Exception as e:
-        st.error(f"Error: {str(e)}")
+# Whisper transcription function
+def transcribe_audio(file_path, api_key):
+    with open(file_path, "rb") as f:
+        response = requests.post(
+            "https://api.openai.com/v1/audio/transcriptions",
+            headers={"Authorization": f"Bearer {api_key}"},
+            files={"file": f},
+            data={"model": "whisper-1"}
+        )
+    return response.json().get("text", None)
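
After this commit, app.py contains only the Whisper helper; the chat UI from the previous revision is gone. A minimal sketch of how the helper could be wired back into the Streamlit front end follows — the uploader widget, the temp-file handling, and the OPENAI_API_KEY secret name are assumptions for illustration, not part of this diff:

# Hypothetical usage (not in this commit): upload a clip, save it to a
# temporary file, and transcribe it with the helper above.
import tempfile

import streamlit as st

uploaded = st.file_uploader("Upload audio", type=["mp3", "wav", "m4a"])
if uploaded is not None:
    # Keep the original extension so the API can detect the audio format.
    suffix = "." + uploaded.name.rsplit(".", 1)[-1]
    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
        tmp.write(uploaded.read())
        tmp_path = tmp.name
    text = transcribe_audio(tmp_path, st.secrets["OPENAI_API_KEY"])  # secret name assumed
    if text:
        st.write(text)
    else:
        st.error("Transcription failed.")

Note that the helper itself never checks response.ok, so an API error surfaces only as a None return (the error payload carries no "text" key); calling response.raise_for_status() before response.json() would fail loudly instead.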
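
Since from openai import OpenAI survives the rewrite, the same request could also go through the official SDK rather than a hand-rolled requests POST. A sketch assuming openai>=1.0; transcribe_audio_sdk is a hypothetical name, not in the commit:

from openai import OpenAI

def transcribe_audio_sdk(file_path, api_key):
    # Same request as the requests-based helper, via the SDK client.
    client = OpenAI(api_key=api_key)
    with open(file_path, "rb") as f:
        result = client.audio.transcriptions.create(model="whisper-1", file=f)
    return result.text

Unlike the requests version, the SDK raises an exception on a failed request instead of silently returning None, which makes errors harder to swallow.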