kseth9852 committed on
Commit 2b9a5dc · verified · 1 Parent(s): 401e8d2
Files changed (1)
  1. app.py +18 -50
app.py CHANGED
@@ -23,29 +23,14 @@ def extract_text(file):
 # Load Hugging Face model for medical explanation
 @st.cache_resource
 def load_explainer():
-    return pipeline("text2text-generation", model="google/flan-t5-base")
-
-# Visual card mapping
-def get_visual_card(term):
-    visuals = {
-        "ct scan": "visuals/ct_scan.png",
-        "mri": "visuals/mri.png",
-        "hemoglobin": "visuals/hemoglobin.png",
-        "creatinine": "visuals/creatinine.png",
-        "platelets": "visuals/platelets.png",
-        "ecg": "visuals/ecg.png"
-    }
-    for key in visuals:
-        if key in term.lower():
-            return visuals[key]
-    return None
+    return pipeline("text2text-generation", model="google/flan-t5-large")

 # Main logic
 if uploaded_file:
     text_data = extract_text(uploaded_file)
     st.success("Health Report Uploaded Successfully!")

-    # Display the report text without label
+    # Display the report text
     st.markdown("### 📄 Health Report Content")
     st.write(text_data)

@@ -53,8 +38,6 @@ if uploaded_file:

     # Store extracted text in session
     st.session_state['report_text'] = text_data
-    if 'chat_history' not in st.session_state:
-        st.session_state['chat_history'] = []

     # Chatbot
     st.subheader("💬 Ask About Any Medical Term or Part of the Report")
@@ -63,43 +46,28 @@ if uploaded_file:

     if st.button("Get AI Explanation") and user_question:
         with st.spinner("Thinking..."):
-            previous_context = "\n".join([
-                f"Q: {q}\nA: {a}" for q, a in st.session_state['chat_history'][-3:]
-            ])
-
+
+            # Enhanced prompt for better answers
             prompt = (
-                f"You are a helpful medical assistant. "
-                f"Please explain the following medical term or phrase in very simple words.\n"
-                f"Health Report Content:\n{st.session_state['report_text']}\n"
-                f"Previous Conversation:\n{previous_context}\n"
-                f"Current Question: {user_question}\n"
-                f"Answer in easy language."
+                f"You are a friendly and experienced medical assistant. "
+                f"Explain this term in very simple language. "
+                f"Provide a short definition and mention why it's important or what it means in a report.\n\n"
+                f"Question: {user_question}"
             )

-            response = explainer(prompt)[0]['generated_text']
-            st.session_state['chat_history'].append((user_question, response))
+            response = explainer(prompt, max_length=300)[0]['generated_text']

         st.success("Explanation:")
         st.write(response)

-        # Visual card
-        image_path = get_visual_card(user_question)
-        if image_path and os.path.exists(image_path):
-            st.image(image_path, caption=f"Visual aid for: {user_question}", use_column_width=True)
-
-        # Text-to-speech
-        if st.button("🔊 Listen to this explanation"):
-            tts = gTTS(text=response)
-            with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
-                tts.save(tmp_file.name)
-                st.audio(tmp_file.name, format='audio/mp3')
-
-        # Display chat history
-        if st.session_state['chat_history']:
-            st.markdown("---")
-            st.markdown("### 💬 Chat History")
-            for q, a in reversed(st.session_state['chat_history']):
-                st.markdown(f"**You:** {q}")
-                st.markdown(f"**AI:** {a}")
+        # Text-to-speech using gTTS
+        tts = gTTS(text=response)
+        temp_audio = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
+        tts.save(temp_audio.name)
+
+        audio_file = open(temp_audio.name, 'rb')
+        audio_bytes = audio_file.read()
+        st.audio(audio_bytes, format='audio/mp3')
+
 else:
     st.info("Upload a PDF Health Report to begin.")
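The hunks above only touch the explainer pipeline, the prompt, and the audio playback; the imports, the file uploader, the extract_text helper named in the hunk headers, and the user_question input live elsewhere in app.py and are not part of this commit. For reference, here is a minimal, self-contained sketch of how the changed pieces fit together, assuming streamlit, transformers, and gTTS are installed; the st.text_input widget and the omission of the PDF-upload flow are simplifications for illustration, not the committed file.

# Minimal sketch (not the committed app.py): wires together the pieces this
# commit changes — the flan-t5-large pipeline, the simplified prompt, and the
# gTTS playback — with the PDF-upload flow omitted for brevity.
import tempfile

import streamlit as st
from gtts import gTTS
from transformers import pipeline


@st.cache_resource
def load_explainer():
    # Cached so the model is loaded once per session, as in the commit.
    return pipeline("text2text-generation", model="google/flan-t5-large")


explainer = load_explainer()
user_question = st.text_input("Ask about a medical term")  # assumed stand-in widget

if st.button("Get AI Explanation") and user_question:
    with st.spinner("Thinking..."):
        prompt = (
            f"You are a friendly and experienced medical assistant. "
            f"Explain this term in very simple language. "
            f"Provide a short definition and mention why it's important "
            f"or what it means in a report.\n\n"
            f"Question: {user_question}"
        )
        response = explainer(prompt, max_length=300)[0]["generated_text"]

    st.success("Explanation:")
    st.write(response)

    # Text-to-speech: save the spoken explanation to a temp MP3 and play it.
    tts = gTTS(text=response)
    temp_audio = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
    temp_audio.close()  # close the handle so gTTS can write to the path
    tts.save(temp_audio.name)
    with open(temp_audio.name, "rb") as audio_file:
        st.audio(audio_file.read(), format="audio/mp3")

The text2text-generation pipeline returns a list of dicts with a generated_text key, which is why both the committed code and this sketch index [0]['generated_text']; max_length=300 simply caps the length of the generated answer.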