Update ui/ui_core.py
ui/ui_core.py (CHANGED: +20, -18)
@@ -33,7 +33,7 @@ def extract_all_text_from_csv_or_excel(file_path: str, progress=None, index=0, t
             line = " | ".join(str(cell) for cell in row if pd.notna(cell))
             if line:
                 lines.append(line)
-        return f"
+        return f"📄 {os.path.basename(file_path)}\n\n" + "\n".join(lines)
 
     except Exception as e:
         return f"[Error reading {os.path.basename(file_path)}]: {str(e)}"
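For orientation, the changed return sits at the end of the spreadsheet extraction helper. A minimal sketch of how such a helper might look end to end, assuming a pandas-based reader; only the lines that also appear in the diff context are taken from the repository, the rest (the read_csv/read_excel calls, the row loop, the unused progress arguments) is illustrative:

    import os
    import pandas as pd

    def extract_all_text_from_csv_or_excel(file_path: str, progress=None, index=0, total=1) -> str:
        """Flatten a spreadsheet into pipe-separated text lines (illustrative sketch)."""
        try:
            # Assumed reader selection; the actual module may differ.
            if file_path.lower().endswith(".csv"):
                df = pd.read_csv(file_path, header=None, dtype=str)
            else:
                df = pd.read_excel(file_path, header=None, dtype=str)

            lines = []
            for row in df.itertuples(index=False):
                # These three lines mirror the diff context above.
                line = " | ".join(str(cell) for cell in row if pd.notna(cell))
                if line:
                    lines.append(line)
            return f"📄 {os.path.basename(file_path)}\n\n" + "\n".join(lines)

        except Exception as e:
            return f"[Error reading {os.path.basename(file_path)}]: {str(e)}"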
@@ -54,7 +54,7 @@ def extract_all_text_from_pdf(file_path: str, progress=None, index=0, total=1) -
                 progress((index + (i / num_pages)) / total, desc=f"Reading PDF: {os.path.basename(file_path)} ({i+1}/{num_pages})")
             except Exception as e:
                 extracted.append(f"[Error reading page {i+1}]: {str(e)}")
-        return f"
+        return f"📄 {os.path.basename(file_path)}\n\n" + "\n\n".join(extracted)
 
     except Exception as e:
         return f"[Error reading PDF {os.path.basename(file_path)}]: {str(e)}"
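The PDF helper gets the same treatment: the per-page loop and the progress callback are context lines, and only the final return changes. A sketch of the surrounding function, assuming the pypdf reader (the repository may use a different PDF library):

    import os
    from pypdf import PdfReader  # assumption: any page-oriented PDF reader works here

    def extract_all_text_from_pdf(file_path: str, progress=None, index=0, total=1) -> str:
        """Extract text page by page, tolerating unreadable pages (illustrative sketch)."""
        try:
            reader = PdfReader(file_path)
            num_pages = len(reader.pages)
            extracted = []
            for i, page in enumerate(reader.pages):
                try:
                    extracted.append(page.extract_text() or "")
                    if progress:
                        # Context line from the diff: report progress across all uploaded files.
                        progress((index + (i / num_pages)) / total,
                                 desc=f"Reading PDF: {os.path.basename(file_path)} ({i+1}/{num_pages})")
                except Exception as e:
                    extracted.append(f"[Error reading page {i+1}]: {str(e)}")
            return f"📄 {os.path.basename(file_path)}\n\n" + "\n\n".join(extracted)

        except Exception as e:
            return f"[Error reading PDF {os.path.basename(file_path)}]: {str(e)}"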
@@ -78,7 +78,7 @@ def chunk_text(text: str, max_tokens: int = 8192) -> List[str]:
 
 def create_ui(agent: TxAgent):
     with gr.Blocks(theme=gr.themes.Soft()) as demo:
-        gr.Markdown("<h1 style='text-align: center;'
+        gr.Markdown("<h1 style='text-align: center;'>📋 CPS: Clinical Patient Support System</h1>")
 
         chatbot = gr.Chatbot(label="CPS Assistant", height=600, type="messages", show_copy_button=True)
         file_upload = gr.File(
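The hunk header above shows the signature of chunk_text, which the chat handler presumably uses to keep extracted file text within the model's context window. A naive, whitespace-token sketch of a function with that signature (not the repository's implementation):

    from typing import List

    def chunk_text(text: str, max_tokens: int = 8192) -> List[str]:
        # Naive sketch: treat whitespace-separated words as "tokens".
        words = text.split()
        return [" ".join(words[i:i + max_tokens]) for i in range(0, len(words), max_tokens)]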
@@ -95,15 +95,18 @@ def create_ui(agent: TxAgent):
                 "You are an expert clinical AI assistant reviewing medical form or interview data. "
                 "Your job is to analyze this data and reason about any information or red flags that a human doctor might have overlooked. "
                 "Provide a **detailed and structured response**, including examples, supporting evidence from the form, and clinical rationale for why these items matter. "
-                "Ensure the output is informative and helpful for improving patient care. "
                 "Do not hallucinate. Base the response only on the provided form content. "
                 "End with a section labeled '🧠 Final Analysis' where you summarize key findings the doctor may have missed."
             )
 
-
-
-
+            # Show centered loading message in chatbot
+            updated_history = history + [
+                {"role": "user", "content": message},
+                {"role": "assistant", "content": "<div style='text-align:center'>⏳ Processing... Please wait while I analyze the files.</div>"}
+            ]
+            yield updated_history
 
+            try:
                 extracted_text = ""
                 if uploaded_files and isinstance(uploaded_files, list):
                     total_files = len(uploaded_files)
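Two details make this placeholder block work: the Chatbot in the earlier hunk is created with type="messages", so history is a list of role/content dictionaries rather than (user, bot) pairs, and handle_chat is a generator, so each yield pushes an intermediate history straight to the UI. The list yielded here therefore renders as two chat bubbles, roughly like this (illustrative values, not repository code):

    # Shape of the history yielded to a messages-type Chatbot (illustrative content).
    updated_history = [
        {"role": "user", "content": "Please review the attached intake form."},
        {"role": "assistant", "content": "⏳ Processing... Please wait while I analyze the files."},
    ]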
@@ -145,19 +148,18 @@ def create_ui(agent: TxAgent):
                     all_responses += update
 
                 all_responses = sanitize_utf8(all_responses.strip())
-
-
-
-
-                yield final_history
+
+                # Replace the temporary loading message with the final answer
+                updated_history[-1] = {"role": "assistant", "content": all_responses}
+                yield updated_history
 
             except Exception as chat_error:
                 print(f"Chat error: {chat_error}")
-
-
-
-
-                yield
+                updated_history[-1] = {
+                    "role": "assistant",
+                    "content": "❌ An error occurred while processing your request. Please try again."
+                }
+                yield updated_history
 
         inputs = [message_input, chatbot, conversation_state, file_upload]
         send_button.click(fn=handle_chat, inputs=inputs, outputs=chatbot)
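Taken together with the previous hunk, the handler now has three observable states: the placeholder, the final answer that overwrites it, and an error message that overwrites it if anything throws. Because the generator is wired directly to the click event in the context lines, Gradio streams each yielded history to the same Chatbot component. A self-contained sketch of the same pattern outside this repository (all names and the echo "agent" are illustrative, not the project's code):

    import gradio as gr

    def respond(message, history):
        # history is a list of {"role", "content"} dicts because the Chatbot uses type="messages".
        history = history + [
            {"role": "user", "content": message},
            {"role": "assistant", "content": "⏳ Processing..."},
        ]
        yield history  # the placeholder bubble appears immediately
        try:
            answer = f"Echo: {message}"  # stand-in for the real agent call
            history[-1] = {"role": "assistant", "content": answer}
        except Exception as err:
            print(f"Chat error: {err}")
            history[-1] = {"role": "assistant", "content": "❌ An error occurred. Please try again."}
        yield history  # the placeholder is replaced in place

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot(type="messages")
        box = gr.Textbox()
        box.submit(respond, inputs=[box, chatbot], outputs=chatbot)

    if __name__ == "__main__":
        demo.launch()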
@@ -169,4 +171,4 @@ def create_ui(agent: TxAgent):
             ["Is there anything abnormal in the attached blood work report?"]
         ], inputs=message_input)
 
-    return demo
+    return demo