Update app.py
app.py CHANGED
@@ -131,8 +131,137 @@ def init_agent():
     return agent
 
 def process_final_report(agent, file, chatbot_state: List[Dict[str, str]]) -> Tuple[List[Dict[str, str]], Union[str, None]]:
-
-
+    """Process the Excel file and generate a final report."""
+    messages = chatbot_state if chatbot_state else []
+    report_path = None
+
+    if file is None or not hasattr(file, "name"):
+        messages.append({"role": "assistant", "content": "❌ Please upload a valid Excel file before analyzing."})
+        return messages, report_path
+
+    try:
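+        # Pipeline: extract the spreadsheet as text, split it into chunks that
+        # fit the model's context budget, analyze each chunk, then summarize.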
+        messages.append({"role": "user", "content": f"Processing Excel file: {os.path.basename(file.name)}"})
+        messages.append({"role": "assistant", "content": "⏳ Extracting and analyzing data..."})
+
+        extracted_text = extract_text_from_excel(file.name)
+        chunks = split_text_into_chunks(extracted_text, max_tokens=MAX_CHUNK_TOKENS)
+        chunk_responses = []
+
+        for i, chunk in enumerate(chunks):
+            messages.append({"role": "assistant", "content": f"🔍 Analyzing chunk {i+1}/{len(chunks)}..."})
+            prompt = build_prompt_from_text(chunk)
+            prompt_tokens = estimate_tokens(prompt)
+
+            if prompt_tokens > MAX_MODEL_TOKENS:
+                messages.append({"role": "assistant", "content": f"❌ Chunk {i+1} prompt too long ({prompt_tokens} tokens). Skipping..."})
+                continue
+
+            response = ""
+            try:
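+                # run_gradio_chat streams heterogeneous results: plain strings,
+                # objects with a .content attribute, or lists of such objects.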
+                for result in agent.run_gradio_chat(
+                    message=prompt,
+                    history=[],
+                    temperature=0.2,
+                    max_new_tokens=MAX_NEW_TOKENS,
+                    max_token=MAX_MODEL_TOKENS,
+                    call_agent=False,
+                    conversation=[],
+                ):
+                    if isinstance(result, str):
+                        response += result
+                    elif hasattr(result, "content"):
+                        response += result.content
+                    elif isinstance(result, list):
+                        for r in result:
+                            if hasattr(r, "content"):
+                                response += r.content
+            except Exception as e:
+                messages.append({"role": "assistant", "content": f"❌ Error analyzing chunk {i+1}: {str(e)}"})
+                continue
+
+            chunk_responses.append(clean_response(response))
+            messages.append({"role": "assistant", "content": f"✅ Chunk {i+1} analysis complete"})
+
+        if not chunk_responses:
+            messages.append({"role": "assistant", "content": "❌ No valid chunk responses to summarize."})
+            return messages, report_path
+
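+        # Build a rolling summary; when appending the next chunk's analysis
+        # would exceed the context budget, compress the summary via the agent.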
+        summary = ""
+        current_summary_tokens = 0
+        for i, response in enumerate(chunk_responses):
+            response_tokens = estimate_tokens(response)
+            if current_summary_tokens + response_tokens > MAX_MODEL_TOKENS - PROMPT_OVERHEAD - MAX_NEW_TOKENS:
+                summary_prompt = f"Summarize the following analysis:\n\n{summary}\n\nProvide a concise summary."
+                summary_response = ""
+                try:
+                    for result in agent.run_gradio_chat(
+                        message=summary_prompt,
+                        history=[],
+                        temperature=0.2,
+                        max_new_tokens=MAX_NEW_TOKENS,
+                        max_token=MAX_MODEL_TOKENS,
+                        call_agent=False,
+                        conversation=[],
+                    ):
+                        if isinstance(result, str):
+                            summary_response += result
+                        elif hasattr(result, "content"):
+                            summary_response += result.content
+                        elif isinstance(result, list):
+                            for r in result:
+                                if hasattr(r, "content"):
+                                    summary_response += r.content
+                    summary = clean_response(summary_response)
+                    current_summary_tokens = estimate_tokens(summary)
+                except Exception as e:
+                    messages.append({"role": "assistant", "content": f"❌ Error summarizing intermediate results: {str(e)}"})
+                    return messages, report_path
+
+            summary += f"\n\n### Chunk {i+1} Analysis\n{response}"
+            current_summary_tokens += response_tokens
+
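+        # A final agent call condenses the accumulated analyses into the
+        # report shown in the chat and saved to disk.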
+        final_prompt = f"Summarize the key findings from the following analyses:\n\n{summary}"
+        messages.append({"role": "assistant", "content": "📝 Generating final report..."})
+
+        final_report_text = ""
+        try:
+            for result in agent.run_gradio_chat(
+                message=final_prompt,
+                history=[],
+                temperature=0.2,
+                max_new_tokens=MAX_NEW_TOKENS,
+                max_token=MAX_MODEL_TOKENS,
+                call_agent=False,
+                conversation=[],
+            ):
+                if isinstance(result, str):
+                    final_report_text += result
+                elif hasattr(result, "content"):
+                    final_report_text += result.content
+                elif isinstance(result, list):
+                    for r in result:
+                        if hasattr(r, "content"):
+                            final_report_text += r.content
+        except Exception as e:
+            messages.append({"role": "assistant", "content": f"❌ Error generating final report: {str(e)}"})
+            return messages, report_path
+
+        final_report = f"# 🧠 Final Patient Report\n\n{clean_response(final_report_text)}"
+        messages[-1]["content"] = f"📄 Final Report:\n\n{clean_response(final_report_text)}"
+
+        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
+        report_path = os.path.join(report_dir, f"report_{timestamp}.md")
+
+        with open(report_path, 'w') as f:
+            f.write(final_report)
+
+        messages.append({"role": "assistant", "content": f"✅ Report generated and saved: report_{timestamp}.md"})
+
+    except Exception as e:
+        messages.append({"role": "assistant", "content": f"❌ Error processing file: {str(e)}"})
+
+    return messages, report_path
+
 
 def create_ui(agent):
     """Create the Gradio UI for the patient history analysis tool."""
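
This hunk calls several helpers and constants that live in the unchanged part of app.py and are not visible in the diff: extract_text_from_excel, split_text_into_chunks, build_prompt_from_text, estimate_tokens, clean_response, report_dir, and the token budgets MAX_MODEL_TOKENS, MAX_CHUNK_TOKENS, MAX_NEW_TOKENS, and PROMPT_OVERHEAD. As a rough guide to what process_final_report assumes about them, here is a minimal sketch; the budget values, the 4-characters-per-token heuristic, the pandas-based extraction, and the prompt/cleanup bodies are illustrative guesses, not the repository's actual code.

import os
import pandas as pd

# Illustrative budgets; the real values live near the top of app.py.
MAX_MODEL_TOKENS = 32768
MAX_CHUNK_TOKENS = 8192
MAX_NEW_TOKENS = 2048
PROMPT_OVERHEAD = 500

report_dir = "reports"  # assumed to be created during startup
os.makedirs(report_dir, exist_ok=True)

def estimate_tokens(text: str) -> int:
    # Crude heuristic: roughly 4 characters per token.
    return len(text) // 4 + 1

def extract_text_from_excel(path: str) -> str:
    # Flatten every sheet into plain text, one row per line.
    sheets = pd.read_excel(path, sheet_name=None)
    lines = []
    for name, df in sheets.items():
        lines.append(f"=== Sheet: {name} ===")
        for _, row in df.iterrows():
            lines.append(" | ".join(str(v) for v in row.values))
    return "\n".join(lines)

def split_text_into_chunks(text: str, max_tokens: int) -> list:
    # Greedy line-based packing under the per-chunk token budget.
    chunks, current, used = [], [], 0
    for line in text.splitlines():
        t = estimate_tokens(line)
        if used + t > max_tokens and current:
            chunks.append("\n".join(current))
            current, used = [], 0
        current.append(line)
        used += t
    if current:
        chunks.append("\n".join(current))
    return chunks

def build_prompt_from_text(chunk: str) -> str:
    # Hypothetical template; the real prompt is defined upstream.
    return f"Analyze this patient history excerpt and list key findings:\n\n{chunk}"

def clean_response(text: str) -> str:
    # Hypothetical cleanup; trims whitespace left over from streamed output.
    return text.strip()

One property worth noting under these assumptions: a greedy line packer can still emit an oversized chunk when a single row exceeds max_tokens, which is exactly the case the prompt_tokens > MAX_MODEL_TOKENS guard in the hunk skips over.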