Update app/gradio_interface.py
app/gradio_interface.py  CHANGED  (+339 -339)

@@ -1,339 +1,339 @@
 # app/gradio_interface.py
 import os
 import gradio as gr
 import time
 import threading
 import tempfile
 import shutil
 from typing import Dict, List, Optional, Tuple, Union, Any
 import json
 import markdown
 import matplotlib.pyplot as plt
 import numpy as np
 from PIL import Image
 import io
 import base64

 class GradioInterface:
     def __init__(self, orchestrator):
         """Initialize the Gradio interface with the orchestrator."""
         self.orchestrator = orchestrator
         self.active_sessions = {}
         self.processing_threads = {}

         # Create temporary directory for file uploads
         self.temp_dir = tempfile.mkdtemp()
         self.text_dir = os.path.join(self.temp_dir, "texts")
         self.image_dir = os.path.join(self.temp_dir, "images")
         os.makedirs(self.text_dir, exist_ok=True)
         os.makedirs(self.image_dir, exist_ok=True)

     def create_interface(self):
         """Create and return the Gradio interface."""
         with gr.Blocks(title="Deep Dive Analysis with Sustainable AI",
                        theme=gr.themes.Soft(primary_hue="teal")) as interface:

             # Session management
             session_id = gr.State("")
             processing_status = gr.State("idle")
             result_data = gr.State(None)

             gr.Markdown("# 🌿 Deep Dive Analysis with Sustainable AI")
             gr.Markdown("Upload text files and images to analyze a topic in depth, with optimized AI processing.")

             with gr.Row():
                 with gr.Column(scale=2):
                     # Input section
-                    with gr.
+                    with gr.Blocks():
                         gr.Markdown("## 📝 Input")
                         topic_input = gr.Textbox(label="Topic for Deep Dive", placeholder="Enter a topic to analyze...")

                         with gr.Row():
                             text_files = gr.File(label="Upload Text Files", file_count="multiple", file_types=[".txt", ".md", ".pdf", ".docx"])
                             image_files = gr.File(label="Upload Images", file_count="multiple", file_types=["image"])

                         analyze_btn = gr.Button("Start Analysis", variant="primary")
                         status_msg = gr.Markdown("Ready to analyze.")

                 with gr.Column(scale=1):
                     # Sustainability metrics
-                    with gr.
+                    with gr.Blocks():
                         gr.Markdown("## 📊 Sustainability Metrics")
                         metrics_display = gr.Markdown("No metrics available yet.")
                         metrics_chart = gr.Plot(label="Energy Usage")
                         update_metrics_btn = gr.Button("Update Metrics")

             # Results section
-            with gr.
+            with gr.Blocks():
                 gr.Markdown("## 📑 Analysis Results")

                 with gr.Tabs() as tabs:
                     with gr.TabItem("Executive Summary"):
                         exec_summary = gr.Markdown("No results available yet.")
                         confidence_indicator = gr.Markdown("")

                     with gr.TabItem("Detailed Report"):
                         detailed_report = gr.Markdown("No detailed report available yet.")

                     with gr.TabItem("Text Analysis"):
                         text_analysis = gr.Markdown("No text analysis available yet.")

                     with gr.TabItem("Image Analysis"):
                         with gr.Row():
                             image_gallery = gr.Gallery(label="Analyzed Images")
                             image_analysis = gr.Markdown("No image analysis available yet.")

                     with gr.TabItem("Raw Data"):
                         raw_json = gr.JSON(None)

             # Define event handlers
             def initialize_session():
                 """Initialize a new session."""
                 new_session = self.orchestrator.create_session()
                 return new_session, "idle", None

             def process_files(session, topic, text_files, image_files, status):
                 """Process uploaded files and start analysis."""
                 if not topic:
                     return session, "error", "Please enter a topic for analysis.", None

                 if not text_files and not image_files:
                     return session, "error", "Please upload at least one text file or image.", None

                 # Save uploaded files to temp directories
                 text_file_paths = []
                 if text_files:
                     for file in text_files:
                         dest_path = os.path.join(self.text_dir, os.path.basename(file.name))
                         shutil.copy(file.name, dest_path)
                         text_file_paths.append(dest_path)

                 image_file_paths = []
                 if image_files:
                     for file in image_files:
                         dest_path = os.path.join(self.image_dir, os.path.basename(file.name))
                         shutil.copy(file.name, dest_path)
                         image_file_paths.append(dest_path)

                 # Start processing in a separate thread to avoid blocking the UI
                 def process_thread():
                     try:
                         # Use synchronized workflow for better control
                         result = self.orchestrator.coordinate_workflow_with_synchronization(
                             session, topic, text_file_paths, image_file_paths)

                         # Store result for UI access
                         self.active_sessions[session] = result
                     except Exception as e:
                         self.active_sessions[session] = {"error": str(e), "status": "error"}

                 # Start processing thread
                 thread = threading.Thread(target=process_thread)
                 thread.daemon = True
                 thread.start()
                 self.processing_threads[session] = thread

                 return session, "processing", "Analysis in progress... This may take a few minutes.", None

             def check_status(session, status):
                 """Check the status of the current processing job."""
                 if session and session in self.active_sessions:
                     result = self.active_sessions[session]

                     if isinstance(result, dict):
                         if "error" in result:
                             return "error", f"Error: {result['error']}", result
                         elif result.get("status") == "completed":
                             return "completed", "Analysis completed successfully!", result

                 if status == "processing":
                     return status, "Analysis in progress... This may take a few minutes.", None

                 return status, "Ready to analyze.", None

             def update_results(result_data):
                 """Update the UI with results."""
                 if not result_data:
                     return ("No results available yet.",
                             "",
                             "No detailed report available yet.",
                             "No text analysis available yet.",
                             [],
                             "No image analysis available yet.",
                             None)

                 # Extract results
                 exec_summary_text = "No executive summary available."
                 confidence_text = ""
                 detailed_report_text = "No detailed report available."
                 text_analysis_text = "No text analysis available."
                 image_list = []
                 image_analysis_text = "No image analysis available."

                 # Process report data
                 if "report" in result_data:
                     report = result_data["report"]

                     # Executive summary
                     if "executive_summary" in report:
                         exec_summary_text = report["executive_summary"]

                     # Confidence statement
                     if "confidence_statement" in report:
                         confidence_level = report.get("confidence_level", "unknown")
                         confidence_text = f"**Confidence Level: {confidence_level.title()}**\n\n"
                         confidence_text += report["confidence_statement"]

                     # Detailed report
                     if "detailed_report" in report:
                         detailed_report_text = report["detailed_report"]

                 # Process text analysis
                 if "results" in result_data and "text_analysis" in result_data["results"]:
                     text_data = result_data["results"]["text_analysis"]

                     if "document_analyses" in text_data:
                         text_analysis_text = f"### Text Analysis Results\n\n"
                         text_analysis_text += f"Found {text_data.get('relevant_documents', 0)} relevant documents out of {text_data.get('total_documents', 0)}.\n\n"

                         for i, doc in enumerate(text_data["document_analyses"]):
                             text_analysis_text += f"#### Document {i+1}: {doc.get('filename', 'Unknown')}\n\n"
                             text_analysis_text += f"Relevance: {doc.get('relevance_score', 0):.2f}\n\n"
                             text_analysis_text += f"{doc.get('summary', 'No summary available.')}\n\n"

                 # Process image analysis
                 if "results" in result_data and "image_analysis" in result_data["results"]:
                     img_data = result_data["results"]["image_analysis"]

                     if "image_analyses" in img_data:
                         image_analysis_text = f"### Image Analysis Results\n\n"
                         image_analysis_text += f"Found {img_data.get('relevant_images', 0)} relevant images out of {img_data.get('total_images', 0)}.\n\n"

                         # Get processed images for gallery
                         if "processed_images" in img_data:
                             for img_info in img_data["processed_images"]:
                                 if img_info.get("is_relevant", False):
                                     try:
                                         img_path = img_info.get("filepath", "")
                                         if os.path.exists(img_path):
                                             # Add to gallery
                                             image_list.append((img_path, img_info.get("caption", "No caption")))
                                     except Exception as e:
                                         print(f"Error loading image: {e}")

                         # Format analysis text
                         for i, img in enumerate(img_data["image_analyses"]):
                             image_analysis_text += f"#### Image {i+1}: {img.get('filename', 'Unknown')}\n\n"
                             image_analysis_text += f"Caption: {img.get('caption', 'No caption available.')}\n\n"
                             image_analysis_text += f"Relevance: {img.get('relevance_score', 0):.2f}\n\n"
                             image_analysis_text += f"Model used: {img.get('model_used', 'unknown')}\n\n"

                 return (exec_summary_text,
                         confidence_text,
                         detailed_report_text,
                         text_analysis_text,
                         image_list,
                         image_analysis_text,
                         result_data)

             def update_metrics():
                 """Update sustainability metrics display."""
                 metrics = self.orchestrator.get_sustainability_metrics()

                 if "error" in metrics:
                     return "No metrics available: " + metrics["error"], None

                 # Format metrics for display
                 metrics_text = "### Sustainability Metrics\n\n"

                 # Energy usage
                 energy_usage = metrics.get("energy_usage", {}).get("total", 0)
                 metrics_text += f"**Total Energy Usage**: {energy_usage:.6f} Wh\n\n"

                 # Carbon footprint
                 carbon = metrics.get("carbon_footprint_kg", 0)
                 metrics_text += f"**Carbon Footprint**: {carbon:.6f} kg CO₂\n\n"

                 # Optimization gains
                 opt_gains = metrics.get("optimization_gains", {})
                 tokens_saved = opt_gains.get("tokens_saved", 0)
                 tokens_saved_pct = opt_gains.get("tokens_saved_pct", 0)
                 energy_saved = opt_gains.get("total_energy_saved", 0)

                 metrics_text += f"**Tokens Saved**: {tokens_saved} ({tokens_saved_pct:.1f}%)\n\n"
                 metrics_text += f"**Energy Saved**: {energy_saved:.6f} Wh\n\n"

                 # Environmental equivalents
                 env_equiv = metrics.get("environmental_equivalents", {})
                 if env_equiv:
                     metrics_text += "### Environmental Impact\n\n"
                     for impact, value in env_equiv.items():
                         name = impact.replace("_", " ").title()
                         metrics_text += f"**{name}**: {value:.2f}\n\n"

                 # Create chart
                 fig, ax = plt.subplots(figsize=(6, 4))

                 # Energy by model
                 energy_by_model = metrics.get("energy_usage", {}).get("by_model", {})
                 if energy_by_model:
                     models = list(energy_by_model.keys())
                     values = list(energy_by_model.values())

                     # Shorten model names for display
                     short_names = [m.split("/")[-1] if "/" in m else m for m in models]

                     ax.bar(short_names, values)
                     ax.set_ylabel("Energy (Wh)")
                     ax.set_title("Energy Usage by Model")
                     plt.xticks(rotation=45, ha="right")
                     plt.tight_layout()

                 return metrics_text, fig

             # Connect event handlers
             session_id = gr.on_load(initialize_session)[0]

             analyze_btn.click(
                 process_files,
                 inputs=[session_id, topic_input, text_files, image_files, processing_status],
                 outputs=[session_id, processing_status, status_msg, result_data]
             )

             # Periodic status check
             gr.on(
                 "change",
                 lambda s, st: check_status(s, st),
                 inputs=[session_id, processing_status],
                 outputs=[processing_status, status_msg, result_data],
                 every=2  # Check every 2 seconds
             )

             # Update results when result_data changes
             result_data.change(
                 update_results,
                 inputs=[result_data],
                 outputs=[exec_summary, confidence_indicator, detailed_report, text_analysis,
                          image_gallery, image_analysis, raw_json]
             )

             # Update metrics
             update_metrics_btn.click(
                 update_metrics,
                 inputs=[],
                 outputs=[metrics_display, metrics_chart]
             )

         return interface

     def launch(self, **kwargs):
         """Launch the Gradio interface."""
         interface = self.create_interface()
         interface.launch(**kwargs)

     def cleanup(self):
         """Clean up temporary files."""
         try:
             shutil.rmtree(self.temp_dir)
         except Exception as e:
             print(f"Error cleaning up temp files: {e}")
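
A minimal launch sketch follows, for trying the updated interface end to end. It is not part of the commit: it assumes the file is importable as app.gradio_interface, that the gr.* event wiring in the file resolves against the installed Gradio version, and it substitutes a hypothetical StubOrchestrator for the real orchestrator, which only needs to expose the three methods the class calls (create_session, coordinate_workflow_with_synchronization, get_sustainability_metrics). The return shapes are guesses based on what check_status, update_results, and update_metrics read, not the project's actual data contract.

# sketch_launch.py (hypothetical usage sketch, not part of this commit)
from app.gradio_interface import GradioInterface


class StubOrchestrator:
    """Stand-in orchestrator so the UI can be exercised without the real pipeline."""

    def create_session(self):
        # The real orchestrator presumably returns a unique session id.
        return "demo-session"

    def coordinate_workflow_with_synchronization(self, session, topic, text_paths, image_paths):
        # Shape guessed from what check_status() and update_results() read.
        return {
            "status": "completed",
            "report": {"executive_summary": f"Stub summary for '{topic}'."},
            "results": {},
        }

    def get_sustainability_metrics(self):
        # Shape guessed from what update_metrics() reads.
        return {
            "energy_usage": {"total": 0.0, "by_model": {}},
            "carbon_footprint_kg": 0.0,
            "optimization_gains": {},
            "environmental_equivalents": {},
        }


if __name__ == "__main__":
    ui = GradioInterface(StubOrchestrator())
    try:
        ui.launch(server_name="0.0.0.0", server_port=7860)  # standard Gradio launch kwargs
    finally:
        ui.cleanup()  # remove the temporary upload directories created in __init__

If the wiring resolves, this should serve the UI on port 7860; the finally block mirrors the cleanup() method so the tempfile.mkdtemp() directories do not accumulate between runs.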