Update app.py
app.py
CHANGED
@@ -1,45 +1,80 @@
 """Streamlit frontend for the News Summarization application."""

 import streamlit as st
-import requests
 import pandas as pd
 import json
-from config import API_BASE_URL
 import os
 import plotly.express as px
 import altair as alt

 st.set_page_config(
     page_title="News Summarization App",
     page_icon="📰",
     layout="wide"
 )

-
-
     try:
-
-
-
-
-        if
-
-
-
-
-
-        if 'audio_url' in data:
-            audio_response = requests.get(f"{API_BASE_URL}{data['audio_url']}")
-            if audio_response.status_code == 200:
-                data['audio_content'] = audio_response.content
-            return data
-        else:
-            st.error(f"Error from API: {response.text}")
-            return {"articles": [], "comparative_sentiment_score": {}, "final_sentiment_analysis": "", "audio_url": None}
     except Exception as e:
-        st.error(f"Error
-        return {"articles": [], "comparative_sentiment_score": {}, "final_sentiment_analysis": "", "

 def main():
     st.title("📰 News Summarization and Analysis")
@@ -59,33 +94,53 @@ def main():
         st.sidebar.error("Please enter a valid company name (at least 2 characters)")
     else:
         with st.spinner("Analyzing news articles..."):
-
-
-
                 # Display Articles
                 st.header("📑 News Articles")
-                for idx, article in enumerate(
                     with st.expander(f"Article {idx}: {article['title']}"):
-
-                        if "
-                        st.
-

                         # Enhanced sentiment display
                         if "sentiment" in article:
                             sentiment_col1, sentiment_col2 = st.columns(2)
                             with sentiment_col1:
-                                st.
-                                st.write("

                             with sentiment_col2:
                                 # Display fine-grained sentiment if available
                                 if "fine_grained_sentiment" in article and article["fine_grained_sentiment"]:
                                     fine_grained = article["fine_grained_sentiment"]
                                     if "category" in fine_grained:
-                                        st.write("
                                     if "confidence" in fine_grained:
-                                        st.write("

                             # Display sentiment indices if available
                             if "sentiment_indices" in article and article["sentiment_indices"]:
@@ -146,24 +201,20 @@ def main():
                                     st.markdown(f"> {target['context']}")
                                     st.markdown("---")

                         if "url" in article:
-                            st.

                 # Display Comparative Analysis
                 st.header("📊 Comparative Analysis")
-                analysis =

                 # Sentiment Distribution
                 if "sentiment_distribution" in analysis:
                     st.subheader("Sentiment Distribution")

-                    # Debug: Print sentiment distribution data
-                    print("Sentiment Distribution Data:")
-                    print(json.dumps(analysis["sentiment_distribution"], indent=2))
-
                     sentiment_dist = analysis["sentiment_distribution"]

-                    # Create a very simple visualization that will definitely work
                     try:
                         # Extract basic sentiment data
                         if isinstance(sentiment_dist, dict):
@@ -187,7 +238,7 @@ def main():
                         else:
                             percentages = {k: 0 for k in basic_dist}

-                        # Display as
                         st.write("**Sentiment Distribution:**")

                         col1, col2, col3 = st.columns(3)
@@ -210,13 +261,11 @@ def main():
                             f"{percentages.get('neutral', 0):.1f}%"
                         )

-                        # Create
-
-                        # Create a simple DataFrame with consistent capitalization and percentages
                         chart_data = pd.DataFrame({
                             'Sentiment': ['Positive', 'Negative', 'Neutral'],
                             'Count': [
-                                basic_dist.get('positive', 0),
                                 basic_dist.get('negative', 0),
                                 basic_dist.get('neutral', 0)
                             ],
@@ -227,71 +276,44 @@ def main():
                             ]
                         })

-                        # Add debug output to see what's in the data
-                        print("Chart Data for Sentiment Distribution:")
-                        print(chart_data)
-
-                        # Create a simple bar chart with percentages
                         chart = alt.Chart(chart_data).mark_bar().encode(
-                            y='Sentiment',
-                            x='Count',
                             color=alt.Color('Sentiment', scale=alt.Scale(
                                 domain=['Positive', 'Negative', 'Neutral'],
                                 range=['green', 'red', 'gray']
                             )),
-                            tooltip=['Sentiment', 'Count', 'Percentage']
                         ).properties(
                             width=600,
                             height=300
                         )

-                        # Add text labels with percentages
                         text = chart.mark_text(
                             align='left',
                             baseline='middle',
-                            dx=3
                         ).encode(
                             text='Percentage'
                         )

-                        # Combine the chart and text
                         chart_with_text = (chart + text)
-
                         st.altair_chart(chart_with_text, use_container_width=True)

                     except Exception as e:
                         st.error(f"Error creating visualization: {str(e)}")
-                        st.write("Fallback to simple text display:")
-                        if isinstance(sentiment_dist, dict):
-                            if "basic" in sentiment_dist:
-                                st.write(f"Positive: {sentiment_dist['basic'].get('positive', 0)}")
-                                st.write(f"Negative: {sentiment_dist['basic'].get('negative', 0)}")
-                                st.write(f"Neutral: {sentiment_dist['basic'].get('neutral', 0)}")
-                            else:
-                                st.write(f"Positive: {sentiment_dist.get('positive', 0)}")
-                                st.write(f"Negative: {sentiment_dist.get('negative', 0)}")
-                                st.write(f"Neutral: {sentiment_dist.get('neutral', 0)}")
-                        else:
-                            st.write("No valid sentiment data available")

                 # Display sentiment indices if available
                 if "sentiment_indices" in analysis and analysis["sentiment_indices"]:
                     st.subheader("Sentiment Indices")

-                    # Debug: Print sentiment indices
-                    print("Sentiment Indices:")
-                    print(json.dumps(analysis["sentiment_indices"], indent=2))
-
-                    # Get the indices data
                     indices = analysis["sentiment_indices"]

-                    # Create a very simple visualization that will definitely work
                     try:
                         if isinstance(indices, dict):
-                            # Display as
                             cols = st.columns(3)

-                            # Define display names and descriptions
                             display_names = {
                                 "positivity_index": "Positivity",
                                 "negativity_index": "Negativity",
@@ -301,22 +323,18 @@ def main():
                                 "esg_relevance": "ESG Relevance"
                             }

-                            # Display each index as a metric
                             for i, (key, value) in enumerate(indices.items()):
                                 if isinstance(value, (int, float)):
                                     with cols[i % 3]:
                                         display_name = display_names.get(key, key.replace("_", " ").title())
                                         st.metric(display_name, f"{value:.2f}")

-                            # Create
-
-                            # Create a simple DataFrame
                             chart_data = pd.DataFrame({
                                 'Index': [display_names.get(k, k.replace("_", " ").title()) for k in indices.keys()],
                                 'Value': [v if isinstance(v, (int, float)) else 0 for v in indices.values()]
                             })

-                            # Create a simple bar chart
                             chart = alt.Chart(chart_data).mark_bar().encode(
                                 x='Value',
                                 y='Index',
@@ -338,89 +356,107 @@ def main():
                             - **Confidence**: Confidence in the sentiment analysis (0-1)
                             - **ESG Relevance**: Relevance to Environmental, Social, and Governance topics (0-1)
                             """)
-                        else:
-                            st.warning("Sentiment indices data is not in the expected format.")
-                            st.write("No valid sentiment indices available")
                     except Exception as e:
                         st.error(f"Error creating indices visualization: {str(e)}")
-                        st.write("Fallback to simple text display:")
-                        if isinstance(indices, dict):
-                            for key, value in indices.items():
-                                if isinstance(value, (int, float)):
-                                    st.write(f"{key.replace('_', ' ').title()}: {value:.2f}")
-                        else:
-                            st.write("No valid sentiment indices data available")
-
-                # Source Distribution
-                if "source_distribution" in analysis:
-                    st.subheader("Source Distribution")
-                    source_df = pd.DataFrame.from_dict(
-                        analysis["source_distribution"],
-                        orient='index',
-                        columns=['Count']
-                    )
-                    st.bar_chart(source_df)

-                #
-
-                st.subheader("Common Topics")
-                st.write(", ".join(analysis["common_topics"]) if analysis["common_topics"] else "No common topics found")

-                #
-                if "
-                st.
-
-

-                # Display
-
-
-

-                #
-
-                indices = analysis["sentiment_indices"]
-                # Verify we have valid data
-                if indices and any(isinstance(v, (int, float)) for v in indices.values()):
-                    st.sidebar.markdown("### Sentiment Indices")
-                    for idx_name, idx_value in indices.items():
-                        if isinstance(idx_value, (int, float)):
-                            formatted_name = " ".join(word.capitalize() for word in idx_name.replace("_", " ").split())
-                            st.sidebar.metric(formatted_name, f"{idx_value:.2f}")

-
-
-
-
-
-
-                if "agreement" in ensemble:
-                    st.metric("Model Agreement", f"{ensemble['agreement']*100:.1f}%")
-
-                # Individual model results
-                if "models" in ensemble:
-                    st.subheader("Individual Model Results")
-                    models_data = []
-                    for model_name, model_info in ensemble["models"].items():
-                        models_data.append({
-                            "Model": model_name,
-                            "Sentiment": model_info.get("sentiment", "N/A"),
-                            "Confidence": f"{model_info.get('confidence', 0)*100:.1f}%"
-                        })
-
-                    if models_data:
-                        st.table(pd.DataFrame(models_data))

-
-
-
-
-
-

                 # Total Articles
                 if "total_articles" in analysis:
                     st.sidebar.info(f"Found {analysis['total_articles']} articles")

     # Add a disclaimer
     st.sidebar.markdown("---")
@@ -1,45 +1,80 @@
 """Streamlit frontend for the News Summarization application."""

 import streamlit as st
 import pandas as pd
 import json
 import os
 import plotly.express as px
 import altair as alt
+from utils import (
+    analyze_company_data,
+    TextToSpeechConverter,
+    get_translator,
+    NewsExtractor,
+    SentimentAnalyzer,
+    TextSummarizer
+)

+# Set page config
 st.set_page_config(
     page_title="News Summarization App",
     page_icon="📰",
     layout="wide"
 )

+# Show loading message
+with st.spinner("Initializing the application... Please wait while we load the models."):
+    # Initialize components
+    try:
+        st.success("Application initialized successfully!")
+    except Exception as e:
+        st.error(f"Error initializing application: {str(e)}")
+        st.info("Please try refreshing the page.")
+
+def process_company(company_name):
+    """Process company data directly."""
     try:
+        # Call the analysis function directly from utils
+        data = analyze_company_data(company_name)
+
+        # Generate Hindi audio from final analysis
+        if data.get("final_sentiment_analysis"):
+            # Get the translator
+            translator = get_translator()
+            if translator:
+                try:
+                    # Create a more detailed Hindi explanation
+                    sentiment_explanation = f"""
+                    {company_name} के समाचारों का विश्लेषण:
+
+                    समग्र भावना: {data['final_sentiment_analysis']}
+
+                    भावनात्मक विश्लेषण:
+                    - सकारात्मक भावना: {data.get('comparative_sentiment_score', {}).get('sentiment_indices', {}).get('positivity_index', 0):.2f}
+                    - नकारात्मक भावना: {data.get('comparative_sentiment_score', {}).get('sentiment_indices', {}).get('negativity_index', 0):.2f}
+                    - भावनात्मक तीव्रता: {data.get('comparative_sentiment_score', {}).get('sentiment_indices', {}).get('emotional_intensity', 0):.2f}
+
+                    विश्वसनीयता स्कोर: {data.get('comparative_sentiment_score', {}).get('sentiment_indices', {}).get('confidence_score', 0):.2f}
+                    """
+
+                    # Generate Hindi audio
+                    tts_converter = TextToSpeechConverter()
+                    audio_path = tts_converter.generate_audio(
+                        sentiment_explanation,
+                        f'{company_name}_summary'
+                    )
+                    data['audio_path'] = audio_path
+                except Exception as e:
+                    print(f"Error generating Hindi audio: {str(e)}")
+                    data['audio_path'] = None
+            else:
+                print("Translator not available")
+                data['audio_path'] = None

+        return data
     except Exception as e:
+        st.error(f"Error processing company: {str(e)}")
+        return {"articles": [], "comparative_sentiment_score": {}, "final_sentiment_analysis": "", "audio_path": None}

 def main():
     st.title("📰 News Summarization and Analysis")
@@ -59,33 +94,53 @@ def main():
         st.sidebar.error("Please enter a valid company name (at least 2 characters)")
     else:
         with st.spinner("Analyzing news articles..."):
+            try:
+                # Process company data
+                data = process_company(company)
+
+                if not data["articles"]:
+                    st.error("No articles found for analysis.")
+                    return
+
                 # Display Articles
                 st.header("📑 News Articles")
+                for idx, article in enumerate(data["articles"], 1):
                     with st.expander(f"Article {idx}: {article['title']}"):
+                        # Display content with proper formatting
+                        if article.get("content"):
+                            st.markdown("**Content:**")
+                            st.write(article["content"])
+                        else:
+                            st.warning("No content available for this article")
+
+                        # Display summary if available
+                        if article.get("summary"):
+                            st.markdown("**Summary:**")
+                            st.write(article["summary"])
+
+                        # Display source
+                        if article.get("source"):
+                            st.markdown("**Source:**")
+                            st.write(article["source"])

                         # Enhanced sentiment display
                         if "sentiment" in article:
                             sentiment_col1, sentiment_col2 = st.columns(2)
                             with sentiment_col1:
+                                st.markdown("**Basic Sentiment:**")
+                                st.write(article["sentiment"])
+                                if "sentiment_score" in article:
+                                    st.write(f"**Confidence Score:** {article['sentiment_score']*100:.1f}%")

                             with sentiment_col2:
                                 # Display fine-grained sentiment if available
                                 if "fine_grained_sentiment" in article and article["fine_grained_sentiment"]:
+                                    st.markdown("**Detailed Sentiment:**")
                                     fine_grained = article["fine_grained_sentiment"]
                                     if "category" in fine_grained:
+                                        st.write(f"Category: {fine_grained['category']}")
                                     if "confidence" in fine_grained:
+                                        st.write(f"Confidence: {fine_grained['confidence']*100:.1f}%")

                             # Display sentiment indices if available
                             if "sentiment_indices" in article and article["sentiment_indices"]:
@@ -146,24 +201,20 @@ def main():
                                     st.markdown(f"> {target['context']}")
                                     st.markdown("---")

+                        # Display URL if available
                         if "url" in article:
+                            st.markdown(f"**[Read More]({article['url']})**")

                 # Display Comparative Analysis
                 st.header("📊 Comparative Analysis")
+                analysis = data.get("comparative_sentiment_score", {})

                 # Sentiment Distribution
                 if "sentiment_distribution" in analysis:
                     st.subheader("Sentiment Distribution")

                     sentiment_dist = analysis["sentiment_distribution"]

                     try:
                         # Extract basic sentiment data
                         if isinstance(sentiment_dist, dict):
@@ -187,7 +238,7 @@ def main():
                         else:
                             percentages = {k: 0 for k in basic_dist}

+                        # Display as metrics
                         st.write("**Sentiment Distribution:**")

                         col1, col2, col3 = st.columns(3)
@@ -210,13 +261,11 @@ def main():
                             f"{percentages.get('neutral', 0):.1f}%"
                         )

+                        # Create visualization
                         chart_data = pd.DataFrame({
                             'Sentiment': ['Positive', 'Negative', 'Neutral'],
                             'Count': [
+                                basic_dist.get('positive', 0),
                                 basic_dist.get('negative', 0),
                                 basic_dist.get('neutral', 0)
                             ],
@@ -227,71 +276,44 @@ def main():
                             ]
                         })

                         chart = alt.Chart(chart_data).mark_bar().encode(
+                            y='Sentiment',
+                            x='Count',
                             color=alt.Color('Sentiment', scale=alt.Scale(
                                 domain=['Positive', 'Negative', 'Neutral'],
                                 range=['green', 'red', 'gray']
                             )),
+                            tooltip=['Sentiment', 'Count', 'Percentage']
                         ).properties(
                             width=600,
                             height=300
                         )

                         text = chart.mark_text(
                             align='left',
                             baseline='middle',
+                            dx=3
                         ).encode(
                             text='Percentage'
                         )

                         chart_with_text = (chart + text)
                         st.altair_chart(chart_with_text, use_container_width=True)

                     except Exception as e:
                         st.error(f"Error creating visualization: {str(e)}")

                 # Display sentiment indices if available
                 if "sentiment_indices" in analysis and analysis["sentiment_indices"]:
                     st.subheader("Sentiment Indices")

                     indices = analysis["sentiment_indices"]

                     try:
                         if isinstance(indices, dict):
+                            # Display as metrics in columns
                             cols = st.columns(3)

                             display_names = {
                                 "positivity_index": "Positivity",
                                 "negativity_index": "Negativity",
@@ -301,22 +323,18 @@ def main():
                                 "esg_relevance": "ESG Relevance"
                             }

                             for i, (key, value) in enumerate(indices.items()):
                                 if isinstance(value, (int, float)):
                                     with cols[i % 3]:
                                         display_name = display_names.get(key, key.replace("_", " ").title())
                                         st.metric(display_name, f"{value:.2f}")

+                            # Create visualization
                             chart_data = pd.DataFrame({
                                 'Index': [display_names.get(k, k.replace("_", " ").title()) for k in indices.keys()],
                                 'Value': [v if isinstance(v, (int, float)) else 0 for v in indices.values()]
                             })

                             chart = alt.Chart(chart_data).mark_bar().encode(
                                 x='Value',
                                 y='Index',
@@ -338,89 +356,107 @@ def main():
                             - **Confidence**: Confidence in the sentiment analysis (0-1)
                             - **ESG Relevance**: Relevance to Environmental, Social, and Governance topics (0-1)
                             """)
                     except Exception as e:
                         st.error(f"Error creating indices visualization: {str(e)}")

+                # Display Final Analysis
+                st.header("📊 Final Analysis")

+                # Display overall sentiment analysis with enhanced formatting
+                if data.get("final_sentiment_analysis"):
+                    st.markdown("### Overall Sentiment Analysis")
+                    analysis_parts = data["final_sentiment_analysis"].split(". ")
+                    if len(analysis_parts) >= 2:
+                        # First sentence - Overall sentiment
+                        st.markdown(f"**{analysis_parts[0]}.**")
+                        # Second sentence - Key findings
+                        st.markdown(f"**{analysis_parts[1]}.**")
+                        # Third sentence - Additional insights (if available)
+                        if len(analysis_parts) > 2:
+                            st.markdown(f"**{analysis_parts[2]}.**")
+                    else:
+                        st.write(data["final_sentiment_analysis"])
+
+                    # Add sentiment strength indicator
+                    if data.get("ensemble_info"):
+                        ensemble_info = data["ensemble_info"]
+                        if "model_agreement" in ensemble_info:
+                            agreement = ensemble_info["model_agreement"]
+                            strength = "Strong" if agreement > 0.8 else "Moderate" if agreement > 0.6 else "Weak"
+                            st.markdown(f"**Sentiment Strength:** {strength} (Agreement: {agreement:.2f})")

+                # Display ensemble model details
+                if data.get("ensemble_info"):
+                    st.subheader("Ensemble Model Details")
+                    ensemble_info = data["ensemble_info"]

+                    # Create columns for model details
+                    model_cols = st.columns(3)

+                    with model_cols[0]:
+                        st.markdown("**Primary Model:**")
+                        if "models" in ensemble_info and "transformer" in ensemble_info["models"]:
+                            model = ensemble_info["models"]["transformer"]
+                            st.write(f"Sentiment: {model['sentiment']}")
+                            st.write(f"Score: {model['score']:.3f}")

+                    with model_cols[1]:
+                        st.markdown("**TextBlob Analysis:**")
+                        if "models" in ensemble_info and "textblob" in ensemble_info["models"]:
+                            model = ensemble_info["models"]["textblob"]
+                            st.write(f"Sentiment: {model['sentiment']}")
+                            st.write(f"Score: {model['score']:.3f}")
+
+                    with model_cols[2]:
+                        st.markdown("**VADER Analysis:**")
+                        if "models" in ensemble_info and "vader" in ensemble_info["models"]:
+                            model = ensemble_info["models"]["vader"]
+                            st.write(f"Sentiment: {model['sentiment']}")
+                            st.write(f"Score: {model['score']:.3f}")
+
+                    # Display ensemble agreement if available
+                    if "model_agreement" in ensemble_info:
+                        st.markdown(f"**Model Agreement:** {ensemble_info['model_agreement']:.3f}")
+
+                # Display Hindi audio player
+                st.subheader("🔊 Listen to Analysis (Hindi)")
+                if data.get("audio_path") and os.path.exists(data["audio_path"]):
+                    st.audio(data["audio_path"])
+                else:
+                    st.info("Generating Hindi audio summary...")
+                    with st.spinner("Please wait while we generate the Hindi audio summary..."):
+                        # Try to generate audio again
+                        translator = get_translator()
+                        if translator and data.get("final_sentiment_analysis"):
+                            try:
+                                # Translate final analysis to Hindi
+                                translated_analysis = translator.translate(
+                                    data["final_sentiment_analysis"],
+                                    dest='hi'
+                                ).text
+
+                                # Generate Hindi audio
+                                tts_converter = TextToSpeechConverter()
+                                audio_path = tts_converter.generate_audio(
+                                    translated_analysis,
+                                    f'{company}_summary'
+                                )
+                                if audio_path and os.path.exists(audio_path):
+                                    st.audio(audio_path)
+                                else:
+                                    st.error("Hindi audio summary not available")
+                            except Exception as e:
+                                st.error(f"Error generating Hindi audio: {str(e)}")
+                        else:
+                            st.error("Hindi audio summary not available")

                 # Total Articles
                 if "total_articles" in analysis:
                     st.sidebar.info(f"Found {analysis['total_articles']} articles")
+
+            except Exception as e:
+                st.error(f"Error analyzing company data: {str(e)}")
+                print(f"Error: {str(e)}")

     # Add a disclaimer
     st.sidebar.markdown("---")
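
The frontend above relies on only a handful of keys in the dictionary returned by analyze_company_data (plus the audio_path that process_company adds after text-to-speech). As a reading aid, here is a minimal sketch of that expected shape, inferred from the fields accessed in app.py; the values are illustrative only, and the real structure produced by utils may contain additional fields.

# Hypothetical example only: the payload shape app.py expects, inferred from
# the keys it reads. utils.analyze_company_data may return more fields.
example_data = {
    "articles": [
        {
            "title": "Example headline",
            "content": "Full article text ...",
            "summary": "Short summary ...",
            "source": "Example News",
            "url": "https://example.com/article",
            "sentiment": "Positive",
            "sentiment_score": 0.91,
            "fine_grained_sentiment": {"category": "optimistic", "confidence": 0.84},
            "sentiment_indices": {"positivity_index": 0.72, "negativity_index": 0.08},
        }
    ],
    "comparative_sentiment_score": {
        "sentiment_distribution": {"basic": {"positive": 6, "negative": 2, "neutral": 2}},
        "sentiment_indices": {
            "positivity_index": 0.65,
            "negativity_index": 0.12,
            "emotional_intensity": 0.40,
            "confidence_score": 0.81,
            "esg_relevance": 0.22,
        },
        "total_articles": 10,
    },
    "final_sentiment_analysis": "Overall coverage is positive. Growth and product news dominate.",
    "ensemble_info": {
        "model_agreement": 0.87,
        "models": {
            "transformer": {"sentiment": "positive", "score": 0.93},
            "textblob": {"sentiment": "positive", "score": 0.38},
            "vader": {"sentiment": "positive", "score": 0.64},
        },
    },
    "audio_path": None,  # set by process_company() once Hindi TTS succeeds
}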
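
For context on what this commit removes: the deleted helper at the top of the old file went through a separate backend over HTTP (requests plus API_BASE_URL from the removed config module) and then downloaded the generated audio from data['audio_url'], whereas the new code calls utils.analyze_company_data in-process. The deleted body is truncated in the diff view, so the sketch below is a hypothetical reconstruction of that request/response pattern, not the original code; the function name and endpoint path are assumptions.

# Hypothetical sketch of the removed HTTP-based flow; not the original code.
import requests
import streamlit as st

API_BASE_URL = "http://localhost:8000"  # placeholder; originally imported from config

def fetch_analysis_via_api(company_name):
    # Endpoint name is assumed for illustration.
    response = requests.post(f"{API_BASE_URL}/analyze", json={"company": company_name})
    if response.status_code == 200:
        data = response.json()
        # The surviving deleted lines show the audio being fetched separately:
        if 'audio_url' in data:
            audio_response = requests.get(f"{API_BASE_URL}{data['audio_url']}")
            if audio_response.status_code == 200:
                data['audio_content'] = audio_response.content
        return data
    st.error(f"Error from API: {response.text}")
    return {"articles": [], "comparative_sentiment_score": {}, "final_sentiment_analysis": "", "audio_url": None}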