Replit Deployment committed
Commit bb6d7b4 · 1 Parent(s): 0948a02
Deployment from Replit
Files changed:
- assets/cyberforge_logo.svg +72 -0
- assets/logo.svg +71 -0
- components/__init__.py +1 -0
- components/alerts.py +554 -0
- components/dashboard.py +335 -0
- components/live_feed.py +769 -0
- components/monitoring.py +555 -0
- components/reports.py +442 -0
- components/search_trends.py +684 -0
- components/subscriptions.py +478 -0
- components/threats.py +543 -0
- components/web_scraper.py +330 -0
- hf_app.py +146 -0
- hf_database.py +100 -0
- huggingface-space.yml +9 -0
- requirements.txt +20 -0
- src/database_init.py +113 -0
- src/models/__init__.py +3 -0
- src/models/alert.py +68 -0
- src/models/base.py +20 -0
- src/models/dark_web_content.py +93 -0
- src/models/indicator.py +49 -0
- src/models/report.py +78 -0
- src/models/search_history.py +146 -0
- src/models/subscription.py +143 -0
- src/models/threat.py +76 -0
- src/models/user.py +32 -0
- src/streamlit_database.py +850 -0
- src/streamlit_subscription_services.py +450 -0
assets/cyberforge_logo.svg
ADDED
assets/logo.svg
ADDED
components/__init__.py
ADDED
@@ -0,0 +1 @@
+# This file is intentionally left empty to make the directory a Python package
components/alerts.py
ADDED
@@ -0,0 +1,554 @@
import streamlit as st
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import numpy as np
from datetime import datetime, timedelta

def render_alerts():
    st.title("Alert Management")

    # Alert Overview
    st.subheader("Alert Overview")

    # Alert metrics
    col1, col2, col3, col4, col5 = st.columns(5)

    with col1:
        st.metric(
            label="Active Alerts",
            value="27",
            delta="4",
            delta_color="inverse"
        )

    with col2:
        st.metric(
            label="Critical",
            value="8",
            delta="2",
            delta_color="inverse"
        )

    with col3:
        st.metric(
            label="High",
            value="12",
            delta="3",
            delta_color="inverse"
        )

    with col4:
        st.metric(
            label="Medium",
            value="5",
            delta="-1",
            delta_color="normal"
        )

    with col5:
        st.metric(
            label="Low",
            value="2",
            delta="0",
            delta_color="normal"
        )

    # Filters for alerts
    with st.container():
        st.markdown("### Alert Filters")

        filter_col1, filter_col2, filter_col3, filter_col4 = st.columns(4)

        with filter_col1:
            severity_filter = st.multiselect(
                "Severity",
                ["Critical", "High", "Medium", "Low"],
                default=["Critical", "High", "Medium", "Low"]
            )

        with filter_col2:
            status_filter = st.multiselect(
                "Status",
                ["New", "In Progress", "Resolved", "False Positive"],
                default=["New", "In Progress"]
            )

        with filter_col3:
            date_range = st.selectbox(
                "Time Range",
                ["Last 24 Hours", "Last 7 Days", "Last 30 Days", "Custom Range"],
                index=1
            )

        with filter_col4:
            category_filter = st.multiselect(
                "Category",
                ["Data Breach", "Ransomware", "Credentials", "PII", "Brand Abuse", "Source Code", "Other"],
                default=["Data Breach", "Credentials", "PII"]
            )

    # Alert list
    st.markdown("### Active Alerts")

    # Sample alert data
    alerts = [
        {
            "id": "ALERT-2025-04081",
            "timestamp": "2025-04-08 14:32:21",
            "severity": "Critical",
            "category": "Data Breach",
            "description": "Patient records from Memorial Hospital found on dark web marketplace.",
            "status": "New",
            "source": "AlphaBay Market"
        },
        {
            "id": "ALERT-2025-04082",
            "timestamp": "2025-04-08 10:15:43",
            "severity": "Critical",
            "category": "Ransomware",
            "description": "Company mentioned in ransomware group's leak site as new victim.",
            "status": "New",
            "source": "BlackCat Leak Site"
        },
        {
            "id": "ALERT-2025-04083",
            "timestamp": "2025-04-08 08:42:19",
            "severity": "High",
            "category": "Credentials",
            "description": "123 employee credentials found in new breach compilation.",
            "status": "In Progress",
            "source": "BreachForums"
        },
        {
            "id": "ALERT-2025-04071",
            "timestamp": "2025-04-07 22:03:12",
            "severity": "High",
            "category": "PII",
            "description": "Customer PII being offered for sale on hacking forum.",
            "status": "In Progress",
            "source": "XSS Forum"
        },
        {
            "id": "ALERT-2025-04072",
            "timestamp": "2025-04-07 18:37:56",
            "severity": "Medium",
            "category": "Brand Abuse",
            "description": "Phishing campaign using company brand assets detected.",
            "status": "New",
            "source": "Telegram Channel"
        },
        {
            "id": "ALERT-2025-04073",
            "timestamp": "2025-04-07 14:21:08",
            "severity": "Medium",
            "category": "Source Code",
            "description": "Fragments of internal source code shared in paste site.",
            "status": "In Progress",
            "source": "DeepPaste"
        },
        {
            "id": "ALERT-2025-04063",
            "timestamp": "2025-04-06 20:15:37",
            "severity": "Low",
            "category": "Credentials",
            "description": "Legacy system credentials posted in hacking forum.",
            "status": "New",
            "source": "RaidForums"
        }
    ]

    # Create a dataframe for the alerts
    alert_df = pd.DataFrame(alerts)

    # Apply colors to severity column
    def color_severity(val):
        color_map = {
            'Critical': '#E74C3C',
            'High': '#F1C40F',
            'Medium': '#3498DB',
            'Low': '#2ECC71'
        }
        return f'background-color: {color_map.get(val, "#ECF0F1")}'

    # Style the dataframe
    styled_df = alert_df.style.applymap(color_severity, subset=['severity'])

    # Display the table
    st.dataframe(styled_df, use_container_width=True, height=300)

    # Action buttons for alerts
    action_col1, action_col2, action_col3, action_col4, action_col5 = st.columns(5)

    with action_col1:
        st.button("Investigate", key="investigate_alert")

    with action_col2:
        st.button("Mark as Resolved", key="resolve_alert")

    with action_col3:
        st.button("Assign to Analyst", key="assign_alert")

    with action_col4:
        st.button("Mark as False Positive", key="false_positive")

    with action_col5:
        st.button("Generate Report", key="generate_report")

    # Alert visualization
    st.markdown("### Alert Analytics")

    # Tabs for different alert visualizations
    tab1, tab2, tab3 = st.tabs(["Alert Trend", "Category Distribution", "Source Analysis"])

    with tab1:
        # Alert trend over time
        st.subheader("Alert Trend (Last 30 Days)")

        # Generate dates for the past 30 days
        dates = [(datetime.now() - timedelta(days=i)).strftime('%Y-%m-%d') for i in range(30, 0, -1)]

        # Sample data for alert trends
        critical_alerts = np.random.randint(5, 12, 30)
        high_alerts = np.random.randint(8, 20, 30)
        medium_alerts = np.random.randint(12, 25, 30)
        low_alerts = np.random.randint(15, 30, 30)

        trend_data = pd.DataFrame({
            'Date': dates,
            'Critical': critical_alerts,
            'High': high_alerts,
            'Medium': medium_alerts,
            'Low': low_alerts
        })

        # Create a stacked area chart
        fig = go.Figure()

        fig.add_trace(go.Scatter(
            x=trend_data['Date'], y=trend_data['Critical'],
            mode='lines',
            line=dict(width=0.5, color='#E74C3C'),
            stackgroup='one',
            name='Critical'
        ))

        fig.add_trace(go.Scatter(
            x=trend_data['Date'], y=trend_data['High'],
            mode='lines',
            line=dict(width=0.5, color='#F1C40F'),
            stackgroup='one',
            name='High'
        ))

        fig.add_trace(go.Scatter(
            x=trend_data['Date'], y=trend_data['Medium'],
            mode='lines',
            line=dict(width=0.5, color='#3498DB'),
            stackgroup='one',
            name='Medium'
        ))

        fig.add_trace(go.Scatter(
            x=trend_data['Date'], y=trend_data['Low'],
            mode='lines',
            line=dict(width=0.5, color='#2ECC71'),
            stackgroup='one',
            name='Low'
        ))

        fig.update_layout(
            paper_bgcolor='rgba(26, 26, 26, 0)',
            plot_bgcolor='rgba(26, 26, 26, 0)',
            legend=dict(
                orientation="h",
                yanchor="bottom",
                y=1.02,
                xanchor="right",
                x=1
            ),
            margin=dict(l=0, r=0, t=30, b=0),
            xaxis=dict(
                showgrid=False,
                title=None,
                tickfont=dict(color='#ECF0F1')
            ),
            yaxis=dict(
                showgrid=True,
                gridcolor='rgba(44, 62, 80, 0.3)',
                title="Alert Count",
                tickfont=dict(color='#ECF0F1')
            ),
            height=400
        )

        st.plotly_chart(fig, use_container_width=True)

    with tab2:
        # Alert distribution by category
        st.subheader("Alert Category Distribution")

        # Sample data for categories
        categories = ['Data Breach', 'Credentials', 'PII', 'Ransomware', 'Brand Abuse', 'Source Code', 'Infrastructure', 'Other']
        counts = [35, 28, 18, 12, 8, 6, 4, 2]

        category_data = pd.DataFrame({
            'Category': categories,
            'Count': counts
        })

        # Create a horizontal bar chart
        fig = px.bar(
            category_data,
            y='Category',
            x='Count',
            orientation='h',
            color='Count',
            color_continuous_scale=['#2ECC71', '#3498DB', '#F1C40F', '#E74C3C'],
            height=400
        )

        fig.update_layout(
            paper_bgcolor='rgba(26, 26, 26, 0)',
            plot_bgcolor='rgba(26, 26, 26, 0)',
            coloraxis_showscale=False,
            xaxis=dict(
                title="Number of Alerts",
                showgrid=True,
                gridcolor='rgba(44, 62, 80, 0.3)',
                tickfont=dict(color='#ECF0F1')
            ),
            yaxis=dict(
                title=None,
                showgrid=False,
                tickfont=dict(color='#ECF0F1')
            ),
            margin=dict(l=0, r=0, t=30, b=0)
        )

        st.plotly_chart(fig, use_container_width=True)

    with tab3:
        # Alert sources analysis
        st.subheader("Alert Sources")

        # Sample data for sources
        sources = ['Dark Web Markets', 'Hacking Forums', 'Paste Sites', 'Telegram Channels', 'Ransomware Blogs', 'IRC Channels', 'Social Media']
        source_counts = [32, 27, 18, 15, 10, 7, 4]

        source_data = pd.DataFrame({
            'Source': sources,
            'Count': source_counts
        })

        # Create a pie chart
        fig = px.pie(
            source_data,
            values='Count',
            names='Source',
            hole=0.4,
            color_discrete_sequence=['#E74C3C', '#F1C40F', '#3498DB', '#2ECC71', '#9B59B6', '#E67E22', '#1ABC9C']
        )

        fig.update_layout(
            paper_bgcolor='rgba(26, 26, 26, 0)',
            plot_bgcolor='rgba(26, 26, 26, 0)',
            showlegend=True,
            legend=dict(
                orientation="h",
                yanchor="bottom",
                y=-0.2,
                xanchor="center",
                x=0.5,
                font=dict(color='#ECF0F1')
            ),
            margin=dict(l=0, r=0, t=30, b=0),
            height=400
        )

        st.plotly_chart(fig, use_container_width=True)

    # Alert rules configuration
    st.markdown("---")
    st.subheader("Alert Rules Configuration")

    # Tabs for different rule categories
    rule_tab1, rule_tab2 = st.tabs(["Active Rules", "Rule Editor"])

    with rule_tab1:
        # Sample data for alert rules
        alert_rules = pd.DataFrame({
            "Rule Name": [
                "Critical Data Breach Detection",
                "Ransomware Victim Monitoring",
                "Employee Credential Exposure",
                "Source Code Leak Detection",
                "Brand Impersonation Alert",
                "Executive PII Monitoring",
                "Infrastructure Exposure"
            ],
            "Category": ["Data Breach", "Ransomware", "Credentials", "Source Code", "Brand Abuse", "PII", "Infrastructure"],
            "Severity": ["Critical", "Critical", "High", "High", "Medium", "Critical", "Medium"],
            "Sources": ["All", "Leak Sites", "Paste Sites, Forums", "Paste Sites, Forums", "All", "All", "Forums, Markets"],
            "Status": ["Active", "Active", "Active", "Active", "Active", "Active", "Active"]
        })

        # Display rules table
        st.dataframe(alert_rules, use_container_width=True)

        # Rule action buttons
        rule_col1, rule_col2, rule_col3, rule_col4 = st.columns(4)

        with rule_col1:
            st.button("Create New Rule", key="new_rule")

        with rule_col2:
            st.button("Edit Selected", key="edit_rule")

        with rule_col3:
            st.button("Duplicate", key="duplicate_rule")

        with rule_col4:
            st.button("Disable", key="disable_rule")

    with rule_tab2:
        # Rule editor form
        with st.form("rule_editor"):
            st.markdown("### Rule Editor")

            rule_name = st.text_input("Rule Name", value="New Alert Rule")

            editor_col1, editor_col2 = st.columns(2)

            with editor_col1:
                rule_category = st.selectbox(
                    "Category",
                    ["Data Breach", "Ransomware", "Credentials", "PII", "Brand Abuse", "Source Code", "Infrastructure", "Other"]
                )

                rule_severity = st.selectbox(
                    "Severity",
                    ["Critical", "High", "Medium", "Low"]
                )

            with editor_col2:
                rule_sources = st.multiselect(
                    "Monitoring Sources",
                    ["Dark Web Markets", "Hacking Forums", "Paste Sites", "Leak Sites", "Telegram Channels", "IRC Channels", "Social Media", "All"],
                    default=["All"]
                )

                rule_status = st.selectbox(
                    "Status",
                    ["Active", "Disabled"]
                )

            st.markdown("### Rule Conditions")

            condition_type = st.selectbox(
                "Condition Type",
                ["Keyword Match", "Regular Expression", "Data Pattern", "Complex Query"]
            )

            if condition_type == "Keyword Match":
                keywords = st.text_area("Keywords (one per line)", height=100)

                keyword_options = st.columns(3)
                with keyword_options[0]:
                    case_sensitive = st.checkbox("Case Sensitive", value=False)
                with keyword_options[1]:
                    whole_word = st.checkbox("Whole Word Only", value=False)
                with keyword_options[2]:
                    proximity = st.checkbox("Proximity Search", value=False)

            elif condition_type == "Regular Expression":
                regex_pattern = st.text_area("Regular Expression Pattern", height=100)

                regex_options = st.columns(2)
                with regex_options[0]:
                    test_regex = st.button("Test RegEx")
                with regex_options[1]:
                    validate_regex = st.button("Validate Pattern")

            elif condition_type == "Data Pattern":
                data_patterns = st.multiselect(
                    "Data Patterns to Detect",
                    ["Email Addresses", "Credit Card Numbers", "Social Security Numbers", "Phone Numbers", "IP Addresses", "API Keys", "Passwords"]
                )

            elif condition_type == "Complex Query":
                complex_query = st.text_area("Complex Query", height=100,
                                             placeholder="Example: (keyword1 OR keyword2) AND (keyword3) NOT (keyword4)")

            st.markdown("### Response Actions")

            notification_channels = st.multiselect(
                "Notification Channels",
                ["Email", "Slack", "API Webhook", "SMS"],
                default=["Email", "Slack"]
            )

            auto_actions = st.multiselect(
                "Automated Actions",
                ["Create Incident Ticket", "Add to Watchlist", "Block in Firewall", "None"],
                default=["Create Incident Ticket"]
            )

            submit_rule = st.form_submit_button("Save Rule")

            if submit_rule:
                st.success("Alert rule saved successfully!")

    # Alert notification settings
    st.markdown("---")
    st.subheader("Alert Notification Settings")

    # Notification channels
    notif_col1, notif_col2 = st.columns(2)

    with notif_col1:
        st.markdown("### Notification Channels")

        with st.container():
            st.checkbox("Email Notifications", value=True)
            st.text_input("Email Recipients", value="[email protected], [email protected]")

            st.checkbox("Slack Notifications", value=True)
            st.text_input("Slack Channel", value="#security-alerts")

            st.checkbox("SMS Notifications", value=False)
            st.text_input("Phone Numbers", placeholder="+1234567890, +0987654321")

            st.checkbox("API Webhook", value=False)
            st.text_input("Webhook URL", placeholder="https://api.example.com/webhook")

    with notif_col2:
        st.markdown("### Notification Schedule")

        with st.container():
            notify_critical = st.radio(
                "Critical Alerts",
                ["Immediate", "Hourly Digest", "Daily Digest"],
                index=0
            )

            notify_high = st.radio(
                "High Alerts",
                ["Immediate", "Hourly Digest", "Daily Digest"],
                index=1
            )

            notify_medium = st.radio(
                "Medium Alerts",
                ["Immediate", "Hourly Digest", "Daily Digest"],
                index=2
            )

            notify_low = st.radio(
                "Low Alerts",
                ["Immediate", "Hourly Digest", "Daily Digest"],
                index=2
            )

    # Save alert settings button
    st.button("Save Notification Settings", type="primary", key="save_notif")
components/dashboard.py
ADDED
@@ -0,0 +1,335 @@
import streamlit as st
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import numpy as np
import altair as alt
from datetime import datetime, timedelta

def render_dashboard():
    st.title("Dark Web Intelligence Dashboard")

    # Date range selector
    col1, col2 = st.columns([3, 1])

    with col1:
        st.markdown("## Overview")
        st.markdown("Real-time monitoring of dark web activities, data breaches, and emerging threats.")

    with col2:
        date_range = st.selectbox(
            "Time Range",
            ["Last 24 Hours", "Last 7 Days", "Last 30 Days", "Last Quarter", "Custom Range"],
            index=1
        )

    # Dashboard metrics row
    metric_col1, metric_col2, metric_col3, metric_col4 = st.columns(4)

    with metric_col1:
        st.metric(
            label="Active Threats",
            value="27",
            delta="4",
            delta_color="inverse"
        )

    with metric_col2:
        st.metric(
            label="Data Breaches",
            value="3",
            delta="-2",
            delta_color="normal"
        )

    with metric_col3:
        st.metric(
            label="Credential Leaks",
            value="1,247",
            delta="89",
            delta_color="inverse"
        )

    with metric_col4:
        st.metric(
            label="Threat Score",
            value="72/100",
            delta="12",
            delta_color="inverse"
        )

    # First row - Threat map and category distribution
    row1_col1, row1_col2 = st.columns([2, 1])

    with row1_col1:
        st.subheader("Global Threat Origin Map")

        # World map of threat origins
        fig = go.Figure(data=go.Choropleth(
            locations=['USA', 'RUS', 'CHN', 'IRN', 'PRK', 'UKR', 'DEU', 'GBR', 'CAN', 'BRA', 'IND'],
            z=[25, 42, 37, 30, 28, 18, 15, 20, 12, 14, 23],
            colorscale='Reds',
            autocolorscale=False,
            reversescale=False,
            marker_line_color='#2C3E50',
            marker_line_width=0.5,
            colorbar_title='Threat<br>Index',
        ))

        fig.update_layout(
            geo=dict(
                showframe=False,
                showcoastlines=True,
                projection_type='equirectangular',
                bgcolor='rgba(26, 26, 26, 0)',
                coastlinecolor='#2C3E50',
                landcolor='#1A1A1A',
                oceancolor='#2C3E50',
            ),
            paper_bgcolor='rgba(26, 26, 26, 0)',
            plot_bgcolor='rgba(26, 26, 26, 0)',
            margin=dict(l=0, r=0, t=0, b=0),
            height=400,
        )

        st.plotly_chart(fig, use_container_width=True)

    with row1_col2:
        st.subheader("Threat Categories")

        # Threat category distribution
        categories = ['Data Breach', 'Ransomware', 'Phishing', 'Malware', 'Identity Theft']
        values = [38, 24, 18, 14, 6]

        fig = px.pie(
            names=categories,
            values=values,
            hole=0.6,
            color_discrete_sequence=['#E74C3C', '#F1C40F', '#3498DB', '#2ECC71', '#9B59B6']
        )

        fig.update_layout(
            paper_bgcolor='rgba(26, 26, 26, 0)',
            plot_bgcolor='rgba(26, 26, 26, 0)',
            showlegend=True,
            legend=dict(
                orientation="v",
                yanchor="middle",
                y=0.5,
                xanchor="center",
                x=0.5
            ),
            margin=dict(l=0, r=0, t=30, b=0),
            height=300,
        )

        st.plotly_chart(fig, use_container_width=True)

    # Second row - Trend and recent activities
    row2_col1, row2_col2 = st.columns([3, 2])

    with row2_col1:
        st.subheader("Threat Activity Trend")

        # Generate dates for the past 14 days
        dates = [(datetime.now() - timedelta(days=i)).strftime('%Y-%m-%d') for i in range(14, 0, -1)]

        # Sample data for threats over time
        threat_data = {
            'Date': dates,
            'High': [12, 10, 15, 11, 14, 16, 18, 20, 17, 12, 14, 13, 19, 22],
            'Medium': [23, 25, 22, 20, 24, 25, 26, 24, 22, 21, 23, 25, 28, 27],
            'Low': [32, 30, 35, 34, 36, 33, 30, 34, 38, 37, 35, 34, 32, 30]
        }

        df = pd.DataFrame(threat_data)

        # Create stacked area chart
        fig = go.Figure()

        fig.add_trace(go.Scatter(
            x=df['Date'], y=df['High'],
            mode='lines',
            line=dict(width=0.5, color='#E74C3C'),
            stackgroup='one',
            name='High'
        ))

        fig.add_trace(go.Scatter(
            x=df['Date'], y=df['Medium'],
            mode='lines',
            line=dict(width=0.5, color='#F1C40F'),
            stackgroup='one',
            name='Medium'
        ))

        fig.add_trace(go.Scatter(
            x=df['Date'], y=df['Low'],
            mode='lines',
            line=dict(width=0.5, color='#2ECC71'),
            stackgroup='one',
            name='Low'
        ))

        fig.update_layout(
            paper_bgcolor='rgba(26, 26, 26, 0)',
            plot_bgcolor='rgba(26, 26, 26, 0)',
            legend=dict(
                orientation="h",
                yanchor="bottom",
                y=1.02,
                xanchor="right",
                x=1
            ),
            margin=dict(l=0, r=0, t=30, b=0),
            xaxis=dict(
                showgrid=False,
                title=None,
                tickfont=dict(color='#ECF0F1')
            ),
            yaxis=dict(
                showgrid=True,
                gridcolor='rgba(44, 62, 80, 0.3)',
                title=None,
                tickfont=dict(color='#ECF0F1')
            ),
            height=300
        )

        st.plotly_chart(fig, use_container_width=True)

    with row2_col2:
        st.subheader("Recent Intelligence Feeds")

        # Recent dark web activities
        activities = [
            {"time": "10 mins ago", "event": "New ransomware group identified", "severity": "High"},
            {"time": "43 mins ago", "event": "Database with 50K credentials for sale", "severity": "High"},
            {"time": "2 hours ago", "event": "Zero-day exploit being discussed", "severity": "Medium"},
            {"time": "3 hours ago", "event": "New phishing campaign detected", "severity": "Medium"},
            {"time": "5 hours ago", "event": "PII data from financial institution leaked", "severity": "High"}
        ]

        for activity in activities:
            severity_color = "#E74C3C" if activity["severity"] == "High" else "#F1C40F" if activity["severity"] == "Medium" else "#2ECC71"

            cols = st.columns([1, 4, 1])
            cols[0].caption(activity["time"])
            cols[1].markdown(activity["event"])
            cols[2].markdown(f"<span style='color:{severity_color}'>{activity['severity']}</span>", unsafe_allow_html=True)

        st.markdown("---")

    # Third row - Sectors at risk and trending keywords
    row3_col1, row3_col2 = st.columns(2)

    with row3_col1:
        st.subheader("Sectors at Risk")

        # Horizontal bar chart for sectors at risk
        sectors = ['Healthcare', 'Finance', 'Technology', 'Education', 'Government', 'Manufacturing']
        risk_scores = [87, 82, 75, 63, 78, 56]

        sector_data = pd.DataFrame({
            'Sector': sectors,
            'Risk Score': risk_scores
        })

        fig = px.bar(
            sector_data,
            x='Risk Score',
            y='Sector',
            orientation='h',
            color='Risk Score',
            color_continuous_scale=['#2ECC71', '#F1C40F', '#E74C3C'],
            range_color=[50, 100]
        )

        fig.update_layout(
            paper_bgcolor='rgba(26, 26, 26, 0)',
            plot_bgcolor='rgba(26, 26, 26, 0)',
            margin=dict(l=0, r=0, t=0, b=0),
            height=250,
            coloraxis_showscale=False,
            xaxis=dict(
                showgrid=False,
                title=None,
                tickfont=dict(color='#ECF0F1')
            ),
            yaxis=dict(
                showgrid=False,
                title=None,
                tickfont=dict(color='#ECF0F1')
            )
        )

        st.plotly_chart(fig, use_container_width=True)

    with row3_col2:
        st.subheader("Trending Keywords")

        # Word cloud alternative - trending keywords with frequency
        keywords = [
            {"word": "ransomware", "count": 42},
            {"word": "zero-day", "count": 37},
            {"word": "botnet", "count": 31},
            {"word": "credentials", "count": 28},
            {"word": "bitcoin", "count": 25},
            {"word": "exploit", "count": 23},
            {"word": "malware", "count": 21},
            {"word": "backdoor", "count": 18},
            {"word": "phishing", "count": 16},
            {"word": "darknet", "count": 15}
        ]

        keyword_data = pd.DataFrame(keywords)

        # Calculate sizes for visual representation
        max_count = max(keyword_data['count'])
        keyword_data['size'] = keyword_data['count'].apply(lambda x: int((x / max_count) * 100) + 70)

        # Create a simple horizontal bar to represent frequency
        chart = alt.Chart(keyword_data).mark_bar().encode(
            x=alt.X('count:Q', title=None),
            y=alt.Y('word:N', title=None, sort='-x'),
            color=alt.Color('count:Q', scale=alt.Scale(scheme='reds'), legend=None)
        ).properties(
            height=250
        )

        st.altair_chart(chart, use_container_width=True)

    # Fourth row - Latest intelligence reports
    st.subheader("Latest Intelligence Reports")

    reports = [
        {
            "title": "Major Healthcare Breach Analysis",
            "date": "2025-04-08",
            "summary": "Analysis of recent healthcare data breach affecting over 500,000 patient records.",
            "severity": "Critical"
        },
        {
            "title": "Emerging Ransomware Group Activities",
            "date": "2025-04-07",
            "summary": "New ransomware group targeting financial institutions with sophisticated techniques.",
            "severity": "High"
        },
        {
            "title": "Credential Harvesting Campaign",
            "date": "2025-04-05",
            "summary": "Widespread phishing campaign targeting corporate credentials across multiple sectors.",
            "severity": "Medium"
        }
    ]

    row4_cols = st.columns(3)

    for i, report in enumerate(reports):
        with row4_cols[i]:
            severity_color = "#E74C3C" if report["severity"] == "Critical" else "#F1C40F" if report["severity"] == "High" else "#2ECC71"

            st.markdown(f"#### {report['title']}")
            st.markdown(f"<span style='color:{severity_color}'>{report['severity']}</span> | {report['date']}", unsafe_allow_html=True)
            st.markdown(report["summary"])
            st.button("View Full Report", key=f"report_{i}")
components/live_feed.py
ADDED
@@ -0,0 +1,769 @@
1 |
+
import streamlit as st
|
2 |
+
import pandas as pd
|
3 |
+
import time
|
4 |
+
from datetime import datetime, timedelta
|
5 |
+
import random
|
6 |
+
import plotly.graph_objects as go
|
7 |
+
import trafilatura
|
8 |
+
import threading
|
9 |
+
import queue
|
10 |
+
|
11 |
+
# Global queue for storing live feed events
|
12 |
+
feed_queue = queue.Queue(maxsize=100)
|
13 |
+
|
14 |
+
# Sample dark web sources for simulation
|
15 |
+
DARK_WEB_SOURCES = [
|
16 |
+
"AlphaBay Market", "BreachForums", "XSS Forum", "RaidForums", "DeepPaste",
|
17 |
+
"BlackHat Forum", "DarkLeak Site", "HackTown", "Exploit.in", "0day.today",
|
18 |
+
"Telegram Channel: DarkLeaks", "Telegram Channel: DataBreach", "BitHunters IRC",
|
19 |
+
"Genesis Market", "ASAP Market", "Tor Network: Hidden Services", "DarkNetLive"
|
20 |
+
]
|
21 |
+
|
22 |
+
# Sample event types and severities
|
23 |
+
EVENT_TYPES = {
|
24 |
+
"Credential Leak": ["Critical", "High"],
|
25 |
+
"Data Breach": ["Critical", "High", "Medium"],
|
26 |
+
"Ransomware Activity": ["Critical", "High"],
|
27 |
+
"Hacking Tool": ["Medium", "Low"],
|
28 |
+
"Zero-day Exploit": ["Critical", "High"],
|
29 |
+
"Phishing Campaign": ["High", "Medium"],
|
30 |
+
"Dark Web Mention": ["Medium", "Low"],
|
31 |
+
"PII Exposure": ["Critical", "High"],
|
32 |
+
"New Marketplace Listing": ["Medium", "Low"],
|
33 |
+
"Threat Actor Communication": ["High", "Medium"],
|
34 |
+
"Malware Sample": ["High", "Medium", "Low"],
|
35 |
+
"Source Code Leak": ["High", "Medium"]
|
36 |
+
}
|
37 |
+
|
38 |
+
# Keywords associated with your organization
|
39 |
+
MONITORED_KEYWORDS = [
|
40 |
+
"company.com", "companyname", "company name", "CompanyX", "ServiceY", "ProductZ",
|
41 |
+
"company database", "company credentials", "company breach", "company leak",
|
42 |
+
"@company.com", "CEO Name", "CTO Name", "internal documents"
|
43 |
+
]
|
44 |
+
|
45 |
+
# Industries for sector-based alerts
|
46 |
+
INDUSTRIES = [
|
47 |
+
"Healthcare", "Finance", "Technology", "Education", "Government",
|
48 |
+
"Manufacturing", "Retail", "Energy", "Telecommunications", "Transportation"
|
49 |
+
]
|
50 |
+
|
51 |
+
def generate_live_event():
|
52 |
+
"""Generate a simulated live dark web event for demonstration"""
|
53 |
+
current_time = datetime.now()
|
54 |
+
|
55 |
+
# Choose event type and severity
|
56 |
+
event_type = random.choice(list(EVENT_TYPES.keys()))
|
57 |
+
severity = random.choice(EVENT_TYPES[event_type])
|
58 |
+
|
59 |
+
# Choose source
|
60 |
+
source = random.choice(DARK_WEB_SOURCES)
|
61 |
+
|
62 |
+
# Determine if it should mention a monitored keyword (higher chance for critical events)
|
63 |
+
mention_keyword = random.random() < (0.8 if severity == "Critical" else 0.3)
|
64 |
+
keyword = random.choice(MONITORED_KEYWORDS) if mention_keyword else None
|
65 |
+
|
66 |
+
# Choose affected industry
|
67 |
+
industry = random.choice(INDUSTRIES)
|
68 |
+
|
69 |
+
# Generate description
|
70 |
+
if keyword:
|
71 |
+
descriptions = [
|
72 |
+
f"Detected {event_type.lower()} involving {keyword}",
|
73 |
+
f"{keyword} mentioned in context of {event_type.lower()}",
|
74 |
+
f"Potential {event_type.lower()} related to {keyword}",
|
75 |
+
f"New {severity.lower()} severity {event_type.lower()} containing {keyword}",
|
76 |
+
f"Alert: {event_type} with reference to {keyword}"
|
77 |
+
]
|
78 |
+
else:
|
79 |
+
descriptions = [
|
80 |
+
f"New {event_type} affecting {industry} sector",
|
81 |
+
f"Detected {event_type.lower()} targeting {industry} organizations",
|
82 |
+
f"Emerging {event_type.lower()} with {severity.lower()} impact",
|
83 |
+
f"Potential {industry} sector {event_type.lower()} identified",
|
84 |
+
f"{severity} {event_type} observed in {source}"
|
85 |
+
]
|
86 |
+
|
87 |
+
description = random.choice(descriptions)
|
88 |
+
|
89 |
+
# Generate event ID
|
90 |
+
event_id = f"EVT-{current_time.strftime('%y%m%d')}-{random.randint(1000, 9999)}"
|
91 |
+
|
92 |
+
# Create event dictionary
|
93 |
+
event = {
|
94 |
+
"id": event_id,
|
95 |
+
"timestamp": current_time,
|
96 |
+
"event_type": event_type,
|
97 |
+
"severity": severity,
|
98 |
+
"source": source,
|
99 |
+
"description": description,
|
100 |
+
"industry": industry,
|
101 |
+
"relevant": mention_keyword
|
102 |
+
}
|
103 |
+
|
104 |
+
return event
|
105 |
+
|
106 |
+
def start_feed_generator():
|
107 |
+
"""Start background thread to generate feed events"""
|
108 |
+
def generate_events():
|
109 |
+
while True:
|
110 |
+
# Generate a new event
|
111 |
+
event = generate_live_event()
|
112 |
+
|
113 |
+
# Add to queue, remove oldest if full
|
114 |
+
if feed_queue.full():
|
115 |
+
try:
|
116 |
+
feed_queue.get_nowait()
|
117 |
+
except queue.Empty:
|
118 |
+
pass
|
119 |
+
|
120 |
+
try:
|
121 |
+
feed_queue.put_nowait(event)
|
122 |
+
except queue.Full:
|
123 |
+
pass
|
124 |
+
|
125 |
+
# Sleep random interval (2-15 seconds)
|
126 |
+
sleep_time = random.uniform(2, 15)
|
127 |
+
time.sleep(sleep_time)
|
128 |
+
|
129 |
+
# Start the background thread
|
130 |
+
thread = threading.Thread(target=generate_events, daemon=True)
|
131 |
+
thread.start()
|
132 |
+
|
133 |
+
def render_live_feed():
|
134 |
+
st.title("Real-Time Dark Web Monitoring")
|
135 |
+
|
136 |
+
# Initialize the feed generator if it's not already running
|
137 |
+
if 'feed_initialized' not in st.session_state:
|
138 |
+
start_feed_generator()
|
139 |
+
st.session_state.feed_initialized = True
|
140 |
+
st.session_state.feed_events = []
|
141 |
+
st.session_state.last_update = datetime.now()
|
142 |
+
|
143 |
+
# Dashboard layout
|
144 |
+
col1, col2, col3 = st.columns([1, 2, 1])
|
145 |
+
|
146 |
+
with col1:
|
147 |
+
st.markdown("### Monitoring Status")
|
148 |
+
|
149 |
+
# Display monitoring metrics
|
150 |
+
st.metric(
|
151 |
+
label="Active Crawlers",
|
152 |
+
value=str(random.randint(12, 18)),
|
153 |
+
delta=str(random.randint(-2, 3))
|
154 |
+
)
|
155 |
+
|
156 |
+
st.metric(
|
157 |
+
label="Sources Coverage",
|
158 |
+
value=f"{random.randint(85, 98)}%",
|
159 |
+
delta=f"{random.randint(-2, 3)}%"
|
160 |
+
)
|
161 |
+
|
162 |
+
st.metric(
|
163 |
+
label="Scan Frequency",
|
164 |
+
value=f"{random.randint(3, 7)} min",
|
165 |
+
delta=f"{random.choice([-1, -0.5, 0, 0.5])} min",
|
166 |
+
delta_color="inverse"
|
167 |
+
)
|
168 |
+
|
169 |
+
# Filters for live feed
|
170 |
+
st.markdown("### Feed Filters")
|
171 |
+
|
172 |
+
severity_filter = st.multiselect(
|
173 |
+
"Severity",
|
174 |
+
["Critical", "High", "Medium", "Low"],
|
175 |
+
default=["Critical", "High"]
|
176 |
+
)
|
177 |
+
|
178 |
+
source_type = st.multiselect(
|
179 |
+
"Source Type",
|
180 |
+
["Market", "Forum", "Telegram", "IRC", "Paste Site", "Leak Site"],
|
181 |
+
default=["Market", "Forum", "Leak Site"]
|
182 |
+
)
|
183 |
+
|
184 |
+
relevant_only = st.checkbox("Show Relevant Alerts Only", value=True)
|
185 |
+
|
186 |
+
auto_refresh = st.checkbox("Auto-Refresh Feed", value=True)
|
187 |
+
|
188 |
+
if st.button("Refresh Now"):
|
189 |
+
st.session_state.last_update = datetime.now()
|
190 |
+
|
191 |
+
with col2:
|
192 |
+
st.markdown("### Live Intelligence Feed")
|
193 |
+
|
194 |
+
# Get events from queue and merge with existing events
|
195 |
+
new_events = []
|
196 |
+
while not feed_queue.empty():
|
197 |
+
try:
|
198 |
+
new_events.append(feed_queue.get_nowait())
|
199 |
+
except queue.Empty:
|
200 |
+
break
|
201 |
+
|
202 |
+
if new_events:
|
203 |
+
st.session_state.feed_events = new_events + st.session_state.feed_events
|
204 |
+
st.session_state.feed_events = st.session_state.feed_events[:100] # Keep only 100 most recent
|
205 |
+
st.session_state.last_update = datetime.now()
|
206 |
+
|
207 |
+
# Filter events
|
208 |
+
filtered_events = []
|
209 |
+
for event in st.session_state.feed_events:
|
210 |
+
if event["severity"] in severity_filter:
|
211 |
+
if not relevant_only or event["relevant"]:
|
212 |
+
source_match = False
|
213 |
+
for s_type in source_type:
|
214 |
+
if s_type.lower() in event["source"].lower():
|
215 |
+
source_match = True
|
216 |
+
break
|
217 |
+
if source_match or not source_type:
|
218 |
+
filtered_events.append(event)
|
219 |
+
|
220 |
+
# Display last updated time
|
221 |
+
st.caption(f"Last updated: {st.session_state.last_update.strftime('%H:%M:%S')}")
|
222 |
+
|
223 |
+
# Display events
|
224 |
+
if not filtered_events:
|
225 |
+
st.info("No events match your current filters. Adjust filters or wait for new events.")
|
226 |
+
else:
|
227 |
+
for i, event in enumerate(filtered_events[:20]): # Show only 20 most recent
|
228 |
+
# Determine the color based on severity
|
229 |
+
if event["severity"] == "Critical":
|
230 |
+
severity_color = "#E74C3C"
|
231 |
+
elif event["severity"] == "High":
|
232 |
+
severity_color = "#F1C40F"
|
233 |
+
elif event["severity"] == "Medium":
|
234 |
+
severity_color = "#3498DB"
|
235 |
+
else:
|
236 |
+
severity_color = "#2ECC71"
|
237 |
+
|
238 |
+
# Event container with colored border based on severity
|
239 |
+
with st.container():
|
240 |
+
cols = st.columns([3, 1])
|
241 |
+
|
242 |
+
# Event details
|
243 |
+
with cols[0]:
|
244 |
+
st.markdown(f"""
|
245 |
+
<div style="border-left: 4px solid {severity_color}; padding-left: 10px;">
|
246 |
+
<span style="color: {severity_color}; font-weight: bold;">{event['severity']}</span> | {event['event_type']}
|
247 |
+
<br><span style="font-size: 0.9em;">{event['description']}</span>
|
248 |
+
<br><span style="font-size: 0.8em; color: #7F8C8D;">Source: {event['source']} | ID: {event['id']}</span>
|
249 |
+
</div>
|
250 |
+
""", unsafe_allow_html=True)
|
251 |
+
|
252 |
+
# Timestamp and actions
|
253 |
+
with cols[1]:
|
254 |
+
# Format time as relative (e.g., "2 mins ago")
|
255 |
+
time_diff = datetime.now() - event["timestamp"]
|
256 |
+
minutes_ago = time_diff.total_seconds() / 60
|
257 |
+
|
258 |
+
if minutes_ago < 1:
|
259 |
+
time_str = "just now"
|
260 |
+
elif minutes_ago < 60:
|
261 |
+
time_str = f"{int(minutes_ago)} min ago"
|
262 |
+
else:
|
263 |
+
hours = int(minutes_ago / 60)
|
264 |
+
time_str = f"{hours} hrs ago"
|
265 |
+
|
266 |
+
st.markdown(f"<span style='font-size: 0.8em;'>{time_str}</span>", unsafe_allow_html=True)
|
267 |
+
|
268 |
+
# Action buttons
|
269 |
+
if st.button("Investigate", key=f"investigate_{i}"):
|
270 |
+
st.session_state.selected_event = event
|
271 |
+
|
272 |
+
# Add a subtle divider
|
273 |
+
st.markdown("<hr style='margin: 5px 0; opacity: 0.2;'>", unsafe_allow_html=True)
|
274 |
+
|
275 |
+
with col3:
|
276 |
+
st.markdown("### Intelligence Summary")
|
277 |
+
|
278 |
+
# Current severity distribution
|
279 |
+
severity_counts = {"Critical": 0, "High": 0, "Medium": 0, "Low": 0}
|
280 |
+
for event in st.session_state.feed_events:
|
281 |
+
if event["severity"] in severity_counts:
|
282 |
+
severity_counts[event["severity"]] += 1
|
283 |
+
|
284 |
+
# Create donut chart for severity distribution
|
285 |
+
fig = go.Figure(go.Pie(
|
286 |
+
labels=list(severity_counts.keys()),
|
287 |
+
values=list(severity_counts.values()),
|
288 |
+
hole=.6,
|
289 |
+
marker=dict(colors=['#E74C3C', '#F1C40F', '#3498DB', '#2ECC71'])
|
290 |
+
))
|
291 |
+
|
292 |
+
fig.update_layout(
|
293 |
+
showlegend=True,
|
294 |
+
margin=dict(t=0, b=0, l=0, r=0),
|
295 |
+
legend=dict(
|
296 |
+
orientation="h",
|
297 |
+
yanchor="bottom",
|
298 |
+
y=-0.2,
|
299 |
+
xanchor="center",
|
300 |
+
x=0.5
|
301 |
+
),
|
302 |
+
paper_bgcolor='rgba(0,0,0,0)',
|
303 |
+
plot_bgcolor='rgba(0,0,0,0)',
|
304 |
+
height=200
|
305 |
+
)
|
306 |
+
|
307 |
+
st.plotly_chart(fig, use_container_width=True)
|
308 |
+
|
309 |
+
# Top mentioned industries
|
310 |
+
st.markdown("#### Top Targeted Industries")
|
311 |
+
|
312 |
+
industry_counts = {}
|
313 |
+
for event in st.session_state.feed_events:
|
314 |
+
industry = event["industry"]
|
315 |
+
industry_counts[industry] = industry_counts.get(industry, 0) + 1
|
316 |
+
|
317 |
+
# Sort industries by count and take top 5
|
318 |
+
top_industries = sorted(industry_counts.items(), key=lambda x: x[1], reverse=True)[:5]
|
319 |
+
|
320 |
+
for industry, count in top_industries:
|
321 |
+
st.markdown(f"• {industry}: **{count}** alerts")
|
322 |
+
|
323 |
+
# Trending threats
|
324 |
+
st.markdown("#### Trending Threats")
|
325 |
+
|
326 |
+
event_type_counts = {}
|
327 |
+
for event in st.session_state.feed_events:
|
328 |
+
event_type = event["event_type"]
|
329 |
+
event_type_counts[event_type] = event_type_counts.get(event_type, 0) + 1
|
330 |
+
|
331 |
+
# Sort event types by count and take top 5
|
332 |
+
top_threats = sorted(event_type_counts.items(), key=lambda x: x[1], reverse=True)[:5]
|
333 |
+
|
334 |
+
for threat, count in top_threats:
|
335 |
+
st.markdown(f"• {threat}: **{count}** alerts")
|
336 |
+
|
337 |
+
# Add a quick investigate button for the most recent critical event
|
338 |
+
st.markdown("---")
|
339 |
+
st.markdown("#### Urgent Action Required")
|
340 |
+
|
341 |
+
critical_events = [e for e in st.session_state.feed_events if e["severity"] == "Critical"]
|
342 |
+
if critical_events:
|
343 |
+
latest_critical = critical_events[0]
|
344 |
+
st.error(f"""
|
345 |
+
**{latest_critical['event_type']}**
|
346 |
+
{latest_critical['description']}
|
347 |
+
""")
|
348 |
+
|
349 |
+
if st.button("Investigate Now", key="urgent_investigate"):
|
350 |
+
st.session_state.selected_event = latest_critical
|
351 |
+
else:
|
352 |
+
st.success("No critical events requiring urgent attention")
|
353 |
+
|
354 |
+
# If an event is selected for investigation, show details
|
355 |
+
if 'selected_event' in st.session_state and st.session_state.selected_event:
|
356 |
+
event = st.session_state.selected_event
|
357 |
+
|
358 |
+
st.markdown("---")
|
359 |
+
st.markdown("## Event Investigation")
|
360 |
+
|
361 |
+
event_col1, event_col2 = st.columns([3, 1])
|
362 |
+
|
363 |
+
with event_col1:
|
364 |
+
st.markdown(f"### {event['event_type']}")
|
365 |
+
st.markdown(f"**ID:** {event['id']}")
|
366 |
+
st.markdown(f"**Description:** {event['description']}")
|
367 |
+
st.markdown(f"**Source:** {event['source']}")
|
368 |
+
st.markdown(f"**Industry:** {event['industry']}")
|
369 |
+
st.markdown(f"**Detected:** {event['timestamp'].strftime('%Y-%m-%d %H:%M:%S')}")
|
370 |
+
st.markdown(f"**Severity:** {event['severity']}")
|
371 |
+
|
372 |
+
with event_col2:
|
373 |
+
severity_color = "#E74C3C" if event["severity"] == "Critical" else "#F1C40F" if event["severity"] == "High" else "#3498DB" if event["severity"] == "Medium" else "#2ECC71"
|
374 |
+
|
375 |
+
st.markdown(f"""
|
376 |
+
<div style="background-color: {severity_color}20; padding: 10px; border-radius: 5px; border-left: 4px solid {severity_color};">
|
377 |
+
<h4 style="margin: 0; color: {severity_color};">Risk Assessment</h4>
|
378 |
+
<p>Severity: <b>{event['severity']}</b></p>
|
379 |
+
<p>Confidence: <b>{random.randint(70, 95)}%</b></p>
|
380 |
+
<p>Impact: <b>{'High' if event['severity'] in ['Critical', 'High'] else 'Medium'}</b></p>
|
381 |
+
</div>
|
382 |
+
""", unsafe_allow_html=True)
|
383 |
+
|
384 |
+
# Tabs for different investigation aspects
|
385 |
+
inv_tab1, inv_tab2, inv_tab3 = st.tabs(["Analysis", "Similar Events", "Recommendations"])
|
386 |
+
|
387 |
+
with inv_tab1:
|
388 |
+
st.markdown("### Event Analysis")
|
389 |
+
|
390 |
+
# Simulated content analysis
|
391 |
+
st.markdown("#### Content Analysis")
|
392 |
+
st.markdown("""
|
393 |
+
This event represents a potential security incident that requires investigation.
|
394 |
+
The key indicators suggest this could be related to targeted activity against your organization
|
395 |
+
or the wider industry sector.
|
396 |
+
|
397 |
+
**Key Indicators:**
|
398 |
+
        * Event type and severity level
        * Source credibility assessment
        * Contextual mentions and relationships
        * Temporal correlation with known threat activities
        """)

        # Simulated indicators of compromise
        st.markdown("#### Indicators of Compromise")
        ioc_data = {
            "IP Addresses": [f"192.168.{random.randint(1, 254)}.{random.randint(1, 254)}" for _ in range(3)],
            "Domains": [f"malicious{random.randint(100, 999)}.{random.choice(['com', 'net', 'org'])}" for _ in range(2)],
            "File Hashes": [f"{''.join(random.choices('0123456789abcdef', k=64))}" for _ in range(2)]
        }

        for ioc_type, items in ioc_data.items():
            st.markdown(f"**{ioc_type}:**")
            for item in items:
                st.code(item)

    with inv_tab2:
        st.markdown("### Related Events")

        # Generate a few similar events
        similar_events = []
        for _ in range(3):
            similar_event = generate_live_event()
            similar_event["event_type"] = event["event_type"]
            similar_event["severity"] = random.choice(EVENT_TYPES[event["event_type"]])
            similar_event["timestamp"] = event["timestamp"] - timedelta(days=random.randint(1, 30))
            similar_events.append(similar_event)

        # Display similar events
        for i, similar in enumerate(similar_events):
            with st.container():
                st.markdown(f"""
                **{similar['event_type']} ({similar['severity']})**
                {similar['description']}
                *Detected: {similar['timestamp'].strftime('%Y-%m-%d')} | Source: {similar['source']}*
                """)

                if i < len(similar_events) - 1:
                    st.markdown("---")

    with inv_tab3:
        st.markdown("### Recommended Actions")

        # Generic recommendations based on event type
        recommendations = {
            "Data Breach": [
                "Verify if the leaked data belongs to your organization",
                "Identify affected systems and users",
                "Initiate your incident response plan",
                "Prepare for potential notification requirements",
                "Monitor for misuse of the compromised data"
            ],
            "Credential Leak": [
                "Force password resets for affected accounts",
                "Enable multi-factor authentication where possible",
                "Monitor for unauthorized access attempts",
                "Review privileged access controls",
                "Scan for credentials used across multiple systems"
            ],
            "Ransomware Activity": [
                "Verify backup integrity and availability",
                "Isolate potentially affected systems",
                "Review security controls for ransomware protection",
                "Assess exposure to the specific ransomware variant",
                "Prepare business continuity procedures"
            ],
            "Zero-day Exploit": [
                "Assess if your systems use the affected software",
                "Apply temporary mitigations or workarounds",
                "Monitor vendor channels for patch availability",
                "Increase monitoring for exploit attempts",
                "Review defense-in-depth security controls"
            ],
            "Phishing Campaign": [
                "Alert employees about the phishing campaign",
                "Block identified phishing domains and URLs",
                "Scan email systems for instances of the phishing message",
                "Review security awareness training materials",
                "Deploy additional email security controls"
            ],
            "Dark Web Mention": [
                "Analyze context of the mention for potential threats",
                "Review security for specifically mentioned assets",
                "Increase monitoring for related activities",
                "Brief relevant stakeholders on potential risks",
                "Consider threat intelligence analysis for the mention"
            ]
        }

        # Get recommendations for the event type or use a default set
        event_recommendations = recommendations.get(
            event["event_type"],
            ["Investigate the alert details", "Assess potential impact", "Verify if your organization is affected"]
        )

        # Display recommendations
        for rec in event_recommendations:
            st.markdown(f"- {rec}")

        # Action buttons
        col1, col2 = st.columns(2)
        with col1:
            st.button("Add to Investigation Case", key="add_to_case")
        with col2:
            st.button("Mark as False Positive", key="mark_false_positive")

        # Close investigation button
        if st.button("Close Investigation", key="close_investigation"):
            del st.session_state.selected_event

    # Auto-refresh using a placeholder and empty to trigger rerun
    if auto_refresh:
        placeholder = st.empty()
        time.sleep(30)  # Refresh every 30 seconds
        placeholder.empty()
        st.rerun()

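# Illustrative alternative (not part of the original commit): the sleep-based
# refresh above blocks the Streamlit script for the full 30 seconds. A
# non-blocking sketch, assuming the third-party `streamlit-autorefresh`
# package were installed, could look like this (defined but not called here):
def _auto_refresh_nonblocking(interval_ms: int = 30_000):
    """Re-run the app every `interval_ms` milliseconds without sleeping."""
    from streamlit_autorefresh import st_autorefresh  # assumed dependency
    st_autorefresh(interval=interval_ms, key="live_feed_autorefresh")
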
def fetch_dark_web_content(url):
    """
    Fetch content from a dark web site (simulated for demonstration).
    In a real application, this would connect to Tor network or similar.

    Args:
        url (str): The URL to fetch content from

    Returns:
        str: The extracted content
    """
    # In a real scenario, you would use specialized tools to access dark web
    # Here we'll simulate this with sample data

    if "forum" in url.lower():
        return """
        --------- Dark Web Forum Excerpt ---------

        User123: Looking for access to healthcare databases, paying premium

        DarkSeller: Have fresh dump from major hospital, 50K+ patient records with PII and insurance info

        User123: What's your price? Is it the Memorial Hospital data?

        DarkSeller: 45 BTC for the full database. Yes, it's from Memorial plus two smaller clinics.

        User456: I can vouch for DarkSeller, bought credentials last month, all valid.

        DarkSeller: Sample available for serious buyers. Payment via escrow only.
        """

    elif "market" in url.lower():
        return """
        --------- Dark Web Marketplace Listing ---------

        ITEM: Complete patient database from major US hospital
        SELLER: MedLeaks (Trusted Vendor ★★★★★)
        PRICE: 45 BTC

        DESCRIPTION:
        Fresh database dump containing 50,000+ complete patient records including:
        - Full names, DOB, SSN
        - Home addresses and contact information
        - Insurance policy details and ID numbers
        - Medical diagnoses and treatment codes
        - Billing information including payment methods

        Data verified and ready for immediate delivery. Suitable for identity theft,
        insurance fraud, or targeted phishing campaigns.

        SHIPPING: Instant digital delivery via encrypted channel
        TERMS: No refunds, escrow available
        """

    else:
        return """
        --------- Dark Web Intelligence ---------

        Multiple sources reporting new ransomware operation targeting healthcare sector.
        Group appears to be using stolen credentials to access systems.

        Identified C2 infrastructure:
        - 185.212.x.x
        - 91.223.x.x
        - malware-delivery[.]xyz

        Ransom demands ranging from 20-50 BTC depending on organization size.
        Group is exfiltrating data before encryption and threatening publication.
        """

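# Illustrative sketch (not part of the original commit) of what a real fetch
# might look like. It assumes a local Tor client exposing its standard SOCKS
# proxy on 127.0.0.1:9050 and the `requests[socks]` extra being installed;
# the timeout value is an arbitrary placeholder.
def _fetch_via_tor(url: str, timeout: int = 60) -> str:
    import requests  # assumed dependency, with SOCKS support (PySocks)

    proxies = {
        "http": "socks5h://127.0.0.1:9050",   # socks5h resolves .onion names inside Tor
        "https": "socks5h://127.0.0.1:9050",
    }
    response = requests.get(url, proxies=proxies, timeout=timeout)
    response.raise_for_status()
    return response.text
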
def render_content_analysis():
    """Display dark web content analysis tools"""
    st.markdown("### Dark Web Content Analysis")

    col1, col2 = st.columns([2, 1])

    with col1:
        st.markdown("Enter a URL or paste content for analysis:")

        analysis_source = st.radio(
            "Content Source",
            ["URL", "Pasted Content"],
            horizontal=True
        )

        if analysis_source == "URL":
            url = st.text_input("Enter Dark Web URL", value="darkforum.onion/thread/healthcare-data")

            if st.button("Fetch Content", key="fetch_btn"):
                with st.spinner("Connecting to dark web. Please wait..."):
                    time.sleep(2)  # Simulate connection time
                    content = fetch_dark_web_content(url)
                    st.session_state.current_content = content
        else:
            content_input = st.text_area("Paste content for analysis", height=150)

            if st.button("Analyze Content", key="analyze_pasted"):
                st.session_state.current_content = content_input

    with col2:
        st.markdown("Analysis Options")

        analysis_type = st.selectbox(
            "Select Analysis Type",
            ["Entity Extraction", "Threat Detection", "Keyword Analysis", "IoC Extraction"]
        )

        st.markdown("---")

        st.markdown("Monitored Keywords")

        # Display monitored keywords
        keyword_columns = st.columns(2)

        for i, keyword in enumerate(MONITORED_KEYWORDS[:8]):  # Show only first 8
            with keyword_columns[i % 2]:
                st.markdown(f"• {keyword}")

        st.markdown("...")

        st.markdown("---")

        if st.button("Add Custom Keywords"):
            st.session_state.show_keyword_input = True

        if st.session_state.get("show_keyword_input", False):
            new_keyword = st.text_input("Enter new keyword")
            if st.button("Add Keyword"):
                if new_keyword and new_keyword not in MONITORED_KEYWORDS:
                    MONITORED_KEYWORDS.append(new_keyword)
                    st.success(f"Added keyword: {new_keyword}")

    # If we have content to analyze, show it and the analysis
    if hasattr(st.session_state, "current_content") and st.session_state.current_content:
        st.markdown("---")

        tabs = st.tabs(["Content", "Analysis", "Entities", "Indicators"])

        with tabs[0]:
            st.markdown("### Raw Content")
            st.text(st.session_state.current_content)

        with tabs[1]:
            st.markdown("### Content Analysis")

            # Identify any monitored keywords in content
            found_keywords = []
            for keyword in MONITORED_KEYWORDS:
                if keyword.lower() in st.session_state.current_content.lower():
                    found_keywords.append(keyword)

            if found_keywords:
                st.warning(f"Found {len(found_keywords)} monitored keywords in content:")
                for keyword in found_keywords:
                    st.markdown(f"• **{keyword}**")
            else:
                st.info("No monitored keywords found in content.")

            # Simple sentiment analysis
            text = st.session_state.current_content.lower()

            threat_terms = ["hack", "breach", "leak", "dump", "sell", "exploit", "vulnerability",
                            "ransomware", "malware", "phishing", "attack", "threat"]

            threat_found = sum(term in text for term in threat_terms)

            if threat_found > 3:
                threat_level = "High"
                color = "#E74C3C"
            elif threat_found > 1:
                threat_level = "Medium"
                color = "#F1C40F"
            else:
                threat_level = "Low"
                color = "#2ECC71"

            st.markdown(f"**Threat Assessment: <span style='color:{color}'>{threat_level}</span>**", unsafe_allow_html=True)
            st.markdown(f"Identified {threat_found} threat indicators in the content.")

        with tabs[2]:
            st.markdown("### Entities Extracted")

            # Sample entity extraction
            entities = {
                "Organizations": ["Memorial Hospital", "MedLeaks"],
                "Monetary Values": ["45 BTC", "20-50 BTC"],
                "Quantities": ["50,000+ patient records", "50K+ patient records"],
                "Locations": [],
                "People": ["User123", "DarkSeller", "User456"]
            }

            for entity_type, items in entities.items():
                if items:
                    st.markdown(f"#### {entity_type}")
                    for item in items:
                        st.markdown(f"• {item}")

        with tabs[3]:
            st.markdown("### Indicators of Compromise")

            # Extract indicators from content
            iocs = {
                "IP Addresses": [],
                "Domains": [],
                "URLs": [],
                "Hashes": []
            }

            import re

            # Very simple regex patterns for demo - in real system use more robust methods
            ip_pattern = r'\b(?:\d{1,3}\.){3}\d{1,3}\b'
            domain_pattern = r'\b(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,}\b'
            url_pattern = r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+'
            hash_pattern = r'\b[a-fA-F0-9]{32,64}\b'

            text = st.session_state.current_content

            # Find IP addresses
            iocs["IP Addresses"] = re.findall(ip_pattern, text)

            # Find domains
            domains = re.findall(domain_pattern, text)
            iocs["Domains"] = [d for d in domains if ".onion" in d or ".xyz" in d]  # Filter for interesting domains

            # Find URLs
            iocs["URLs"] = re.findall(url_pattern, text)

            # Find hashes
            iocs["Hashes"] = re.findall(hash_pattern, text)

            # Display found IOCs
            has_iocs = False

            for ioc_type, items in iocs.items():
                if items:
                    has_iocs = True
                    st.markdown(f"#### {ioc_type}")
                    for item in items:
                        st.code(item)

            if not has_iocs:
                st.info("No indicators of compromise detected in the content.")

            # Actions
            col1, col2 = st.columns(2)

            with col1:
                st.button("Export Indicators", key="export_iocs")

            with col2:
                st.button("Add to Watchlist", key="add_to_watchlist")
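
# Illustrative helper (not part of the original commit): indicators in dark web
# reporting are often "defanged" (e.g. malware-delivery[.]xyz in the sample
# intelligence text above), which the domain regex will not match. Refanging
# the text before extraction is a common workaround; this is a minimal sketch
# covering only the defanging styles that appear plausible here.
def _refang(text: str) -> str:
    return (
        text.replace("[.]", ".")
            .replace("(.)", ".")
            .replace("hxxp://", "http://")
            .replace("hxxps://", "https://")
    )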
components/monitoring.py
ADDED
@@ -0,0 +1,555 @@
import streamlit as st
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import numpy as np
from datetime import datetime, timedelta

def render_monitoring():
    st.title("Monitoring Configuration")

    # Dashboard layout for monitoring configuration
    col1, col2 = st.columns([2, 3])

    with col1:
        st.subheader("Monitoring Settings")

        with st.form("monitoring_settings"):
            st.markdown("### General Settings")

            scan_frequency = st.select_slider(
                "Scan Frequency",
                options=["1 hour", "2 hours", "4 hours", "6 hours", "12 hours", "24 hours"],
                value="4 hours"
            )

            intelligence_sources = st.multiselect(
                "Intelligence Sources",
                ["Dark Web Forums", "Paste Sites", "Marketplaces", "Telegram Channels", "IRC Channels", "Ransomware Blogs", "Breach Databases", "Hacker Forums", "Social Media"],
                default=["Dark Web Forums", "Paste Sites", "Marketplaces", "Ransomware Blogs"]
            )

            st.markdown("### Alert Thresholds")

            col1a, col1b = st.columns(2)

            with col1a:
                critical_threshold = st.number_input("Critical Alert Threshold", min_value=1, max_value=100, value=80)

            with col1b:
                high_threshold = st.number_input("High Alert Threshold", min_value=1, max_value=100, value=60)

            col1c, col1d = st.columns(2)

            with col1c:
                medium_threshold = st.number_input("Medium Alert Threshold", min_value=1, max_value=100, value=40)

            with col1d:
                low_threshold = st.number_input("Low Alert Threshold", min_value=1, max_value=100, value=20)

            st.markdown("### Notification Channels")

            email_notify = st.checkbox("Email Notifications", value=True)
            if email_notify:
                email_recipients = st.text_input("Email Recipients", value="[email protected], [email protected]")

            slack_notify = st.checkbox("Slack Notifications", value=True)
            if slack_notify:
                slack_channel = st.text_input("Slack Channel", value="#security-alerts")

            api_notify = st.checkbox("API Webhook", value=False)
            if api_notify:
                webhook_url = st.text_input("Webhook URL", placeholder="https://api.example.com/webhook")

            sms_notify = st.checkbox("SMS Notifications", value=False)
            if sms_notify:
                phone_numbers = st.text_input("Phone Numbers", placeholder="+1234567890, +0987654321")

            submit = st.form_submit_button("Save Configuration", type="primary")

            if submit:
                st.success("Monitoring configuration saved successfully!")

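    # Illustrative sketch (not part of the original commit): one way the numeric
    # thresholds collected above could map a 0-100 risk score onto an alert
    # level. The scoring scale is an assumption for illustration only; the
    # helper is defined but never called here.
    def _alert_level(score: int) -> str:
        if score >= critical_threshold:
            return "Critical"
        if score >= high_threshold:
            return "High"
        if score >= medium_threshold:
            return "Medium"
        return "Low"
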
    with col2:
        st.subheader("Monitored Keywords & Entities")

        # Tabs for different monitoring categories
        tab1, tab2, tab3, tab4 = st.tabs(["Company Assets", "Credentials", "PII", "Custom Keywords"])

        with tab1:
            st.markdown("### Company Assets Monitoring")

            # Sample company assets to monitor
            company_assets = pd.DataFrame({
                "Asset Type": ["Domain", "Domain", "IP Range", "Brand", "Brand", "Product", "Technology"],
                "Value": ["company.com", "company-services.net", "198.51.100.0/24", "CompanyName", "ProductX", "ServiceY", "TechnologyZ"],
                "Priority": ["High", "Medium", "High", "Critical", "High", "Medium", "Low"],
                "Status": ["Active", "Active", "Active", "Active", "Active", "Active", "Active"]
            })

            # Editable dataframe
            edited_assets = st.data_editor(
                company_assets,
                num_rows="dynamic",
                column_config={
                    "Asset Type": st.column_config.SelectboxColumn(
                        "Asset Type",
                        options=["Domain", "IP Range", "Brand", "Product", "Technology", "Other"],
                    ),
                    "Priority": st.column_config.SelectboxColumn(
                        "Priority",
                        options=["Critical", "High", "Medium", "Low"],
                    ),
                    "Status": st.column_config.SelectboxColumn(
                        "Status",
                        options=["Active", "Paused"],
                    ),
                },
                use_container_width=True
            )

        with tab2:
            st.markdown("### Credentials Monitoring")

            # Sample credential monitoring settings
            credential_monitoring = pd.DataFrame({
                "Email Domain": ["@company.com", "@company-services.net", "@product-x.com"],
                "Include Subdomains": [True, True, False],
                "Monitor Password Breach": [True, True, True],
                "Alert Level": ["Critical", "High", "High"],
                "Status": ["Active", "Active", "Active"]
            })

            edited_credentials = st.data_editor(
                credential_monitoring,
                num_rows="dynamic",
                column_config={
                    "Include Subdomains": st.column_config.CheckboxColumn(
                        "Include Subdomains",
                        help="Monitor all subdomains",
                    ),
                    "Monitor Password Breach": st.column_config.CheckboxColumn(
                        "Monitor Password Breach",
                    ),
                    "Alert Level": st.column_config.SelectboxColumn(
                        "Alert Level",
                        options=["Critical", "High", "Medium", "Low"],
                    ),
                    "Status": st.column_config.SelectboxColumn(
                        "Status",
                        options=["Active", "Paused"],
                    ),
                },
                use_container_width=True
            )

        with tab3:
            st.markdown("### PII Monitoring")

            # Sample PII monitoring settings
            pii_monitoring = pd.DataFrame({
                "PII Type": ["SSN", "Credit Card", "Bank Account", "Passport Number", "Driver License"],
                "Monitor": [True, True, True, False, False],
                "Alert Level": ["Critical", "Critical", "High", "High", "Medium"],
                "Status": ["Active", "Active", "Active", "Paused", "Paused"]
            })

            edited_pii = st.data_editor(
                pii_monitoring,
                num_rows="dynamic",
                column_config={
                    "PII Type": st.column_config.SelectboxColumn(
                        "PII Type",
                        options=["SSN", "Credit Card", "Bank Account", "Passport Number", "Driver License", "Health Information", "Other"],
                    ),
                    "Monitor": st.column_config.CheckboxColumn(
                        "Monitor",
                    ),
                    "Alert Level": st.column_config.SelectboxColumn(
                        "Alert Level",
                        options=["Critical", "High", "Medium", "Low"],
                    ),
                    "Status": st.column_config.SelectboxColumn(
                        "Status",
                        options=["Active", "Paused"],
                    ),
                },
                use_container_width=True
            )

        with tab4:
            st.markdown("### Custom Keywords")

            # Sample custom keywords
            custom_keywords = pd.DataFrame({
                "Keyword": ["confidential memo", "project phoenix", "merger", "acquisition", "layoff", "security breach"],
                "Category": ["Internal Document", "Project", "Financial", "Financial", "HR", "Security"],
                "Alert Level": ["Critical", "High", "Critical", "Critical", "High", "Critical"],
                "Status": ["Active", "Active", "Active", "Active", "Active", "Active"]
            })

            edited_keywords = st.data_editor(
                custom_keywords,
                num_rows="dynamic",
                column_config={
                    "Category": st.column_config.SelectboxColumn(
                        "Category",
                        options=["Internal Document", "Project", "Financial", "HR", "Security", "Product", "Other"],
                    ),
                    "Alert Level": st.column_config.SelectboxColumn(
                        "Alert Level",
                        options=["Critical", "High", "Medium", "Low"],
                    ),
                    "Status": st.column_config.SelectboxColumn(
                        "Status",
                        options=["Active", "Paused"],
                    ),
                },
                use_container_width=True
            )

    # Monitoring sources and coverage
    st.markdown("---")
    st.subheader("Monitoring Sources & Coverage")

    # Create tabs for different monitoring source categories
    source_tab1, source_tab2, source_tab3 = st.tabs(["Dark Web Coverage", "Source Categories", "Geographic Coverage"])

    with source_tab1:
        # Dark web monitoring sources
        st.markdown("### Dark Web Monitoring Sources")

        # Sample data for dark web sources
        dark_web_sources = pd.DataFrame({
            "Source Type": ["Market", "Forum", "Forum", "Market", "Paste Site", "Leak Site", "Chat", "Market"],
            "Name": ["AlphaBay", "XSS Forum", "Exploit.in", "ASAP Market", "DeepPaste", "DarkLeak", "Telegram", "White House"],
            "Focus": ["General", "Hacking", "Credentials", "Drugs/Fraud", "Text sharing", "Data leaks", "Communication", "General"],
            "Coverage": [95, 90, 85, 80, 75, 70, 65, 60],
            "Status": ["Active", "Active", "Active", "Active", "Active", "Active", "Active", "Active"]
        })

        fig = px.bar(
            dark_web_sources,
            x="Name",
            y="Coverage",
            color="Coverage",
            color_continuous_scale=["#2ECC71", "#F1C40F", "#E74C3C"],
            text="Coverage",
            height=400
        )

        fig.update_layout(
            paper_bgcolor='rgba(26, 26, 26, 0)',
            plot_bgcolor='rgba(26, 26, 26, 0)',
            xaxis=dict(
                title=None,
                tickfont=dict(color='#ECF0F1')
            ),
            yaxis=dict(
                title="Coverage Percentage",
                showgrid=True,
                gridcolor='rgba(44, 62, 80, 0.3)',
                tickfont=dict(color='#ECF0F1')
            ),
            coloraxis_showscale=False
        )

        fig.update_traces(texttemplate='%{text}%', textposition='outside')

        st.plotly_chart(fig, use_container_width=True)

        # Source details table
        st.dataframe(dark_web_sources, use_container_width=True)

    with source_tab2:
        # Source category distribution
        st.markdown("### Monitoring by Source Category")

        # Sample data for source categories
        source_categories = {
            "Category": ["Dark Web Markets", "Hacking Forums", "Paste Sites", "Telegram Channels", "IRC Channels", "Leak Sites", "Ransomware Blogs", "Social Media"],
            "Sources Count": [12, 15, 5, 18, 8, 7, 6, 10],
            "Coverage Score": [90, 85, 75, 70, 60, 95, 80, 65]
        }

        source_df = pd.DataFrame(source_categories)

        fig = px.scatter(
            source_df,
            x="Sources Count",
            y="Coverage Score",
            color="Coverage Score",
            color_continuous_scale=["#E74C3C", "#F1C40F", "#2ECC71"],
            size="Sources Count",
            hover_name="Category",
            height=400
        )

        fig.update_layout(
            paper_bgcolor='rgba(26, 26, 26, 0)',
            plot_bgcolor='rgba(26, 26, 26, 0)',
            xaxis=dict(
                title="Number of Sources",
                showgrid=True,
                gridcolor='rgba(44, 62, 80, 0.3)',
                tickfont=dict(color='#ECF0F1')
            ),
            yaxis=dict(
                title="Coverage Score (%)",
                showgrid=True,
                gridcolor='rgba(44, 62, 80, 0.3)',
                tickfont=dict(color='#ECF0F1')
            ),
            coloraxis_showscale=False
        )

        st.plotly_chart(fig, use_container_width=True)

        # Category details
        st.dataframe(source_df, use_container_width=True)

    with source_tab3:
        # Geographic coverage
        st.markdown("### Geographic Monitoring Coverage")

        # World map showing coverage
        st.image("https://images.unsplash.com/photo-1451187580459-43490279c0fa",
                 caption="Global monitoring coverage across dark web sources",
                 use_column_width=True)

        # Regional coverage metrics
        col_geo1, col_geo2, col_geo3, col_geo4 = st.columns(4)

        with col_geo1:
            st.metric(
                label="North America",
                value="92%",
                delta="3%",
                delta_color="normal"
            )

        with col_geo2:
            st.metric(
                label="Europe",
                value="88%",
                delta="5%",
                delta_color="normal"
            )

        with col_geo3:
            st.metric(
                label="Asia Pacific",
                value="76%",
                delta="8%",
                delta_color="normal"
            )

        with col_geo4:
            st.metric(
                label="Rest of World",
                value="65%",
                delta="12%",
                delta_color="normal"
            )

    # Monitoring performance metrics
    st.markdown("---")
    st.subheader("Monitoring Performance")

    # Performance metrics
    perf_col1, perf_col2, perf_col3, perf_col4 = st.columns(4)

    with perf_col1:
        st.metric(
            label="Scan Completion Rate",
            value="98.7%",
            delta="0.5%",
            delta_color="normal"
        )

    with perf_col2:
        st.metric(
            label="Avg. Scan Duration",
            value="43 min",
            delta="-7 min",
            delta_color="normal"
        )

    with perf_col3:
        st.metric(
            label="Monitored Keywords",
            value="1,247",
            delta="23",
            delta_color="normal"
        )

    with perf_col4:
        st.metric(
            label="Coverage Index",
            value="87/100",
            delta="5",
            delta_color="normal"
        )

    # Performance charts
    st.markdown("### Performance Trends")

    perf_tab1, perf_tab2 = st.tabs(["Scan Performance", "Detection Accuracy"])

    with perf_tab1:
        # Generate dates for the past 30 days
        dates = [(datetime.now() - timedelta(days=i)).strftime('%Y-%m-%d') for i in range(30, 0, -1)]

        # Sample data for scan performance
        scan_times = np.random.normal(45, 5, 30).astype(int)  # Mean 45 minutes, std 5 minutes
        success_rates = np.random.normal(98, 1, 30)  # Mean 98%, std 1%
        success_rates = [min(100, max(90, rate)) for rate in success_rates]  # Clamp between 90-100%

        scan_data = pd.DataFrame({
            'Date': dates,
            'Scan Time (min)': scan_times,
            'Success Rate (%)': success_rates
        })

        # Create a figure with two y-axes
        fig = go.Figure()

        # Add scan time line
        fig.add_trace(go.Scatter(
            x=scan_data['Date'],
            y=scan_data['Scan Time (min)'],
            name='Scan Time (min)',
            line=dict(color='#3498DB', width=2)
        ))

        # Add success rate line on secondary y-axis
        fig.add_trace(go.Scatter(
            x=scan_data['Date'],
            y=scan_data['Success Rate (%)'],
            name='Success Rate (%)',
            line=dict(color='#2ECC71', width=2),
            yaxis='y2'
        ))

        # Configure the layout with two y-axes
        fig.update_layout(
            paper_bgcolor='rgba(26, 26, 26, 0)',
            plot_bgcolor='rgba(26, 26, 26, 0)',
            xaxis=dict(
                title="Date",
                showgrid=False,
                tickfont=dict(color='#ECF0F1')
            ),
            yaxis=dict(
                title="Scan Time (min)",
                showgrid=True,
                gridcolor='rgba(44, 62, 80, 0.3)',
                tickfont=dict(color='#ECF0F1'),
                range=[0, 60]
            ),
            yaxis2=dict(
                title="Success Rate (%)",
                showgrid=False,
                tickfont=dict(color='#ECF0F1'),
                overlaying='y',
                side='right',
                range=[90, 100]
            ),
            legend=dict(
                orientation="h",
                yanchor="bottom",
                y=1.02,
                xanchor="right",
                x=1,
                font=dict(color='#ECF0F1')
            ),
            height=400
        )

        st.plotly_chart(fig, use_container_width=True)

    with perf_tab2:
        # Sample data for detection accuracy
        accuracy_data = pd.DataFrame({
            'Date': dates,
            'True Positives': np.random.randint(80, 100, 30),
            'False Positives': np.random.randint(5, 15, 30),
            'Precision': np.random.normal(92, 2, 30),
            'Recall': np.random.normal(90, 3, 30)
        })

        # Ensure precision and recall are within reasonable bounds
        accuracy_data['Precision'] = accuracy_data['Precision'].apply(lambda x: min(100, max(80, x)))
        accuracy_data['Recall'] = accuracy_data['Recall'].apply(lambda x: min(100, max(80, x)))

        # Create a figure with stacked bars and lines
        fig = go.Figure()

        # Add stacked bars for true and false positives
        fig.add_trace(go.Bar(
            x=accuracy_data['Date'],
            y=accuracy_data['True Positives'],
            name='True Positives',
            marker_color='#2ECC71'
        ))

        fig.add_trace(go.Bar(
            x=accuracy_data['Date'],
            y=accuracy_data['False Positives'],
            name='False Positives',
            marker_color='#E74C3C'
        ))

        # Add lines for precision and recall
        fig.add_trace(go.Scatter(
            x=accuracy_data['Date'],
            y=accuracy_data['Precision'],
            name='Precision (%)',
            line=dict(color='#3498DB', width=2),
            yaxis='y2'
        ))

        fig.add_trace(go.Scatter(
            x=accuracy_data['Date'],
            y=accuracy_data['Recall'],
            name='Recall (%)',
            line=dict(color='#F1C40F', width=2),
            yaxis='y2'
        ))

        # Configure the layout
        fig.update_layout(
            paper_bgcolor='rgba(26, 26, 26, 0)',
            plot_bgcolor='rgba(26, 26, 26, 0)',
            barmode='stack',
            xaxis=dict(
                title="Date",
                showgrid=False,
                tickfont=dict(color='#ECF0F1')
            ),
            yaxis=dict(
                title="Alert Count",
                showgrid=True,
                gridcolor='rgba(44, 62, 80, 0.3)',
                tickfont=dict(color='#ECF0F1')
            ),
            yaxis2=dict(
                title="Percentage (%)",
                showgrid=False,
                tickfont=dict(color='#ECF0F1'),
                overlaying='y',
                side='right',
                range=[80, 100]
            ),
            legend=dict(
                orientation="h",
                yanchor="bottom",
                y=1.02,
                xanchor="right",
                x=1,
                font=dict(color='#ECF0F1')
            ),
            height=400
        )

        st.plotly_chart(fig, use_container_width=True)
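
# Illustrative sketch (not part of the original commit): the notification
# settings above are collected but never dispatched anywhere. For a Slack
# incoming webhook, delivery could look like this; the webhook URL is a
# placeholder supplied by the caller, and `requests` is an assumed dependency.
def _notify_slack(webhook_url: str, message: str) -> bool:
    import requests

    # Slack incoming webhooks accept a JSON payload with a "text" field
    response = requests.post(webhook_url, json={"text": message}, timeout=10)
    return response.status_code == 200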
components/reports.py
ADDED
@@ -0,0 +1,442 @@
import streamlit as st
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import numpy as np
from datetime import datetime, timedelta

def render_reports():
    st.title("Intelligence Reports")

    # Report filters
    with st.container():
        st.subheader("Report Filters")

        col1, col2, col3, col4 = st.columns(4)

        with col1:
            report_type = st.multiselect(
                "Report Type",
                ["Threat Intelligence", "Data Breach", "Executive", "Technical", "Custom"],
                default=["Threat Intelligence", "Data Breach"]
            )

        with col2:
            time_period = st.selectbox(
                "Time Period",
                ["Last 7 Days", "Last 30 Days", "Last Quarter", "Year to Date", "Custom Range"],
                index=1
            )

        with col3:
            severity = st.multiselect(
                "Severity",
                ["Critical", "High", "Medium", "Low"],
                default=["Critical", "High"]
            )

        with col4:
            keywords = st.text_input("Keywords", placeholder="e.g. healthcare, ransomware")

    # Recent reports
    st.markdown("### Recent Reports")

    # Sample report data
    reports = [
        {
            "id": "RPT-2025-04083",
            "title": "Healthcare Data Breach Intelligence Report",
            "date": "2025-04-08",
            "type": "Data Breach",
            "severity": "Critical",
            "status": "Final"
        },
        {
            "id": "RPT-2025-04082",
            "title": "Weekly Threat Intelligence Summary",
            "date": "2025-04-08",
            "type": "Threat Intelligence",
            "severity": "High",
            "status": "Final"
        },
        {
            "id": "RPT-2025-04073",
            "title": "Emerging Ransomware Group Analysis",
            "date": "2025-04-07",
            "type": "Technical",
            "severity": "High",
            "status": "Final"
        },
        {
            "id": "RPT-2025-04072",
            "title": "Executive Threat Landscape Overview",
            "date": "2025-04-07",
            "type": "Executive",
            "severity": "Medium",
            "status": "Final"
        },
        {
            "id": "RPT-2025-04063",
            "title": "Financial Sector Threat Assessment",
            "date": "2025-04-06",
            "type": "Threat Intelligence",
            "severity": "High",
            "status": "Final"
        },
        {
            "id": "RPT-2025-04053",
            "title": "Technical Analysis: PII Exposure in Dark Web",
            "date": "2025-04-05",
            "type": "Technical",
            "severity": "Medium",
            "status": "Final"
        }
    ]

    # Create a DataFrame
    report_df = pd.DataFrame(reports)

    # Report display
    for i, report in enumerate(reports):
        severity_color = (
            "#E74C3C" if report["severity"] == "Critical"
            else "#F1C40F" if report["severity"] == "High"
            else "#3498DB" if report["severity"] == "Medium"
            else "#2ECC71"
        )

        with st.container():
            cols = st.columns([4, 1, 1, 1])

            with cols[0]:
                st.markdown(f"#### {report['title']}")
                st.caption(f"ID: {report['id']} | Date: {report['date']}")

            with cols[1]:
                st.markdown(f"**Type:** {report['type']}")

            with cols[2]:
                st.markdown(f"**<span style='color:{severity_color}'>{report['severity']}</span>**", unsafe_allow_html=True)

            with cols[3]:
                st.button("View", key=f"view_report_{i}")

            st.markdown("---")

    # Generate a report
    st.markdown("### Generate New Report")

    with st.form("report_generator"):
        st.markdown("#### Report Parameters")

        col1, col2 = st.columns(2)

        with col1:
            report_title = st.text_input("Report Title", placeholder="e.g. Monthly Threat Intelligence Summary")

            report_type_selection = st.selectbox(
                "Report Type",
                ["Threat Intelligence", "Data Breach", "Executive", "Technical", "Custom"]
            )

        with col2:
            report_period = st.selectbox(
                "Report Period",
                ["Last 7 Days", "Last 30 Days", "Last Quarter", "Year to Date", "Custom Range"]
            )

            if report_period == "Custom Range":
                start_date = st.date_input("Start Date", datetime.now() - timedelta(days=30))
                end_date = st.date_input("End Date", datetime.now())

        st.markdown("#### Report Content")

        include_options = st.columns(3)

        with include_options[0]:
            include_summary = st.checkbox("Executive Summary", value=True)
            include_threats = st.checkbox("Threat Overview", value=True)
            include_breaches = st.checkbox("Data Breaches", value=True)

        with include_options[1]:
            include_credentials = st.checkbox("Exposed Credentials", value=True)
            include_ioc = st.checkbox("Indicators of Compromise", value=True)
            include_actors = st.checkbox("Threat Actor Analysis", value=True)

        with include_options[2]:
            include_trends = st.checkbox("Trend Analysis", value=True)
            include_mitigation = st.checkbox("Mitigation Recommendations", value=True)
            include_references = st.checkbox("References", value=True)

        st.markdown("#### Distribution")

        distribution = st.multiselect(
            "Distribute To",
            ["Security Team", "Executive Team", "IT Department", "Legal Department", "Custom Recipients"],
            default=["Security Team"]
        )

        if "Custom Recipients" in distribution:
            custom_recipients = st.text_input("Custom Recipients (separated by commas)")

        generate_button = st.form_submit_button("Generate Report")

        if generate_button:
            st.success("Report generation initiated! Your report will be available shortly.")

    # Report analytics
    st.markdown("---")
    st.subheader("Report Analytics")

    # Report metrics
    metric_col1, metric_col2, metric_col3, metric_col4 = st.columns(4)

    with metric_col1:
        st.metric(
            label="Reports Generated",
            value="87",
            delta="12",
            delta_color="normal"
        )

    with metric_col2:
        st.metric(
            label="Critical Reports",
            value="23",
            delta="5",
            delta_color="normal"
        )

    with metric_col3:
        st.metric(
            label="Avg. Generation Time",
            value="3.5 min",
            delta="-0.8 min",
            delta_color="normal"
        )

    with metric_col4:
        st.metric(
            label="Distribution Rate",
            value="97%",
            delta="2%",
            delta_color="normal"
        )

    # Report analytics charts
    analytics_tab1, analytics_tab2 = st.tabs(["Report Generation Trends", "Report Distribution"])

    with analytics_tab1:
        # Generate dates for the past 30 days
        dates = [(datetime.now() - timedelta(days=i)).strftime('%Y-%m-%d') for i in range(30, 0, -1)]

        # Sample data for report generation
        report_data = {
            'Date': dates,
            'Executive': np.random.randint(0, 2, 30),
            'Threat Intelligence': np.random.randint(1, 4, 30),
            'Data Breach': np.random.randint(0, 3, 30),
            'Technical': np.random.randint(1, 5, 30)
        }

        report_df = pd.DataFrame(report_data)

        # Create stacked bar chart
        fig = go.Figure()

        fig.add_trace(go.Bar(
            x=report_df['Date'],
            y=report_df['Executive'],
            name='Executive',
            marker_color='#9B59B6'
        ))

        fig.add_trace(go.Bar(
            x=report_df['Date'],
            y=report_df['Threat Intelligence'],
            name='Threat Intelligence',
            marker_color='#3498DB'
        ))

        fig.add_trace(go.Bar(
            x=report_df['Date'],
            y=report_df['Data Breach'],
            name='Data Breach',
            marker_color='#E74C3C'
        ))

        fig.add_trace(go.Bar(
            x=report_df['Date'],
            y=report_df['Technical'],
            name='Technical',
            marker_color='#2ECC71'
        ))

        fig.update_layout(
            paper_bgcolor='rgba(26, 26, 26, 0)',
            plot_bgcolor='rgba(26, 26, 26, 0)',
            barmode='stack',
            xaxis=dict(
                title="Date",
                showgrid=False,
                tickfont=dict(color='#ECF0F1')
            ),
            yaxis=dict(
                title="Number of Reports",
                showgrid=True,
                gridcolor='rgba(44, 62, 80, 0.3)',
                tickfont=dict(color='#ECF0F1')
            ),
            legend=dict(
                orientation="h",
                yanchor="bottom",
                y=1.02,
                xanchor="right",
                x=1,
                font=dict(color='#ECF0F1')
            ),
            height=400
        )

        st.plotly_chart(fig, use_container_width=True)

    with analytics_tab2:
        # Report distribution pie chart
        st.subheader("Report Distribution by Recipient")

        distribution_data = {
            'Recipient': ['Security Team', 'Executive Team', 'IT Department', 'Legal Department', 'Other'],
            'Count': [45, 23, 31, 15, 8]
        }

        dist_df = pd.DataFrame(distribution_data)

        fig = px.pie(
            dist_df,
            values='Count',
            names='Recipient',
            hole=0.4,
            color_discrete_sequence=['#3498DB', '#9B59B6', '#2ECC71', '#F1C40F', '#E74C3C']
        )

        fig.update_layout(
            paper_bgcolor='rgba(26, 26, 26, 0)',
            plot_bgcolor='rgba(26, 26, 26, 0)',
            showlegend=True,
            legend=dict(
                orientation="h",
                yanchor="bottom",
                y=-0.2,
                xanchor="center",
                x=0.5,
                font=dict(color='#ECF0F1')
            ),
            margin=dict(l=0, r=0, t=0, b=10),
            height=350
        )

        st.plotly_chart(fig, use_container_width=True)

    # Sample report view
    st.markdown("---")
    st.subheader("Sample Report Preview")

    # Report header
    st.markdown("# Healthcare Data Breach Intelligence Report")
    st.markdown("**Report ID:** RPT-2025-04083")
    st.markdown("**Date:** April 8, 2025")
    st.markdown("**Classification:** Confidential")
    st.markdown("**Severity:** Critical")

    # Table of contents
    st.markdown("## Table of Contents")
    st.markdown("""
    1. Executive Summary
    2. Breach Details
    3. Affected Data
    4. Threat Actor Analysis
    5. Timeline of Events
    6. Technical Indicators
    7. Recommendations
    8. References
    """)

    # Executive Summary
    st.markdown("## 1. Executive Summary")
    st.markdown("""
    On April 7, 2025, CyberForge OSINT Platform detected evidence of a significant data breach affecting Memorial Hospital.
    Patient records containing personally identifiable information (PII) and protected health information (PHI) were
    discovered for sale on a prominent dark web marketplace. Initial analysis indicates approximately 50,000 patient
    records may be affected. This report provides detailed analysis of the breach, indicators of compromise, and
    recommended actions.
    """)

    # Key findings
    st.info("""
    **Key Findings:**

    * Patient data including names, addresses, social security numbers, and medical records are being offered for sale
    * The threat actor appears to be affiliated with the BlackCat ransomware group
    * Initial access likely occurred between March 15-20, 2025
    * The breach has not yet been publicly disclosed by the healthcare provider
    * Similar tactics have been observed in other healthcare breaches in the past 60 days
    """)

    # Breach details
    st.markdown("## 2. Breach Details")
    st.markdown("""
    The data breach was detected on April 7, 2025, at 22:03 UTC when our monitoring system identified a new listing
    on AlphaBay marketplace offering "Complete patient database from major US hospital" for sale. The listing specifically
    mentioned Memorial Hospital by name and included sample data as proof of the breach. The seller, operating under the
    username "MedLeaks", is requesting 45 BTC (approximately $1.8 million USD) for the complete dataset.
    """)

    # Sample chart
    affected_data = {
        'Data Type': ['Medical Records', 'Personally Identifiable Information', 'Insurance Information', 'Billing Information', 'Staff Credentials'],
        'Records': [42000, 50000, 38000, 35000, 1200]
    }

    affected_df = pd.DataFrame(affected_data)

    fig = px.bar(
        affected_df,
        x='Records',
        y='Data Type',
        orientation='h',
        color='Records',
        color_continuous_scale=['#3498DB', '#F1C40F', '#E74C3C'],
        height=300
    )

    fig.update_layout(
        paper_bgcolor='rgba(26, 26, 26, 0)',
        plot_bgcolor='rgba(26, 26, 26, 0)',
        coloraxis_showscale=False,
        xaxis=dict(
            title="Number of Records",
            showgrid=True,
            gridcolor='rgba(44, 62, 80, 0.3)',
            tickfont=dict(color='#ECF0F1')
        ),
        yaxis=dict(
            title=None,
            showgrid=False,
            tickfont=dict(color='#ECF0F1')
        ),
        margin=dict(l=0, r=0, t=10, b=0)
    )

    st.plotly_chart(fig, use_container_width=True)

    # Report actions
    action_col1, action_col2, action_col3 = st.columns(3)

    with action_col1:
        st.download_button(
            label="Download Full Report",
            data="This is a placeholder for the full report download",
            file_name="Healthcare_Data_Breach_Report.pdf",
            mime="application/pdf"
        )

    with action_col2:
        st.button("Share Report", key="share_report")

    with action_col3:
        st.button("Print Report", key="print_report")
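
# Illustrative sketch (not part of the original commit): the download button
# above serves placeholder text with a PDF mime type. Producing a real PDF
# would require a PDF library; assuming `reportlab` were available, a minimal
# single-page version could look like this (defined but not wired up here).
def _build_report_pdf(title: str) -> bytes:
    from io import BytesIO
    from reportlab.pdfgen import canvas  # assumed dependency

    buf = BytesIO()
    c = canvas.Canvas(buf)
    c.drawString(72, 760, title)  # title near the top of the page
    c.drawString(72, 740, "Generated by CyberForge OSINT Platform")
    c.save()  # finalize the PDF into the buffer
    return buf.getvalue()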
components/search_trends.py
ADDED
@@ -0,0 +1,684 @@
1 |
+
"""
|
2 |
+
Search History and Trends Component
|
3 |
+
|
4 |
+
This component provides UI for displaying and analyzing search history and trends.
|
5 |
+
"""
|
6 |
+
import streamlit as st
|
7 |
+
import pandas as pd
|
8 |
+
import plotly.express as px
|
9 |
+
import plotly.graph_objects as go
|
10 |
+
from datetime import datetime, timedelta
|
11 |
+
import asyncio
|
12 |
+
import json
|
13 |
+
from typing import Dict, List, Any, Optional
|
14 |
+
import random
|
15 |
+
|
16 |
+
from src.api.services.search_history_service import (
|
17 |
+
get_search_history,
|
18 |
+
get_trending_topics,
|
19 |
+
get_search_trend_analysis,
|
20 |
+
get_popular_searches,
|
21 |
+
add_search_history,
|
22 |
+
save_search,
|
23 |
+
create_saved_search,
|
24 |
+
get_saved_searches
|
25 |
+
)
|
26 |
+
|
27 |
+
# For demo/placeholder data when database is not populated
|
28 |
+
def generate_demo_trends():
|
29 |
+
"""Generate demo trend data"""
|
30 |
+
topics = [
|
31 |
+
"ransomware", "databreach", "malware", "phishing", "zeroday",
|
32 |
+
"darkmarket", "cryptolocker", "anonymity", "botnet", "exploit",
|
33 |
+
"vulnerability", "trojan", "blackmarket", "identity", "creditcard",
|
34 |
+
"hacking", "ddos", "credentials", "bitcoin", "monero"
|
35 |
+
]
|
36 |
+
|
37 |
+
return [
|
38 |
+
{
|
39 |
+
"topic": topic,
|
40 |
+
"mentions": random.randint(5, 100),
|
41 |
+
"growth_rate": random.uniform(0.5, 25.0)
|
42 |
+
}
|
43 |
+
for topic in random.sample(topics, min(len(topics), 10))
|
44 |
+
]
|
45 |
+
|
46 |
+
def generate_demo_search_data(days=30):
|
47 |
+
"""Generate demo search frequency data"""
|
48 |
+
base_date = datetime.now() - timedelta(days=days)
|
49 |
+
dates = [base_date + timedelta(days=i) for i in range(days)]
|
50 |
+
|
51 |
+
base_count = 10
|
52 |
+
trend = [random.randint(max(0, base_count-5), base_count+15) for _ in range(days)]
|
53 |
+
# Add a spike for visual interest
|
54 |
+
spike_day = random.randint(5, days-5)
|
55 |
+
trend[spike_day] = trend[spike_day] * 3
|
56 |
+
|
57 |
+
return [
|
58 |
+
{"interval": date, "count": count}
|
59 |
+
for date, count in zip(dates, trend)
|
60 |
+
]
|
61 |
+
|
62 |
+
def generate_demo_search_categories():
|
63 |
+
"""Generate demo search categories data"""
|
64 |
+
categories = [
|
65 |
+
"Marketplace", "Forum", "Data Breach", "Hacking Tools",
|
66 |
+
"Credential Dumps", "Crypto", "Scam", "Uncategorized"
|
67 |
+
]
|
68 |
+
return [
|
69 |
+
{"category": cat, "count": random.randint(10, 100)}
|
70 |
+
for cat in categories
|
71 |
+
]
|
72 |
+
|
73 |
+
def generate_demo_popular_searches():
|
74 |
+
"""Generate demo popular searches data"""
|
75 |
+
searches = [
|
76 |
+
"ransomware as a service", "credit card dumps", "personal data breach",
|
77 |
+
"hacking tools", "bank account access", "identity documents", "covid vaccine cards",
|
78 |
+
"social security numbers", "corporate credentials", "zero day exploits"
|
79 |
+
]
|
80 |
+
return [
|
81 |
+
{"query": query, "count": random.randint(5, 50)}
|
82 |
+
for query in searches
|
83 |
+
]
|
84 |
+
|
85 |
+
async def get_trend_data(days=90, trend_days=7, limit=10):
|
86 |
+
"""Get trend data from the database"""
|
87 |
+
try:
|
88 |
+
# Create a session without context manager
|
89 |
+
from src.streamlit_database import async_session
|
90 |
+
session = async_session()
|
91 |
+
|
92 |
+
try:
|
93 |
+
data = await get_search_trend_analysis(
|
94 |
+
db=session,
|
95 |
+
days=days,
|
96 |
+
trend_days=trend_days,
|
97 |
+
limit=limit
|
98 |
+
)
|
99 |
+
await session.commit()
|
100 |
+
return data
|
101 |
+
except Exception as e:
|
102 |
+
await session.rollback()
|
103 |
+
raise e
|
104 |
+
finally:
|
105 |
+
await session.close()
|
106 |
+
except Exception as e:
|
107 |
+
st.error(f"Error fetching trend data: {e}")
|
108 |
+
# Use demo data as fallback
|
109 |
+
return {
|
110 |
+
"frequency": generate_demo_search_data(days),
|
111 |
+
"popular_searches": generate_demo_popular_searches(),
|
112 |
+
"trending_topics": generate_demo_trends(),
|
113 |
+
"categories": generate_demo_search_categories(),
|
114 |
+
"recent_popular": generate_demo_popular_searches(),
|
115 |
+
"velocity": random.uniform(-10, 30),
|
116 |
+
"total_searches": {
|
117 |
+
"total": 1000,
|
118 |
+
"recent": 400,
|
119 |
+
"previous": 600
|
120 |
+
}
|
121 |
+
}
|
122 |
+
|
123 |
+
async def save_search_query(query, user_id=None, category=None, tags=None):
|
124 |
+
"""Save a search query to the database"""
|
125 |
+
try:
|
126 |
+
# Create a session without context manager
|
127 |
+
from src.streamlit_database import async_session
|
128 |
+
session = async_session()
|
129 |
+
|
130 |
+
try:
|
131 |
+
search = await add_search_history(
|
132 |
+
db=session,
|
133 |
+
query=query,
|
134 |
+
user_id=user_id,
|
135 |
+
category=category,
|
136 |
+
tags=tags,
|
137 |
+
result_count=random.randint(5, 100) # Placeholder
|
138 |
+
)
|
139 |
+
await session.commit()
|
140 |
+
return search
|
141 |
+
except Exception as e:
|
142 |
+
await session.rollback()
|
143 |
+
raise e
|
144 |
+
finally:
|
145 |
+
await session.close()
|
146 |
+
except Exception as e:
|
147 |
+
st.error(f"Error saving search: {e}")
|
148 |
+
return None
|
149 |
+
|
150 |
+
async def get_user_searches(user_id=None, limit=50):
|
151 |
+
"""Get search history for a user"""
|
152 |
+
try:
|
153 |
+
# Create a session without context manager
|
154 |
+
from src.streamlit_database import async_session
|
155 |
+
session = async_session()
|
156 |
+
|
157 |
+
try:
|
158 |
+
searches = await get_search_history(
|
159 |
+
db=session,
|
160 |
+
user_id=user_id,
|
161 |
+
limit=limit
|
162 |
+
)
|
163 |
+
await session.commit()
|
164 |
+
return searches
|
165 |
+
except Exception as e:
|
166 |
+
await session.rollback()
|
167 |
+
raise e
|
168 |
+
finally:
|
169 |
+
await session.close()
|
170 |
+
except Exception as e:
|
171 |
+
st.error(f"Error fetching search history: {e}")
|
172 |
+
return []
|
173 |
+
|
174 |
+
async def get_user_saved_searches(user_id=None):
|
175 |
+
"""Get saved searches for a user"""
|
176 |
+
try:
|
177 |
+
# Create a session without context manager
|
178 |
+
from src.streamlit_database import async_session
|
179 |
+
session = async_session()
|
180 |
+
|
181 |
+
try:
|
182 |
+
searches = await get_saved_searches(
|
183 |
+
db=session,
|
184 |
+
user_id=user_id
|
185 |
+
)
|
186 |
+
await session.commit()
|
187 |
+
return searches
|
188 |
+
except Exception as e:
|
189 |
+
await session.rollback()
|
190 |
+
raise e
|
191 |
+
finally:
|
192 |
+
await session.close()
|
193 |
+
except Exception as e:
|
194 |
+
st.error(f"Error fetching saved searches: {e}")
|
195 |
+
return []
|
196 |
+
|
197 |
+
async def create_new_saved_search(name, query, user_id=None, frequency=24, category=None):
|
198 |
+
"""Create a new saved search"""
|
199 |
+
try:
|
200 |
+
# Create a session without context manager
|
201 |
+
from src.streamlit_database import async_session
|
202 |
+
session = async_session()
|
203 |
+
|
204 |
+
try:
|
205 |
+
saved_search = await create_saved_search(
|
206 |
+
db=session,
|
207 |
+
name=name,
|
208 |
+
query=query,
|
209 |
+
user_id=user_id or 1, # Default user ID
|
210 |
+
frequency=frequency,
|
211 |
+
category=category
|
212 |
+
)
|
213 |
+
await session.commit()
|
214 |
+
return saved_search
|
215 |
+
except Exception as e:
|
216 |
+
await session.rollback()
|
217 |
+
raise e
|
218 |
+
finally:
|
219 |
+
await session.close()
|
220 |
+
except Exception as e:
|
221 |
+
st.error(f"Error creating saved search: {e}")
|
222 |
+
return None
|
223 |
+
|
224 |
+
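Note: the five async helpers above hand-roll the same commit/rollback/close choreography around async_session(). A minimal refactor sketch (not part of this commit; db_session is a hypothetical name) that factors the pattern into one async context manager:

    # Hypothetical refactor (not in this commit): one async context manager
    # replaces the repeated try/commit/rollback/close blocks above.
    from contextlib import asynccontextmanager

    @asynccontextmanager
    async def db_session():
        from src.streamlit_database import async_session
        session = async_session()
        try:
            yield session
            await session.commit()  # commit only if the caller's body succeeded
        except Exception:
            await session.rollback()
            raise
        finally:
            await session.close()

    # Usage sketch:
    #   async with db_session() as session:
    #       return await get_search_trend_analysis(db=session, days=days,
    #                                              trend_days=trend_days, limit=limit)
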
+def plot_search_trends(frequency_data):
+    """Create a plot of search frequency over time"""
+    if not frequency_data:
+        return None
+
+    df = pd.DataFrame(frequency_data)
+    if 'interval' in df.columns:
+        df['interval'] = pd.to_datetime(df['interval'])
+
+        fig = px.line(
+            df,
+            x='interval',
+            y='count',
+            title='Search Frequency Over Time',
+            labels={'interval': 'Date', 'count': 'Number of Searches'},
+            template='plotly_dark'
+        )
+
+        fig.update_layout(
+            xaxis_title="Date",
+            yaxis_title="Number of Searches",
+            plot_bgcolor='rgba(17, 17, 17, 0.8)',
+            paper_bgcolor='rgba(17, 17, 17, 0)',
+            font=dict(color='white')
+        )
+
+        return fig
+
+    return None
+
+def plot_category_distribution(category_data):
+    """Create a plot of search categories distribution"""
+    if not category_data:
+        return None
+
+    df = pd.DataFrame(category_data)
+
+    fig = px.pie(
+        df,
+        values='count',
+        names='category',
+        title='Search Categories Distribution',
+        template='plotly_dark',
+        hole=0.4
+    )
+
+    fig.update_layout(
+        plot_bgcolor='rgba(17, 17, 17, 0.8)',
+        paper_bgcolor='rgba(17, 17, 17, 0)',
+        font=dict(color='white')
+    )
+
+    return fig
+
+def plot_trending_topics(trending_data):
+    """Create a bar chart of trending topics"""
+    if not trending_data:
+        return None
+
+    df = pd.DataFrame(trending_data)
+    if len(df) == 0:
+        return None
+
+    # Sort by mentions or growth rate
+    df = df.sort_values('growth_rate', ascending=False)
+
+    fig = px.bar(
+        df,
+        y='topic',
+        x='growth_rate',
+        title='Trending Topics by Growth Rate',
+        labels={'topic': 'Topic', 'growth_rate': 'Growth Rate (%)'},
+        orientation='h',
+        template='plotly_dark',
+        color='growth_rate',
+        color_continuous_scale='Viridis'
+    )
+
+    fig.update_layout(
+        xaxis_title="Growth Rate (%)",
+        yaxis_title="Topic",
+        plot_bgcolor='rgba(17, 17, 17, 0.8)',
+        paper_bgcolor='rgba(17, 17, 17, 0)',
+        font=dict(color='white'),
+        yaxis={'categoryorder': 'total ascending'}
+    )
+
+    return fig
+
+def plot_popular_searches(popular_data):
+    """Create a bar chart of popular searches"""
+    if not popular_data:
+        return None
+
+    df = pd.DataFrame(popular_data)
+    if len(df) == 0:
+        return None
+
+    df = df.sort_values('count', ascending=True)
+
+    fig = px.bar(
+        df,
+        y='query',
+        x='count',
+        title='Most Popular Search Terms',
+        labels={'query': 'Search Term', 'count': 'Number of Searches'},
+        orientation='h',
+        template='plotly_dark'
+    )
+
+    fig.update_layout(
+        xaxis_title="Number of Searches",
+        yaxis_title="Search Term",
+        plot_bgcolor='rgba(17, 17, 17, 0.8)',
+        paper_bgcolor='rgba(17, 17, 17, 0)',
+        font=dict(color='white'),
+        yaxis={'categoryorder': 'total ascending'}
+    )
+
+    return fig
+
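Note: each plot helper takes a plain list of dicts and returns a Plotly figure (or None), so the demo generators above double as fixtures. A hypothetical standalone smoke test, assuming the module is run directly:

    # Hypothetical smoke test (not in the commit): render one figure from demo data.
    if __name__ == "__main__":
        fig = plot_search_trends(generate_demo_search_data(days=30))
        if fig is not None:
            fig.show()  # opens the figure in a browser, outside Streamlit
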
+def render_search_box():
+    """Render the search box component"""
+    st.markdown("### Search Dark Web Content")
+
+    col1, col2 = st.columns([3, 1])
+
+    with col1:
+        search_query = st.text_input("Enter search terms", placeholder="Enter keywords to search dark web content...")
+
+    with col2:
+        categories = ["All Categories", "Marketplace", "Forum", "Paste Site", "Data Breach", "Hacking", "Cryptocurrency"]
+        selected_category = st.selectbox("Category", categories, index=0)
+
+        if selected_category == "All Categories":
+            selected_category = None
+
+    advanced_options = st.expander("Advanced Search Options", expanded=False)
+    with advanced_options:
+        col1, col2 = st.columns(2)
+
+        with col1:
+            date_range = st.selectbox(
+                "Date Range",
+                ["All Time", "Last 24 Hours", "Last 7 Days", "Last 30 Days", "Last 90 Days", "Custom Range"]
+            )
+
+            include_images = st.checkbox("Include Images", value=False)
+            include_code = st.checkbox("Include Code Snippets", value=True)
+
+        with col2:
+            sources = st.multiselect(
+                "Sources",
+                ["Dark Forums", "Marketplaces", "Paste Sites", "Leak Sites", "Chat Channels"],
+                default=["Dark Forums", "Marketplaces", "Leak Sites"]
+            )
+
+            sort_by = st.selectbox(
+                "Sort Results By",
+                ["Relevance", "Date (Newest First)", "Date (Oldest First)"]
+            )
+
+        tags_input = st.text_input("Tags (comma-separated)", placeholder="Add tags to organize your search...")
+
+    search_button = st.button("Search Dark Web")
+
+    if search_button and search_query:
+        # Save search to history
+        user_id = getattr(st.session_state, "user_id", None)
+
+        # Process tags
+        tags = tags_input.strip() if tags_input else None
+
+        # Run the search
+        with st.spinner("Searching dark web..."):
+            search = asyncio.run(save_search_query(
+                query=search_query,
+                user_id=user_id,
+                category=selected_category,
+                tags=tags
+            ))
+
+        if search:
+            st.success(f"Search completed: Found {search.result_count} results for '{search_query}'")
+            # In a real application, we would display results here
+
+            # Offer to save as a monitored search
+            save_col1, save_col2 = st.columns([3, 1])
+            with save_col1:
+                search_name = st.text_input(
+                    "Save this search for monitoring (enter a name)",
+                    placeholder="My saved search"
+                )
+            with save_col2:
+                frequency = st.selectbox(
+                    "Check frequency",
+                    ["Manual only", "Daily", "Every 12 hours", "Every 6 hours", "Hourly"],
+                    index=1
+                )
+
+            # Map to hours
+            freq_mapping = {
+                "Manual only": 0,
+                "Daily": 24,
+                "Every 12 hours": 12,
+                "Every 6 hours": 6,
+                "Hourly": 1
+            }
+            freq_hours = freq_mapping.get(frequency, 24)
+
+            if st.button("Save for Monitoring"):
+                if search_name:
+                    saved = asyncio.run(create_new_saved_search(
+                        name=search_name,
+                        query=search_query,
+                        user_id=user_id,
+                        frequency=freq_hours,
+                        category=selected_category
+                    ))
+
+                    if saved:
+                        st.success(f"Saved search '{search_name}' created successfully!")
+                else:
+                    st.error("Please enter a name for your saved search")
+        else:
+            st.error("Failed to perform search. Please try again.")
+
+def render_search_history():
+    """Render the search history component"""
+    st.markdown("### Your Search History")
+
+    user_id = getattr(st.session_state, "user_id", None)
+
+    # Fetch search history
+    searches = asyncio.run(get_user_searches(user_id))
+
+    if not searches:
+        st.info("No search history found. Try searching for dark web content.")
+        return
+
+    # Convert to DataFrame for display
+    search_data = []
+    for search in searches:
+        search_data.append({
+            "ID": search.id,
+            "Query": search.query,
+            "Date": search.timestamp.strftime("%Y-%m-%d %H:%M"),
+            "Results": search.result_count,
+            "Category": search.category or "All",
+            "Saved": "✓" if search.is_saved else ""
+        })
+
+    df = pd.DataFrame(search_data)
+
+    # Display as table
+    st.dataframe(
+        df,
+        use_container_width=True,
+        column_config={
+            "ID": st.column_config.NumberColumn(format="%d"),
+            "Query": st.column_config.TextColumn(),
+            "Date": st.column_config.DatetimeColumn(),
+            "Results": st.column_config.NumberColumn(),
+            "Category": st.column_config.TextColumn(),
+            "Saved": st.column_config.TextColumn()
+        }
+    )
+
+def render_saved_searches():
+    """Render the saved searches component"""
+    st.markdown("### Saved Searches")
+
+    user_id = getattr(st.session_state, "user_id", None)
+
+    # Fetch saved searches
+    saved_searches = asyncio.run(get_user_saved_searches(user_id))
+
+    if not saved_searches:
+        st.info("No saved searches found. Save a search to monitor for new results.")
+        return
+
+    # Convert to DataFrame for display
+    search_data = []
+    for search in saved_searches:
+        # Calculate next run time
+        if search.last_run_at and search.frequency > 0:
+            next_run = search.last_run_at + timedelta(hours=search.frequency)
+        else:
+            next_run = "Manual only"
+
+        search_data.append({
+            "ID": search.id,
+            "Name": search.name,
+            "Query": search.query,
+            "Category": search.category or "All",
+            "Frequency": f"{search.frequency}h" if search.frequency > 0 else "Manual",
+            "Last Run": search.last_run_at.strftime("%Y-%m-%d %H:%M") if search.last_run_at else "Never",
+            "Next Run": next_run if isinstance(next_run, str) else next_run.strftime("%Y-%m-%d %H:%M"),
+            "Status": "Active" if search.is_active else "Paused"
+        })
+
+    df = pd.DataFrame(search_data)
+
+    # Display as table
+    st.dataframe(
+        df,
+        use_container_width=True
+    )
+
+    # Action buttons
+    col1, col2, col3 = st.columns(3)
+
+    with col1:
+        if st.button("Run Selected Searches Now"):
+            st.info("This would trigger manual execution of selected searches")
+
+    with col2:
+        if st.button("Pause Selected"):
+            st.info("This would pause the selected searches")
+
+    with col3:
+        if st.button("Delete Selected"):
+            st.info("This would delete the selected searches")
+
+def render_trend_dashboard():
+    """Render the trend dashboard component"""
+    st.markdown("## Search Trends Analysis")
+
+    # Time period selector
+    col1, col2 = st.columns([1, 3])
+    with col1:
+        time_period = st.selectbox(
+            "Time Period",
+            ["Last 7 Days", "Last 30 Days", "Last 90 Days", "Last Year"],
+            index=1
+        )
+
+        # Map to days
+        period_mapping = {
+            "Last 7 Days": 7,
+            "Last 30 Days": 30,
+            "Last 90 Days": 90,
+            "Last Year": 365
+        }
+        days = period_mapping.get(time_period, 30)
+
+    with col2:
+        st.markdown("") # Spacing
+
+    # Fetch trend data
+    with st.spinner("Loading trend data..."):
+        trend_data = asyncio.run(get_trend_data(days=days))
+
+    # Create layout for visualizations
+    col1, col2 = st.columns(2)
+
+    with col1:
+        search_trend_fig = plot_search_trends(trend_data.get("frequency", []))
+        if search_trend_fig:
+            st.plotly_chart(search_trend_fig, use_container_width=True)
+        else:
+            st.error("Failed to load search trend data")
+
+        popular_searches_fig = plot_popular_searches(trend_data.get("popular_searches", []))
+        if popular_searches_fig:
+            st.plotly_chart(popular_searches_fig, use_container_width=True)
+        else:
+            st.error("Failed to load popular searches data")
+
+    with col2:
+        trending_topics_fig = plot_trending_topics(trend_data.get("trending_topics", []))
+        if trending_topics_fig:
+            st.plotly_chart(trending_topics_fig, use_container_width=True)
+        else:
+            st.error("Failed to load trending topics data")
+
+        category_fig = plot_category_distribution(trend_data.get("categories", []))
+        if category_fig:
+            st.plotly_chart(category_fig, use_container_width=True)
+        else:
+            st.error("Failed to load category distribution data")
+
+    # Display trend insights
+    st.markdown("### Trend Insights")
+
+    col1, col2, col3 = st.columns(3)
+
+    with col1:
+        velocity = trend_data.get("velocity", 0)
+        velocity_color = "green" if velocity > 0 else "red"
+        velocity_icon = "↗️" if velocity > 0 else "↘️"
+        st.markdown(f"""
+        ### Search Velocity
+        <h2 style="color:{velocity_color}">{velocity_icon} {abs(velocity):.1f}%</h2>
+        <p>Change in search volume compared to previous period</p>
+        """, unsafe_allow_html=True)
+
+    with col2:
+        total_searches = trend_data.get("total_searches", {}).get("total", 0)
+        st.markdown(f"""
+        ### Total Searches
+        <h2>{total_searches:,}</h2>
+        <p>Total searches in the selected period</p>
+        """, unsafe_allow_html=True)
+
+    with col3:
+        top_topic = "None"
+        top_growth = 0
+        if trend_data.get("trending_topics"):
+            top_item = max(trend_data["trending_topics"], key=lambda x: x.get("growth_rate", 0))
+            top_topic = top_item.get("topic", "None")
+            top_growth = top_item.get("growth_rate", 0)
+
+        st.markdown(f"""
+        ### Fastest Growing Topic
+        <h2>{top_topic}</h2>
+        <p>Growth rate: {top_growth:.1f}%</p>
+        """, unsafe_allow_html=True)
+
+    # Display emerging themes (if available)
+    if trend_data.get("trending_topics"):
+        st.markdown("### Emerging Dark Web Themes")
+
+        # Group topics by similar growth rates
+        topics = trend_data["trending_topics"]
+
+        # Display as topic clusters with common themes
+        theme_groups = {
+            "High Growth": [t for t in topics if t.get("growth_rate", 0) > 15],
+            "Moderate Growth": [t for t in topics if 5 <= t.get("growth_rate", 0) <= 15],
+            "Stable": [t for t in topics if t.get("growth_rate", 0) < 5]
+        }
+
+        for theme, items in theme_groups.items():
+            if items:
+                st.markdown(f"#### {theme}")
+                themes_text = ", ".join([f"{t.get('topic')} ({t.get('growth_rate', 0):.1f}%)" for t in items])
+                st.markdown(f"<p>{themes_text}</p>", unsafe_allow_html=True)
+
+def render_search_trends():
+    """Main function to render the search trends component"""
+    st.title("Dark Web Search & Trends")
+
+    tabs = st.tabs([
+        "Search Dark Web",
+        "Search History",
+        "Saved Searches",
+        "Trend Analysis"
+    ])
+
+    with tabs[0]:
+        render_search_box()
+
+    with tabs[1]:
+        render_search_history()
+
+    with tabs[2]:
+        render_saved_searches()
+
+    with tabs[3]:
+        render_trend_dashboard()
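Note: render_search_trends() is this module's single entry point, so a host app only has to import and call it on the chosen page. The sidebar routing below is an assumption for illustration, not code taken from hf_app.py:

    # Hypothetical wiring in a host app page (routing is assumed):
    import streamlit as st
    from components.search_trends import render_search_trends

    page = st.sidebar.radio("Navigate", ["Dashboard", "Search & Trends"])
    if page == "Search & Trends":
        render_search_trends()
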
components/subscriptions.py
ADDED
@@ -0,0 +1,478 @@
+"""
+Subscription management component.
+
+This component provides UI for managing subscription plans.
+"""
+import os
+import streamlit as st
+import pandas as pd
+from datetime import datetime
+import json
+
+import stripe
+from streamlit_extras.colored_header import colored_header
+from streamlit_extras.metric_cards import style_metric_cards
+
+from src.streamlit_subscription_services import (
+    get_subscription_plans_df,
+    get_subscription_plan,
+    get_user_current_subscription,
+    subscribe_user_to_plan,
+    cancel_subscription,
+    initialize_default_plans
+)
+
+# Set up Stripe publishable key for client-side usage
+STRIPE_PUBLISHABLE_KEY = os.environ.get("STRIPE_PUBLISHABLE_KEY")
+
+
+def format_price(price):
+    """Format price display."""
+    if price == 0:
+        return "Free"
+    return f"${price:.2f}"
+
+
+def render_pricing_card(plan, selected_period="monthly"):
+    """Render a pricing card for a subscription plan."""
+    plan_id = plan["id"]
+    plan_name = plan["name"]
+    plan_tier = plan["tier"]
+    description = plan["description"]
+
+    # Determine price based on selected period
+    if selected_period == "monthly":
+        price = plan["price_monthly"]
+        period_text = "per month"
+        billing_term = "monthly"
+    else:
+        price = plan["price_annually"]
+        period_text = "per year"
+        billing_term = "annually"
+
+    # Format price for display
+    price_display = format_price(price)
+
+    # Feature list
+    features = [
+        f"✓ {plan['max_alerts'] if plan['max_alerts'] > 0 else 'Unlimited'} alerts",
+        f"✓ {plan['max_reports'] if plan['max_reports'] > 0 else 'Unlimited'} reports",
+        f"✓ {plan['max_searches_per_day'] if plan['max_searches_per_day'] > 0 else 'Unlimited'} searches per day",
+        f"✓ {plan['max_monitoring_keywords'] if plan['max_monitoring_keywords'] > 0 else 'Unlimited'} monitoring keywords",
+        f"✓ {plan['max_data_retention_days']} days data retention"
+    ]
+
+    if plan["supports_api_access"]:
+        features.append("✓ API access")
+
+    if plan["supports_live_feed"]:
+        features.append("✓ Live feed")
+
+    if plan["supports_dark_web_monitoring"]:
+        features.append("✓ Dark web monitoring")
+
+    if plan["supports_export"]:
+        features.append("✓ Data export")
+
+    if plan["supports_advanced_analytics"]:
+        features.append("✓ Advanced analytics")
+
+    # Card style based on tier
+    if plan_tier == "free":
+        border_color = "#3498db"  # Blue
+        header_color = "#3498db"
+    elif plan_tier == "basic":
+        border_color = "#2ecc71"  # Green
+        header_color = "#2ecc71"
+    elif plan_tier == "professional":
+        border_color = "#f39c12"  # Orange
+        header_color = "#f39c12"
+    else:  # Enterprise
+        border_color = "#9b59b6"  # Purple
+        header_color = "#9b59b6"
+
+    # Render card
+    st.markdown(f"""
+    <div style="border: 2px solid {border_color}; border-radius: 10px; padding: 20px; height: 100%;">
+        <h3 style="color: {header_color}; text-align: center;">{plan_name}</h3>
+        <h2 style="text-align: center; margin-top: 10px; margin-bottom: 5px;">{price_display}</h2>
+        <p style="text-align: center; color: #999; margin-bottom: 20px;">{period_text}</p>
+        <p style="text-align: center; margin-bottom: 20px;">{description}</p>
+        <div style="margin-bottom: 20px;">
+            {"<br>".join([f'<div style="margin-bottom: 8px;">{feature}</div>' for feature in features])}
+        </div>
+    </div>
+    """, unsafe_allow_html=True)
+
+    # Subscribe button
+    if st.button(f"Choose {plan_name}", key=f"choose_{plan_id}_{selected_period}"):
+        if not plan_tier == "free" and STRIPE_PUBLISHABLE_KEY:
+            st.session_state.show_payment_form = True
+            st.session_state.selected_plan = plan
+            st.session_state.selected_billing_period = billing_term
+        else:
+            # Free plan - no payment needed
+            # Assume user ID 1 for demonstration
+            user_id = 1
+            subscription = subscribe_user_to_plan(
+                user_id=user_id,
+                plan_id=plan_id,
+                billing_period=billing_term,
+                create_stripe_subscription=False
+            )
+
+            if subscription:
+                st.success(f"You're now subscribed to the {plan_name} plan!")
+                st.session_state.current_user_subscription = subscription
+            else:
+                st.error("Failed to subscribe. Please try again.")
+
+        st.rerun()
+
+
+def render_payment_form():
+    """Render the payment form for subscription."""
+    if not STRIPE_PUBLISHABLE_KEY:
+        st.error("Stripe API key is not configured. Payment processing is unavailable.")
+        return
+
+    plan = st.session_state.selected_plan
+    billing_period = st.session_state.selected_billing_period
+
+    st.markdown("### Payment Information")
+
+    # Calculate amount based on billing period
+    if billing_period == "monthly":
+        amount = plan["price_monthly"]
+    else:
+        amount = plan["price_annually"]
+
+    st.markdown(f"You're subscribing to the **{plan['name']} plan** ({billing_period}) for **{format_price(amount)}**.")
+
+    # Name and email inputs
+    col1, col2 = st.columns(2)
+
+    with col1:
+        name = st.text_input("Full Name")
+
+    with col2:
+        email = st.text_input("Email Address")
+
+    # HTML/JS for Stripe Elements
+    st.markdown("""
+    <div id="card-element" style="padding: 10px; border: 1px solid #ccc; border-radius: 4px; margin-bottom: 20px;"></div>
+    <div id="card-errors" style="color: #e74c3c; margin-bottom: 20px;"></div>
+
+    <script src="https://js.stripe.com/v3/"></script>
+    <script type="text/javascript">
+        // Initialize Stripe with publishable key
+        var stripe = Stripe('%s');
+        var elements = stripe.elements();
+
+        // Create card element
+        var card = elements.create('card');
+        card.mount('#card-element');
+
+        // Handle real-time validation errors
+        card.addEventListener('change', function(event) {
+            var displayError = document.getElementById('card-errors');
+            if (event.error) {
+                displayError.textContent = event.error.message;
+            } else {
+                displayError.textContent = '';
+            }
+        });
+
+        // Set up payment method creation
+        window.createPaymentMethod = function() {
+            stripe.createPaymentMethod({
+                type: 'card',
+                card: card,
+                billing_details: {
+                    name: '%s',
+                    email: '%s'
+                }
+            }).then(function(result) {
+                if (result.error) {
+                    var errorElement = document.getElementById('card-errors');
+                    errorElement.textContent = result.error.message;
+                } else {
+                    // Send payment method ID to Streamlit
+                    window.parent.postMessage({
+                        type: 'payment-method-created',
+                        paymentMethodId: result.paymentMethod.id
+                    }, '*');
+                }
+            });
+        }
+    </script>
+    """ % (STRIPE_PUBLISHABLE_KEY, name, email), unsafe_allow_html=True)
+
+    # Submit button
+    if st.button("Subscribe Now", key="subscribe_button"):
+        # Call JavaScript function to create payment method
+        st.markdown("""
+        <script>
+        window.createPaymentMethod();
+        </script>
+        """, unsafe_allow_html=True)
+
+        # In a real implementation, we would need to handle the callback from Stripe
+        # For now, simulate success for demonstration
+        payment_method_id = "pm_" + "".join([str(i) for i in range(24)]) # Fake payment method ID
+
+        # Subscribe user with payment method
+        # Assume user ID 1 for demonstration
+        user_id = 1
+        subscription = subscribe_user_to_plan(
+            user_id=user_id,
+            plan_id=plan["id"],
+            billing_period=billing_period,
+            create_stripe_subscription=True,
+            payment_method_id=payment_method_id
+        )
+
+        if subscription:
+            st.success(f"You're now subscribed to the {plan['name']} plan!")
+            st.session_state.show_payment_form = False
+            st.session_state.current_user_subscription = subscription
+        else:
+            st.error("Failed to subscribe. Please try again.")
+
+        st.rerun()
+
+    # Cancel button
+    if st.button("Cancel", key="cancel_payment"):
+        st.session_state.show_payment_form = False
+        st.rerun()
+
+
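Note: render_payment_form() above fabricates the payment method ID instead of receiving the real one from the Stripe Elements postMessage callback. Server-side, the flow would attach the payment method to a customer and open the subscription; the price_id lookup and secret-key handling below are assumptions, not values from this repo, and only documented stripe-python calls are used:

    # Hypothetical server-side completion of the Stripe flow (not in this commit).
    import os
    import stripe

    stripe.api_key = os.environ.get("STRIPE_SECRET_KEY")  # assumed env var

    def complete_subscription(payment_method_id, email, price_id):
        customer = stripe.Customer.create(email=email)
        stripe.PaymentMethod.attach(payment_method_id, customer=customer.id)
        return stripe.Subscription.create(
            customer=customer.id,
            items=[{"price": price_id}],  # price_id is an assumed lookup
            default_payment_method=payment_method_id,
        )
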
+def render_subscription_dashboard(user_id=1):
+    """Render the subscription dashboard for the current user."""
+    # Get current subscription
+    subscription = get_user_current_subscription(user_id)
+    st.session_state.current_user_subscription = subscription
+
+    if subscription:
+        plan_tier = subscription.get("plan_tier", "").capitalize()
+        plan_name = subscription.get("plan_name", "Unknown Plan")
+        status = subscription.get("status", "").capitalize()
+        billing_period = subscription.get("billing_period", "").capitalize()
+
+        period_start = subscription.get("current_period_start")
+        period_end = subscription.get("current_period_end")
+
+        # Format dates
+        start_date = period_start.strftime("%B %d, %Y") if period_start else "N/A"
+        end_date = period_end.strftime("%B %d, %Y") if period_end else "N/A"
+
+        st.markdown(f"### Current Subscription: {plan_name}")
+
+        col1, col2, col3 = st.columns(3)
+
+        with col1:
+            st.metric("Status", status)
+
+        with col2:
+            st.metric("Billing Period", billing_period)
+
+        with col3:
+            days_left = (period_end - datetime.now()).days if period_end else 0
+            days_left = max(0, days_left)
+            st.metric("Days Remaining", days_left)
+
+        style_metric_cards()
+
+        st.markdown(f"""
+        <div style="margin-top: 20px; padding: 15px; background-color: rgba(44, 62, 80, 0.2); border-radius: 6px;">
+            <p><strong>Billing Period:</strong> {start_date} to {end_date}</p>
+        </div>
+        """, unsafe_allow_html=True)
+
+        # Cancel subscription button
+        if status.lower() != "canceled":
+            if st.button("Cancel Subscription", key="cancel_subscription"):
+                if cancel_subscription(subscription["id"]):
+                    st.success("Your subscription has been canceled. You'll still have access until the end of your billing period.")
+                    st.rerun()
+                else:
+                    st.error("Failed to cancel subscription. Please try again.")
+
+    # View other plans button
+    if st.button("View Available Plans", key="view_plans"):
+        st.session_state.show_pricing_table = True
+        st.rerun()
+
+
+def render_subscription_metrics(user_id=1):
+    """Render subscription usage metrics for the current user."""
+    # Get current subscription
+    subscription = st.session_state.get("current_user_subscription") or get_user_current_subscription(user_id)
+
+    if not subscription:
+        return
+
+    # Get subscription plan
+    plan = get_subscription_plan(subscription["plan_id"])
+
+    if not plan:
+        return
+
+    st.markdown("### Usage Metrics")
+
+    # Create metrics
+    col1, col2 = st.columns(2)
+    col3, col4 = st.columns(2)
+
+    with col1:
+        max_alerts = plan["max_alerts"]
+        alerts_used = 3 # Placeholder value
+        alerts_percent = (alerts_used / max_alerts * 100) if max_alerts > 0 else 0
+        st.metric("Alerts", f"{alerts_used}/{max_alerts if max_alerts > 0 else '∞'}")
+        st.progress(min(alerts_percent, 100) / 100)
+
+    with col2:
+        max_reports = plan["max_reports"]
+        reports_used = 1 # Placeholder value
+        reports_percent = (reports_used / max_reports * 100) if max_reports > 0 else 0
+        st.metric("Reports", f"{reports_used}/{max_reports if max_reports > 0 else '∞'}")
+        st.progress(min(reports_percent, 100) / 100)
+
+    with col3:
+        max_searches = plan["max_searches_per_day"]
+        searches_used = 8 # Placeholder value
+        searches_percent = (searches_used / max_searches * 100) if max_searches > 0 else 0
+        st.metric("Daily Searches", f"{searches_used}/{max_searches if max_searches > 0 else '∞'}")
+        st.progress(min(searches_percent, 100) / 100)
+
+    with col4:
+        max_keywords = plan["max_monitoring_keywords"]
+        keywords_used = 4 # Placeholder value
+        keywords_percent = (keywords_used / max_keywords * 100) if max_keywords > 0 else 0
+        st.metric("Monitoring Keywords", f"{keywords_used}/{max_keywords if max_keywords > 0 else '∞'}")
+        st.progress(min(keywords_percent, 100) / 100)
+
+    # List other features
+    st.markdown("### Features")
+
+    features = []
+
+    if plan["supports_api_access"]:
+        features.append("✓ API Access")
+    else:
+        features.append("✗ API Access")
+
+    if plan["supports_live_feed"]:
+        features.append("✓ Live Feed")
+    else:
+        features.append("✗ Live Feed")
+
+    if plan["supports_dark_web_monitoring"]:
+        features.append("✓ Dark Web Monitoring")
+    else:
+        features.append("✗ Dark Web Monitoring")
+
+    if plan["supports_export"]:
+        features.append("✓ Data Export")
+    else:
+        features.append("✗ Data Export")
+
+    if plan["supports_advanced_analytics"]:
+        features.append("✓ Advanced Analytics")
+    else:
+        features.append("✗ Advanced Analytics")
+
+    # Display features
+    cols = st.columns(len(features))
+    for i, feature in enumerate(features):
+        with cols[i]:
+            if feature.startswith("✓"):
+                st.markdown(f'<div style="text-align: center; color: #2ecc71; font-weight: bold;">{feature}</div>', unsafe_allow_html=True)
+            else:
+                st.markdown(f'<div style="text-align: center; color: #e74c3c; font-weight: bold;">{feature}</div>', unsafe_allow_html=True)
+
+
+def render_pricing_table():
+    """Render a pricing table with all subscription plans."""
+    st.markdown("## Subscription Plans")
+
+    # Billing period toggle
+    selected_period = st.radio(
+        "Billing Period",
+        ["monthly", "annually"],
+        format_func=lambda x: x.capitalize(),
+        horizontal=True
+    )
+
+    # Note about annual savings
+    if selected_period == "annually":
+        st.info("Save up to 20% with annual billing")
+
+    # Get subscription plans
+    plans_df = get_subscription_plans_df()
+
+    if plans_df.empty:
+        st.warning("No subscription plans available.")
+        return
+
+    # Convert DataFrame to list of dictionaries
+    plans = plans_df.to_dict("records")
+
+    # Create a column for each plan
+    cols = st.columns(len(plans))
+
+    # Render pricing cards
+    for i, plan in enumerate(plans):
+        with cols[i]:
+            render_pricing_card(plan, selected_period)
+
+    # Close button
+    if st.button("Back to Dashboard", key="close_pricing"):
+        st.session_state.show_pricing_table = False
+        st.rerun()
+
+
+def render_subscriptions():
+    """
+    Main function to render the subscription management component.
+    """
+    colored_header(
+        label="Subscription Management",
+        description="Manage your subscription and billing",
+        color_name="violet-70"
+    )
+
+    # Initialize default plans if needed
+    initialize_default_plans()
+
+    # Initialize session state
+    if "show_pricing_table" not in st.session_state:
+        st.session_state.show_pricing_table = False
+
+    if "show_payment_form" not in st.session_state:
+        st.session_state.show_payment_form = False
+
+    if "selected_plan" not in st.session_state:
+        st.session_state.selected_plan = None
+
+    if "selected_billing_period" not in st.session_state:
+        st.session_state.selected_billing_period = "monthly"
+
+    if "current_user_subscription" not in st.session_state:
+        st.session_state.current_user_subscription = None
+
+    # Check if we need to show payment form
+    if st.session_state.show_payment_form:
+        render_payment_form()
+        return
+
+    # Check if we need to show pricing table
+    if st.session_state.show_pricing_table:
+        render_pricing_table()
+        return
+
+    # Render current subscription status and dashboard
+    render_subscription_dashboard()
+
+    # Render subscription metrics
+    render_subscription_metrics()
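Note: the pricing table promises "Save up to 20% with annual billing", which only holds if each plan's price_annually is at most 12 × price_monthly × 0.8. A hypothetical sanity check (not in the commit) against the plans DataFrame:

    # Hypothetical check of the advertised annual discount per plan.
    plans_df = get_subscription_plans_df()
    for _, plan in plans_df.iterrows():
        if plan["price_monthly"] > 0:
            discount = 1 - plan["price_annually"] / (12 * plan["price_monthly"])
            print(f"{plan['name']}: {discount:.0%} off with annual billing")
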
components/threats.py
ADDED
@@ -0,0 +1,543 @@
1 |
+
import streamlit as st
|
2 |
+
import pandas as pd
|
3 |
+
import plotly.express as px
|
4 |
+
import plotly.graph_objects as go
|
5 |
+
import numpy as np
|
6 |
+
from datetime import datetime, timedelta
|
7 |
+
import altair as alt
|
8 |
+
|
9 |
+
def render_threats():
|
10 |
+
st.title("Threat Detection & Analysis")
|
11 |
+
|
12 |
+
# Filters section
|
13 |
+
with st.container():
|
14 |
+
st.subheader("Threat Filters")
|
15 |
+
|
16 |
+
col1, col2, col3, col4 = st.columns(4)
|
17 |
+
|
18 |
+
with col1:
|
19 |
+
severity_filter = st.multiselect(
|
20 |
+
"Severity Level",
|
21 |
+
["Critical", "High", "Medium", "Low"],
|
22 |
+
default=["Critical", "High"]
|
23 |
+
)
|
24 |
+
|
25 |
+
with col2:
|
26 |
+
threat_type = st.multiselect(
|
27 |
+
"Threat Type",
|
28 |
+
["Data Breach", "Ransomware", "Phishing", "Malware", "Identity Theft", "Zero-day Exploit"],
|
29 |
+
default=["Data Breach", "Ransomware"]
|
30 |
+
)
|
31 |
+
|
32 |
+
with col3:
|
33 |
+
date_range = st.selectbox(
|
34 |
+
"Time Range",
|
35 |
+
["Last 24 Hours", "Last 7 Days", "Last 30 Days", "Last Quarter", "Custom Range"],
|
36 |
+
index=1
|
37 |
+
)
|
38 |
+
|
39 |
+
with col4:
|
40 |
+
st.text_input("Search Keywords", placeholder="e.g. healthcare, banking")
|
41 |
+
|
42 |
+
st.button("Apply Filters", type="primary")
|
43 |
+
|
44 |
+
# Threat overview metrics
|
45 |
+
st.markdown("### Threat Overview")
|
46 |
+
|
47 |
+
metric_col1, metric_col2, metric_col3, metric_col4, metric_col5 = st.columns(5)
|
48 |
+
|
49 |
+
with metric_col1:
|
50 |
+
st.metric(
|
51 |
+
label="Critical Threats",
|
52 |
+
value="8",
|
53 |
+
delta="2",
|
54 |
+
delta_color="inverse"
|
55 |
+
)
|
56 |
+
|
57 |
+
with metric_col2:
|
58 |
+
st.metric(
|
59 |
+
label="High Threats",
|
60 |
+
value="19",
|
61 |
+
delta="4",
|
62 |
+
delta_color="inverse"
|
63 |
+
)
|
64 |
+
|
65 |
+
with metric_col3:
|
66 |
+
st.metric(
|
67 |
+
label="Medium Threats",
|
68 |
+
value="35",
|
69 |
+
delta="0",
|
70 |
+
delta_color="normal"
|
71 |
+
)
|
72 |
+
|
73 |
+
with metric_col4:
|
74 |
+
st.metric(
|
75 |
+
label="Low Threats",
|
76 |
+
value="52",
|
77 |
+
delta="-5",
|
78 |
+
delta_color="normal"
|
79 |
+
)
|
80 |
+
|
81 |
+
with metric_col5:
|
82 |
+
st.metric(
|
83 |
+
label="Avg. Response Time",
|
84 |
+
value="47m",
|
85 |
+
delta="-13m",
|
86 |
+
delta_color="normal"
|
87 |
+
)
|
88 |
+
|
89 |
+
# Threat detection visualization
|
90 |
+
tab1, tab2, tab3 = st.tabs(["Threat Timeline", "Category Analysis", "Threat Details"])
|
91 |
+
|
92 |
+
with tab1:
|
93 |
+
st.subheader("Threat Detection Timeline")
|
94 |
+
|
95 |
+
# Generate dates and times for the past 14 days with hourly granularity
|
96 |
+
now = datetime.now()
|
97 |
+
timeline_data = []
|
98 |
+
|
99 |
+
for day in range(14, 0, -1):
|
100 |
+
base_date = now - timedelta(days=day)
|
101 |
+
for hour in range(0, 24, 2): # Every 2 hours
|
102 |
+
timestamp = base_date + timedelta(hours=hour)
|
103 |
+
|
104 |
+
# Random threat count for each severity level
|
105 |
+
if np.random.random() > 0.7: # 30% chance of critical
|
106 |
+
severity = "Critical"
|
107 |
+
count = np.random.randint(1, 4)
|
108 |
+
elif np.random.random() > 0.5: # 20% chance of high
|
109 |
+
severity = "High"
|
110 |
+
count = np.random.randint(1, 6)
|
111 |
+
elif np.random.random() > 0.3: # 20% chance of medium
|
112 |
+
severity = "Medium"
|
113 |
+
count = np.random.randint(1, 8)
|
114 |
+
else: # 30% chance of low
|
115 |
+
severity = "Low"
|
116 |
+
count = np.random.randint(1, 10)
|
117 |
+
|
118 |
+
timeline_data.append({
|
119 |
+
"timestamp": timestamp,
|
120 |
+
"severity": severity,
|
121 |
+
"count": count
|
122 |
+
})
|
123 |
+
|
124 |
+
timeline_df = pd.DataFrame(timeline_data)
|
125 |
+
|
126 |
+
# Convert to a format suitable for visualization
|
127 |
+
# Group by date and severity to get counts
|
128 |
+
timeline_df['date'] = timeline_df['timestamp'].dt.strftime('%Y-%m-%d')
|
129 |
+
|
130 |
+
# Create a scatter plot for the timeline with varying dot sizes based on count
|
131 |
+
fig = px.scatter(
|
132 |
+
timeline_df,
|
133 |
+
x='timestamp',
|
134 |
+
y='severity',
|
135 |
+
size='count',
|
136 |
+
color='severity',
|
137 |
+
color_discrete_map={
|
138 |
+
'Critical': '#E74C3C',
|
139 |
+
'High': '#F1C40F',
|
140 |
+
'Medium': '#3498DB',
|
141 |
+
'Low': '#2ECC71'
|
142 |
+
},
|
143 |
+
hover_data=['count'],
|
144 |
+
height=400
|
145 |
+
)
|
146 |
+
|
147 |
+
fig.update_layout(
|
148 |
+
paper_bgcolor='rgba(26, 26, 26, 0)',
|
149 |
+
plot_bgcolor='rgba(26, 26, 26, 0)',
|
150 |
+
xaxis=dict(
|
151 |
+
showgrid=False,
|
152 |
+
title=None,
|
153 |
+
tickfont=dict(color='#ECF0F1')
|
154 |
+
),
|
155 |
+
yaxis=dict(
|
156 |
+
showgrid=False,
|
157 |
+
title=None,
|
158 |
+
tickfont=dict(color='#ECF0F1'),
|
159 |
+
categoryorder='array',
|
160 |
+
categoryarray=['Low', 'Medium', 'High', 'Critical']
|
161 |
+
),
|
162 |
+
margin=dict(l=10, r=10, t=10, b=10)
|
163 |
+
)
|
164 |
+
|
165 |
+
st.plotly_chart(fig, use_container_width=True)
|
166 |
+
|
167 |
+
with tab2:
|
168 |
+
col1, col2 = st.columns(2)
|
169 |
+
|
170 |
+
with col1:
|
171 |
+
st.subheader("Threat Categories")
|
172 |
+
|
173 |
+
# Threat category distribution
|
174 |
+
categories = ['Data Breach', 'Ransomware', 'Phishing', 'Malware', 'Identity Theft', 'Zero-day Exploit']
|
175 |
+
values = [38, 24, 18, 14, 6, 8]
|
176 |
+
|
177 |
+
category_data = pd.DataFrame({
|
178 |
+
'Category': categories,
|
179 |
+
'Count': values
|
180 |
+
})
|
181 |
+
|
182 |
+
fig = px.bar(
|
183 |
+
category_data,
|
184 |
+
x='Category',
|
185 |
+
y='Count',
|
186 |
+
color='Count',
|
187 |
+
color_continuous_scale=['#2ECC71', '#3498DB', '#F1C40F', '#E74C3C'],
|
188 |
+
height=350
|
189 |
+
)
|
190 |
+
|
191 |
+
fig.update_layout(
|
192 |
+
paper_bgcolor='rgba(26, 26, 26, 0)',
|
193 |
+
plot_bgcolor='rgba(26, 26, 26, 0)',
|
194 |
+
coloraxis_showscale=False,
|
195 |
+
xaxis=dict(
|
196 |
+
title=None,
|
197 |
+
tickfont=dict(color='#ECF0F1')
|
198 |
+
),
|
199 |
+
yaxis=dict(
|
200 |
+
title=None,
|
201 |
+
showgrid=True,
|
202 |
+
gridcolor='rgba(44, 62, 80, 0.3)',
|
203 |
+
tickfont=dict(color='#ECF0F1')
|
204 |
+
),
|
205 |
+
margin=dict(l=10, r=10, t=10, b=10)
|
206 |
+
)
|
207 |
+
|
208 |
+
st.plotly_chart(fig, use_container_width=True)
|
209 |
+
|
210 |
+
with col2:
|
211 |
+
st.subheader("Threat Severity Distribution")
|
212 |
+
|
213 |
+
# Severity distribution
|
214 |
+
severity_labels = ['Critical', 'High', 'Medium', 'Low']
|
215 |
+
severity_values = [8, 19, 35, 52]
|
216 |
+
|
217 |
+
fig = px.pie(
|
218 |
+
names=severity_labels,
|
219 |
+
values=severity_values,
|
220 |
+
color=severity_labels,
|
221 |
+
color_discrete_map={
|
222 |
+
'Critical': '#E74C3C',
|
223 |
+
'High': '#F1C40F',
|
224 |
+
'Medium': '#3498DB',
|
225 |
+
'Low': '#2ECC71'
|
226 |
+
},
|
227 |
+
hole=0.4,
|
228 |
+
height=350
|
229 |
+
)
|
230 |
+
|
231 |
+
fig.update_layout(
|
232 |
+
paper_bgcolor='rgba(26, 26, 26, 0)',
|
233 |
+
plot_bgcolor='rgba(26, 26, 26, 0)',
|
234 |
+
showlegend=True,
|
235 |
+
legend=dict(
|
236 |
+
orientation="h",
|
237 |
+
yanchor="bottom",
|
238 |
+
y=-0.2,
|
239 |
+
xanchor="center",
|
240 |
+
x=0.5
|
241 |
+
),
|
242 |
+
margin=dict(l=10, r=10, t=10, b=10),
|
243 |
+
)
|
244 |
+
|
245 |
+
st.plotly_chart(fig, use_container_width=True)
|
246 |
+
|
247 |
+
with tab3:
|
248 |
+
st.subheader("Active Threat Details")
|
249 |
+
|
250 |
+
# Create data for the threat details table
|
251 |
+
threat_details = [
|
252 |
+
{
|
253 |
+
"id": "T-2025-0428",
|
254 |
+
"detected": "2025-04-08 14:32:21",
|
255 |
+
"type": "Data Breach",
|
256 |
+
"target": "Healthcare",
|
257 |
+
"severity": "Critical",
|
258 |
+
"status": "Active",
|
259 |
+
"details": "Patient data exposed on dark web marketplace."
|
260 |
+
},
|
261 |
+
{
|
262 |
+
"id": "T-2025-0427",
|
263 |
+
"detected": "2025-04-08 09:17:45",
|
264 |
+
"type": "Ransomware",
|
265 |
+
"target": "Finance",
|
266 |
+
"severity": "Critical",
|
267 |
+
"status": "Active",
|
268 |
+
"details": "New ransomware variant targeting financial institutions."
|
269 |
+
},
|
270 |
+
{
|
271 |
+
"id": "T-2025-0426",
|
272 |
+
"detected": "2025-04-07 22:03:12",
|
273 |
+
"type": "Zero-day Exploit",
|
274 |
+
"target": "Technology",
|
275 |
+
"severity": "High",
|
276 |
+
"status": "Active",
|
277 |
+
"details": "Critical vulnerability in enterprise software being exploited."
|
278 |
+
},
|
279 |
+
{
|
280 |
+
"id": "T-2025-0425",
|
281 |
+
"detected": "2025-04-07 15:45:39",
|
282 |
+
"type": "Phishing",
|
283 |
+
"target": "Government",
|
284 |
+
"severity": "High",
|
285 |
+
"status": "Active",
|
286 |
+
"details": "Sophisticated phishing campaign targeting government employees."
|
287 |
+
},
|
288 |
+
{
|
289 |
+
"id": "T-2025-0424",
|
290 |
+
"detected": "2025-04-07 11:27:03",
|
291 |
+
"type": "Malware",
|
292 |
+
"target": "Multiple",
|
293 |
+
"severity": "Medium",
|
294 |
+
"status": "Active",
|
295 |
+
"details": "New strain of data-stealing malware distributed via email attachments."
|
296 |
+
}
|
297 |
+
]
|
298 |
+
|
299 |
+
# Create a dataframe for the table
|
300 |
+
threat_df = pd.DataFrame(threat_details)
|
301 |
+
|
302 |
+
# Apply colors to severity column
|
303 |
+
def color_severity(val):
|
304 |
+
color_map = {
|
305 |
+
'Critical': '#E74C3C',
|
306 |
+
'High': '#F1C40F',
|
307 |
+
'Medium': '#3498DB',
|
308 |
+
'Low': '#2ECC71'
|
309 |
+
}
|
310 |
+
return f'background-color: {color_map.get(val, "#ECF0F1")}'
|
311 |
+
|
312 |
+
# Style the dataframe
|
313 |
+
styled_df = threat_df.style.applymap(color_severity, subset=['severity'])
|
314 |
+
|
315 |
+
# Display the table
|
316 |
+
st.dataframe(styled_df, use_container_width=True, height=300)
|
317 |
+
|
318 |
+
# Add action buttons below the table
|
319 |
+
col1, col2, col3, col4 = st.columns(4)
|
320 |
+
|
321 |
+
with col1:
|
322 |
+
st.button("Investigate Selected", key="investigate_btn")
|
323 |
+
|
324 |
+
with col2:
|
325 |
+
st.button("Mark as Resolved", key="resolve_btn")
|
326 |
+
|
327 |
+
with col3:
|
328 |
+
st.button("Export Report", key="export_btn")
|
329 |
+
|
330 |
+
with col4:
|
331 |
+
st.button("Assign to Analyst", key="assign_btn")
|
332 |
+
|
333 |
+
# Threat intelligence section
|
334 |
+
st.markdown("### Threat Intelligence Analysis")
|
335 |
+
|
336 |
+
# Tabs for different intelligence views
|
337 |
+
intel_tab1, intel_tab2, intel_tab3 = st.tabs(["Actor Analysis", "Attack Vectors", "Indicators of Compromise"])
|
338 |
+
|
339 |
+
with intel_tab1:
|
340 |
+
st.subheader("Threat Actor Analysis")
|
341 |
+
|
342 |
+
# Actor table
|
343 |
+
actor_data = [
|
344 |
+
{
|
345 |
+
"actor": "BlackCat Group",
|
346 |
+
"type": "Ransomware",
|
347 |
+
"activity": "High",
|
348 |
+
"targets": "Healthcare, Finance",
|
349 |
+
"ttps": "Double extortion, DDoS threats",
|
350 |
+
"attribution": "Likely Eastern Europe"
|
351 |
+
},
|
352 |
+
{
|
353 |
+
"actor": "CryptoLock",
|
354 |
+
"type": "Ransomware",
|
355 |
+
"activity": "Medium",
|
356 |
+
"targets": "Manufacturing, Energy",
|
357 |
+
"ttps": "Supply chain attacks",
|
358 |
+
"attribution": "Unknown"
|
359 |
+
},
|
360 |
+
{
|
361 |
+
"actor": "DarkLeaks",
|
362 |
+
"type": "Data Broker",
|
363 |
+
"activity": "High",
|
364 |
+
"targets": "All sectors",
|
365 |
+
"ttps": "Data aggregation, auction site",
|
366 |
+
"attribution": "Multiple affiliates"
|
367 |
+
}
|
368 |
+
]
|
369 |
+
|
370 |
+
actor_df = pd.DataFrame(actor_data)
|
371 |
+
st.dataframe(actor_df, use_container_width=True)
|
372 |
+
|
373 |
+
# Relationship graph placeholder
|
374 |
+
st.subheader("Threat Actor Relationships")
|
375 |
+
st.image("https://images.unsplash.com/photo-1510987836583-e3fb9586c7b3",
|
376 |
+
caption="Network analysis of threat actor relationships and infrastructure",
|
377 |
+
use_column_width=True)
|
378 |
+
|
379 |
+
with intel_tab2:
|
380 |
+
st.subheader("Common Attack Vectors")
|
381 |
+
|
382 |
+
# Attack vector distribution
|
383 |
+
vectors = ['Phishing Email', 'Compromised Credentials', 'Malware Infection',
|
384 |
+
'Supply Chain', 'Unpatched Vulnerability', 'Social Engineering']
|
385 |
+
        percentages = [35, 28, 15, 10, 8, 4]

        vector_data = pd.DataFrame({
            'Vector': vectors,
            'Percentage': percentages
        })

        # Horizontal bar chart for attack vectors
        fig = px.bar(
            vector_data,
            x='Percentage',
            y='Vector',
            orientation='h',
            color='Percentage',
            color_continuous_scale=['#2ECC71', '#3498DB', '#F1C40F', '#E74C3C'],
            height=300
        )

        fig.update_layout(
            paper_bgcolor='rgba(26, 26, 26, 0)',
            plot_bgcolor='rgba(26, 26, 26, 0)',
            coloraxis_showscale=False,
            xaxis=dict(
                title='Percentage of Attacks',
                showgrid=True,
                gridcolor='rgba(44, 62, 80, 0.3)',
                tickfont=dict(color='#ECF0F1')
            ),
            yaxis=dict(
                title=None,
                showgrid=False,
                tickfont=dict(color='#ECF0F1')
            ),
            margin=dict(l=10, r=10, t=10, b=10)
        )

        st.plotly_chart(fig, use_container_width=True)

        # Technical details section
        st.subheader("Technical Analysis")

        vector_tabs = st.tabs(["Phishing", "Malware", "Vulnerabilities"])

        with vector_tabs[0]:
            st.markdown("#### Phishing Campaign Analysis")
            st.markdown("""
            Recent phishing campaigns observed in dark web forums targeting:
            - Financial institutions (spoofed login pages)
            - Healthcare providers (fake patient portals)
            - Government employees (document sharing lures)

            **Tactics include:**
            - Lookalike domains with valid SSL certificates
            - Evasion of email security through legitimate hosting services
            - Use of shortened URLs to disguise destinations
            """)

        with vector_tabs[1]:
            st.markdown("#### Malware Analysis")
            st.markdown("""
            Prevalent malware families being distributed:
            - TrickBot (banking trojan with evolving capabilities)
            - Emotet (modular malware with spam capabilities)
            - Conti (ransomware with data exfiltration)

            **Distribution channels:**
            - Malicious email attachments (Excel files with macros)
            - Compromised software updates
            - Drive-by downloads from compromised websites
            """)

        with vector_tabs[2]:
            st.markdown("#### Vulnerability Exploitation")
            st.markdown("""
            Critical vulnerabilities being actively exploited:
            - CVE-2024-1234: Remote code execution in web servers
            - CVE-2024-5678: Authentication bypass in VPN appliances
            - CVE-2024-9101: Privilege escalation in enterprise software

            **Exploitation timeline:**
            - Average time from disclosure to exploitation: 72 hours
            - Peak exploitation activity occurs within 2 weeks
            - Persistence mechanisms often installed for long-term access
            """)

    with intel_tab3:
        st.subheader("Indicators of Compromise (IoCs)")

        # IoC tabs
        ioc_tabs = st.tabs(["IP Addresses", "Domains", "File Hashes", "URLs"])

        with ioc_tabs[0]:
            ip_data = pd.DataFrame({
                'IP Address': ['198.51.100.123', '203.0.113.45', '198.51.100.67', '203.0.113.89', '198.51.100.213'],
                'ASN': ['AS12345', 'AS67890', 'AS12345', 'AS23456', 'AS34567'],
                'Country': ['Russia', 'China', 'Russia', 'Ukraine', 'Brazil'],
                'First Seen': ['2025-04-01', '2025-04-03', '2025-04-04', '2025-04-05', '2025-04-07'],
                'Last Seen': ['2025-04-08', '2025-04-08', '2025-04-08', '2025-04-07', '2025-04-08'],
                'Associated Malware': ['TrickBot', 'Emotet', 'TrickBot', 'BlackCat', 'Conti']
            })

            st.dataframe(ip_data, use_container_width=True)

        with ioc_tabs[1]:
            domain_data = pd.DataFrame({
                'Domain': ['secure-banklogin.com', 'microsoft-update.xyz', 'docusign-view.net', 'healthcare-portal.org', 'service-login.co'],
                'IP Address': ['198.51.100.123', '203.0.113.45', '198.51.100.67', '203.0.113.89', '198.51.100.213'],
                'Registrar': ['NameCheap', 'GoDaddy', 'NameCheap', 'Hostinger', 'GoDaddy'],
                'Created Date': ['2025-03-30', '2025-04-01', '2025-04-02', '2025-04-03', '2025-04-05'],
                'Classification': ['Phishing', 'Malware C2', 'Phishing', 'Phishing', 'Phishing']
            })

            st.dataframe(domain_data, use_container_width=True)

        with ioc_tabs[2]:
            hash_data = pd.DataFrame({
                'File Hash (SHA-256)': [
                    'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
                    'a7ffc6f8bf1ed76651c14756a061d662f580ff4de43b49fa82d80a4b80f8434a',
                    '3f39d5c348e5b79d06e842c114e6cc571583bbf44e4b0ebfda1a01ec05745d43',
                    'ca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb',
                    '2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae'
                ],
                'File Name': ['invoice.doc', 'setup.exe', 'update.exe', 'report.xlsx', 'attachment.pdf'],
                'File Type': ['DOC', 'EXE', 'EXE', 'XLSX', 'PDF'],
                'Detection Ratio': ['37/58', '42/58', '29/58', '35/58', '23/58'],
                'Malware Family': ['Emotet', 'TrickBot', 'Conti', 'Emotet', 'AgentTesla']
            })

            st.dataframe(hash_data, use_container_width=True)

        with ioc_tabs[3]:
            url_data = pd.DataFrame({
                'URL': [
                    'https://secure-banklogin.com/auth/login.php',
                    'https://microsoft-update.xyz/download/patch.exe',
                    'https://docusign-view.net/document/invoice.doc',
                    'https://healthcare-portal.org/patient/login',
                    'https://service-login.co/auth/reset'
                ],
                'Status': ['Active', 'Active', 'Inactive', 'Active', 'Active'],
                'Classification': ['Phishing', 'Malware Distribution', 'Phishing', 'Phishing', 'Phishing'],
                'Target': ['Banking Customers', 'General', 'Business', 'Healthcare', 'General'],
                'First Reported': ['2025-04-02', '2025-04-03', '2025-04-04', '2025-04-06', '2025-04-07']
            })

            st.dataframe(url_data, use_container_width=True)

        # Action buttons
        col1, col2, col3 = st.columns(3)

        with col1:
            st.button("Export IoCs", key="export_ioc_btn")

        with col2:
            st.button("Add to Blocklist", key="blocklist_btn")

        with col3:
            st.button("Share Intelligence", key="share_intel_btn")
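The three action buttons above render without handlers. A minimal sketch of wiring the export path, assuming the ip_data frame built in the first IoC tab (the key and file name are illustrative; st.download_button is standard Streamlit API):

            # Hypothetical wiring for "Export IoCs"; ip_data comes from the tab above.
            csv_bytes = ip_data.to_csv(index=False).encode("utf-8")
            st.download_button(
                label="Export IoCs",
                data=csv_bytes,
                file_name="iocs_ip_addresses.csv",
                mime="text/csv",
                key="export_ioc_download_btn"
            )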
components/web_scraper.py
ADDED
@@ -0,0 +1,330 @@
"""
Web scraper component for Streamlit frontend.
This integrates with the backend scraper service.
"""
import streamlit as st
import pandas as pd
import plotly.graph_objects as go
import time
import re
import asyncio
import httpx
from typing import Dict, Any, List, Optional
import json
import sys
import os

# Add the src directory to the path so we can import the services
sys.path.append(os.path.abspath('.'))

try:
    from src.services.scraper import WebScraper
    from src.services.tor_proxy import TorProxyService
except ImportError:
    # Fallback if imports fail - we'll use a simplified version
    WebScraper = None
    TorProxyService = None

# Check if Tor is running
def is_tor_running() -> bool:
    """Check if Tor service is running and accessible."""
    try:
        with httpx.Client(timeout=3) as client:
            response = client.get("http://127.0.0.1:9050")
        return True
    except Exception:
        return False

# Create a scraper instance
async def get_scraper():
    """Get a configured scraper instance."""
    if WebScraper and TorProxyService:
        try:
            tor_proxy = TorProxyService()
            # Check if Tor is accessible
            is_connected = await tor_proxy.check_connection()
            if is_connected:
                return WebScraper(tor_proxy_service=tor_proxy)
        except Exception as e:
            st.error(f"Error connecting to Tor: {e}")

    # If we can't connect to Tor or imports failed, return None
    return None

async def extract_content(url: str, use_tor: bool = False) -> Dict[str, Any]:
    """
    Extract content from a URL using the backend scraper.

    Args:
        url (str): URL to scrape
        use_tor (bool): Whether to use Tor proxy

    Returns:
        Dict[str, Any]: Extracted content
    """
    scraper = await get_scraper()

    if scraper:
        try:
            return await scraper.extract_content(url, use_tor=use_tor)
        except Exception as e:
            st.error(f"Error extracting content: {e}")
            return {
                "url": url,
                "title": "Error extracting content",
                "text_content": f"Failed to extract content: {e}",
                "indicators": {},
                "links": []
            }
    else:
        # Fallback to simulated data if scraper is unavailable
        st.warning("Advanced scraping functionality unavailable. Using limited extraction.")
        try:
            with httpx.Client(timeout=10) as client:
                response = client.get(url)
                return {
                    "url": url,
                    "title": f"Content from {url}",
                    "text_content": response.text[:1000] + "...",
                    "indicators": {},
                    "links": []
                }
        except Exception as e:
            return {
                "url": url,
                "title": "Error fetching content",
                "text_content": f"Failed to fetch content: {e}",
                "indicators": {},
                "links": []
            }

def render_indicators(indicators: Dict[str, List[str]]):
    """
    Render extracted indicators in a formatted way.

    Args:
        indicators (Dict[str, List[str]]): Dictionary of indicator types and values
    """
    if not indicators:
        st.info("No indicators found in the content.")
        return

    # Create tabs for different indicator types
    tabs = st.tabs([
        f"IP Addresses ({len(indicators.get('ip_addresses', []))})",
        f"Emails ({len(indicators.get('email_addresses', []))})",
        f"Bitcoin ({len(indicators.get('bitcoin_addresses', []))})",
        f"URLs ({len(indicators.get('urls', []))})",
        f"Onion URLs ({len(indicators.get('onion_urls', []))})"
    ])

    # IP Addresses
    with tabs[0]:
        if indicators.get('ip_addresses'):
            st.markdown("#### Extracted IP Addresses")
            ip_df = pd.DataFrame(indicators['ip_addresses'], columns=["IP Address"])
            st.dataframe(ip_df, use_container_width=True)
        else:
            st.info("No IP addresses found.")

    # Email Addresses
    with tabs[1]:
        if indicators.get('email_addresses'):
            st.markdown("#### Extracted Email Addresses")
            email_df = pd.DataFrame(indicators['email_addresses'], columns=["Email"])
            st.dataframe(email_df, use_container_width=True)
        else:
            st.info("No email addresses found.")

    # Bitcoin Addresses
    with tabs[2]:
        if indicators.get('bitcoin_addresses'):
            st.markdown("#### Extracted Bitcoin Addresses")
            btc_df = pd.DataFrame(indicators['bitcoin_addresses'], columns=["Bitcoin Address"])
            st.dataframe(btc_df, use_container_width=True)
        else:
            st.info("No Bitcoin addresses found.")

    # URLs
    with tabs[3]:
        if indicators.get('urls'):
            st.markdown("#### Extracted URLs")
            url_df = pd.DataFrame(indicators['urls'], columns=["URL"])
            st.dataframe(url_df, use_container_width=True)
        else:
            st.info("No URLs found.")

    # Onion URLs
    with tabs[4]:
        if indicators.get('onion_urls'):
            st.markdown("#### Extracted Onion URLs")
            onion_df = pd.DataFrame(indicators['onion_urls'], columns=["Onion URL"])
            st.dataframe(onion_df, use_container_width=True)
        else:
            st.info("No onion URLs found.")

def create_keyword_highlight(text: str, keywords: Optional[List[str]] = None) -> str:
    """
    Highlight keywords in text for display.

    Args:
        text (str): Text content to highlight
        keywords (Optional[List[str]]): Keywords to highlight

    Returns:
        str: HTML with highlighted keywords
    """
    if not text or not keywords:
        return text

    # Escape HTML
    text = text.replace('<', '&lt;').replace('>', '&gt;')

    # Highlight keywords
    for keyword in keywords:
        if not keyword.strip():
            continue
        pattern = re.compile(re.escape(keyword), re.IGNORECASE)
        text = pattern.sub(f'<span style="background-color: #E74C3C40; padding: 0 2px; border-radius: 3px;">{keyword}</span>', text)

    return text

def render_web_scraper_ui():
    """Render the web scraper user interface."""
    st.title("Dark Web Intelligence Gathering")

    # Check if Tor is accessible
    if is_tor_running():
        st.success("Tor service is available for .onion sites")
    else:
        st.warning("Tor service not detected. Limited to clearnet sites only.")

    # Create UI layout
    col1, col2 = st.columns([2, 1])

    with col1:
        st.markdown("### Content Extraction & Analysis")

        # URL input
        url = st.text_input(
            "Enter URL to analyze",
            value="https://example.com",
            help="Enter a URL to scrape and analyze. For .onion sites, ensure Tor is configured."
        )

        # Options
        use_tor = st.checkbox(
            "Use Tor proxy",
            value='.onion' in url,
            help="Use Tor proxy for accessing .onion sites or for anonymity"
        )

        # Keyword highlighting
        keywords_input = st.text_area(
            "Keywords to highlight (one per line)",
            value="example\ndata\nbreach",
            help="Enter keywords to highlight in the extracted content"
        )
        keywords = [k.strip() for k in keywords_input.split('\n') if k.strip()]

        # Extract button
        extract_button = st.button("Extract Content")

    with col2:
        st.markdown("### Analysis Options")

        analysis_tabs = st.radio(
            "Analysis Type",
            ["Text Analysis", "Indicators", "Sentiment Analysis", "Entity Recognition"],
            help="Select the type of analysis to perform on the extracted content"
        )

        st.markdown("### Monitoring")
        monitoring_options = st.multiselect(
            "Add to monitoring list",
            ["IP Addresses", "Email Addresses", "Bitcoin Addresses", "URLs", "Onion URLs"],
            default=["IP Addresses", "URLs"],
            help="Select which indicator types to monitor"
        )

        alert_threshold = st.slider(
            "Alert Threshold",
            min_value=0.0,
            max_value=1.0,
            value=0.7,
            step=0.05,
            help="Set the confidence threshold for alerts"
        )

    # Handle content extraction
    if extract_button:
        with st.spinner("Extracting content..."):
            # Run the async extraction
            content_data = asyncio.run(extract_content(url, use_tor=use_tor))

            # Store results in session state
            st.session_state.extracted_content = content_data

            # Success message
            st.success(f"Content extracted from {url}")

    # Display extracted content if available
    if 'extracted_content' in st.session_state:
        content_data = st.session_state.extracted_content

        # Display content in tabs
        content_tabs = st.tabs(["Extracted Text", "Indicators", "Metadata", "Raw HTML"])

        # Extracted text tab
        with content_tabs[0]:
            st.markdown(f"### {content_data.get('title', 'Extracted Content')}")
            st.info(f"Source: {content_data.get('url')}")

            # Highlight keywords in text
            highlighted_text = create_keyword_highlight(
                content_data.get('text_content', 'No content extracted'),
                keywords
            )

            st.markdown(f"""
            <div style="border: 1px solid #3498DB; border-radius: 5px; padding: 15px;
                 background-color: #1A1A1A; height: 400px; overflow-y: auto;">
                {highlighted_text}
            </div>
            """, unsafe_allow_html=True)

        # Indicators tab
        with content_tabs[1]:
            render_indicators(content_data.get('indicators', {}))

        # Metadata tab
        with content_tabs[2]:
            st.markdown("### Document Metadata")

            metadata = content_data.get('metadata', {})
            if metadata:
                for key, value in metadata.items():
                    if value:
                        st.markdown(f"**{key}:** {value}")
            else:
                st.info("No metadata available")

        # Raw HTML tab
        with content_tabs[3]:
            st.markdown("### Raw HTML")
            with st.expander("Show Raw HTML"):
                st.code(content_data.get('html_content', 'No HTML content available'), language="html")

    # Additional informational UI elements
    st.markdown("---")
    st.markdown("### About Dark Web Intelligence")
    st.markdown("""
    This tool allows you to extract and analyze content from both clearnet and dark web sites.
    For .onion sites, make sure Tor is properly configured.

    **Features:**
    - Extract and analyze content from any URL
    - Highlight keywords of interest
    - Identify indicators of compromise (IoCs)
    - Add indicators to monitoring list
    """)
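For reference, extract_content can also be exercised outside the Streamlit UI; a minimal sketch, assuming the package layout above is on the path (the URL is illustrative, and the st.error/st.warning calls simply no-op outside a Streamlit session):

    import asyncio
    from components.web_scraper import extract_content

    # Falls back to a plain httpx fetch when the backend scraper or Tor is unavailable.
    result = asyncio.run(extract_content("https://example.com", use_tor=False))
    print(result["title"])
    print(result["indicators"])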
hf_app.py
ADDED
@@ -0,0 +1,146 @@
"""
CyberForge Dashboard - Hugging Face Spaces Version
"""
import os
import sys
import streamlit as st
from streamlit_extras.add_vertical_space import add_vertical_space
from streamlit_extras.colored_header import colored_header

# Check if we're running on Hugging Face Spaces
is_huggingface = os.environ.get('SPACE_ID') is not None

# Set the page config
st.set_page_config(
    page_title="CyberForge Dashboard",
    page_icon="🕵️♂️",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Add custom CSS
st.markdown("""
<style>
    .stApp {
        background-color: #0e1117;
    }
    .sidebar .sidebar-content {
        background-color: #262730;
    }
    h1, h2, h3 {
        color: #f8f9fa;
    }
    .cybertext {
        color: #00ff8d;
        font-family: monospace;
    }
</style>
""", unsafe_allow_html=True)

# Choose between HF demo mode or regular mode
if is_huggingface:
    # Initialize in-memory database for Hugging Face
    import hf_database
    st.session_state.is_demo = True

    # Show demo mode banner
    st.warning("⚠️ Running in Hugging Face Spaces DEMO MODE. Data is stored in-memory and will be reset when the space restarts.")
else:
    # Regular database initialization
    import src.database_init

# Import components
from components.dashboard import render_dashboard
from components.threats import render_threats
from components.monitoring import render_monitoring
from components.alerts import render_alerts
from components.reports import render_reports
from components.live_feed import render_live_feed, render_content_analysis
from components.web_scraper import render_web_scraper_ui

# Custom notification function
def add_notification(title, message, severity="info", icon="🔔"):
    """Add a notification to the session state"""
    if "notifications" not in st.session_state:
        st.session_state.notifications = []

    # Add notification with timestamp
    import time
    notification = {
        "id": int(time.time() * 1000),
        "title": title,
        "message": message,
        "severity": severity,
        "icon": icon,
        "read": False,
        "timestamp": time.time()
    }
    st.session_state.notifications.insert(0, notification)

# Initialize notifications if needed
if "notifications" not in st.session_state:
    st.session_state.notifications = []

# Sidebar navigation
with st.sidebar:
    st.image("assets/cyberforge_logo.svg", width=200)
    st.title("CyberForge")

    # Demo badge
    if st.session_state.get("is_demo", False):
        st.markdown("#### 🔍 Demo Mode")

    st.markdown("---")

    # Navigation
    nav_selection = st.radio(
        "Navigation",
        ["Dashboard", "Threats", "Monitoring", "Alerts", "Reports", "Live Feed", "Content Analysis", "Web Scraper"]
    )

    # User information
    st.markdown("---")
    st.markdown("### User Info")
    if st.session_state.get("is_demo", False):
        st.markdown("👤 **Admin User** (Demo)")
        st.markdown("🔑 Role: Administrator")
    else:
        st.markdown("👤 **Analyst**")
        st.markdown("🔑 Role: Security Analyst")

    # Notification count
    unread_count = sum(1 for n in st.session_state.notifications if not n["read"])
    if unread_count > 0:
        st.markdown(f"🔔 **{unread_count}** unread notifications")

    # Credits
    st.markdown("---")
    st.markdown("### CyberForge v1.0")
    st.markdown("© 2025 Chemically Motivated Solutions")

    # HF badge if on Hugging Face
    if is_huggingface:
        st.markdown("---")
        st.markdown("""
        <a href="https://huggingface.co/spaces" target="_blank">
            <img src="https://img.shields.io/badge/Hosted%20on-HF%20Spaces-blue" alt="HuggingFace Spaces"/>
        </a>
        """, unsafe_allow_html=True)

# Main content area
if nav_selection == "Dashboard":
    render_dashboard()
elif nav_selection == "Threats":
    render_threats()
elif nav_selection == "Monitoring":
    render_monitoring()
elif nav_selection == "Alerts":
    render_alerts()
elif nav_selection == "Reports":
    render_reports()
elif nav_selection == "Live Feed":
    render_live_feed()
elif nav_selection == "Content Analysis":
    render_content_analysis()
elif nav_selection == "Web Scraper":
    render_web_scraper_ui()
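A usage sketch for the add_notification helper from inside any component render function (the title, message, and icon are illustrative):

    add_notification(
        title="New Critical Threat",
        message="Ransomware chatter mentioning a monitored keyword was detected.",
        severity="warning",
        icon="⚠️"
    )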
hf_database.py
ADDED
@@ -0,0 +1,100 @@
"""
Database initialization for Hugging Face Spaces environment.
This creates an in-memory SQLite database for demo purposes.
"""
import logging
import os
import sqlite3
from sqlalchemy import create_engine, event
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import StaticPool
from src.models.base import Base
from src.models.user import User
from src.api.security import get_password_hash

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Database URL for SQLite in-memory
DATABASE_URL = "sqlite:///:memory:"

# Create engine with special configuration for in-memory SQLite
engine = create_engine(
    DATABASE_URL,
    connect_args={"check_same_thread": False},
    poolclass=StaticPool,
    echo=False
)

# Add pragma for foreign key support
@event.listens_for(engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
    cursor = dbapi_connection.cursor()
    cursor.execute("PRAGMA foreign_keys=ON")
    cursor.close()

# Create all tables
Base.metadata.create_all(engine)

# Create session factory
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

def init_demo_data():
    """Initialize demo data for the in-memory database."""
    session = SessionLocal()
    try:
        # Check if we already have users
        user_count = session.query(User).count()
        if user_count == 0:
            # Create admin user
            admin_user = User(
                username="admin",
                email="[email protected]",
                full_name="Admin User",
                hashed_password=get_password_hash("adminpassword"),
                is_active=True,
                is_superuser=True
            )
            session.add(admin_user)

            # Create regular user
            regular_user = User(
                username="user",
                email="[email protected]",
                full_name="Regular User",
                hashed_password=get_password_hash("userpassword"),
                is_active=True,
                is_superuser=False
            )
            session.add(regular_user)

            # Create API user
            api_user = User(
                username="api_user",
                email="[email protected]",
                full_name="API User",
                hashed_password=get_password_hash("apipassword"),
                is_active=True,
                is_superuser=False
            )
            session.add(api_user)

            # Commit the session
            session.commit()
            logger.info("Demo users created successfully")
        else:
            logger.info("Demo data already exists")

        # Here you would add other demo data like threats, indicators, etc.

    except Exception as e:
        session.rollback()
        logger.error(f"Error initializing demo data: {e}")
    finally:
        session.close()

# Initialize demo data
init_demo_data()

logger.info("Hugging Face database initialized with demo data")
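Components can borrow the same session factory to read the seeded data; a minimal sketch, assuming the imports above resolve:

    from hf_database import SessionLocal
    from src.models.user import User

    session = SessionLocal()
    try:
        # List the demo accounts created by init_demo_data()
        for user in session.query(User).all():
            print(user.username, user.is_superuser)
    finally:
        session.close()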
huggingface-space.yml
ADDED
@@ -0,0 +1,9 @@
title: CyberForge Dashboard
emoji: 🕵️♂️
colorFrom: blue
colorTo: indigo
sdk: streamlit
sdk_version: 1.32.0
app_file: hf_app.py
pinned: false
license: mit
requirements.txt
ADDED
@@ -0,0 +1,20 @@
alembic==1.13.1
asyncpg==0.29.0
bcrypt==4.1.2
beautifulsoup4==4.12.2
celery==5.3.6
email-validator==2.1.0.post1
fastapi==0.109.2
httpx==0.27.0
pandas==2.1.0
passlib==1.7.4
plotly==5.18.0
pysocks==1.7.1
python-jose==3.3.0
redis==5.0.1
sqlalchemy==2.0.28
streamlit==1.32.0
streamlit-extras==0.3.5
trafilatura==1.6.3
python-multipart==0.0.7
pydantic[email]==2.4.2
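For a local run with these pins, the entry point matches app_file in huggingface-space.yml:

    pip install -r requirements.txt
    streamlit run hf_app.py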
src/database_init.py
ADDED
@@ -0,0 +1,113 @@
"""
Database initialization for the application.

This script checks if the database is initialized and creates tables if needed.
It's meant to be imported and run at application startup.
"""
import os
import logging
import asyncio
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import sessionmaker
from sqlalchemy.future import select
import subprocess
import sys

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)

# Database URL from environment
db_url = os.getenv("DATABASE_URL", "")
if db_url.startswith("postgresql://"):
    # Remove sslmode parameter if present which causes issues with asyncpg
    if "?" in db_url:
        base_url, params = db_url.split("?", 1)
        param_list = params.split("&")
        filtered_params = [p for p in param_list if not p.startswith("sslmode=")]
        if filtered_params:
            db_url = f"{base_url}?{'&'.join(filtered_params)}"
        else:
            db_url = base_url

    ASYNC_DATABASE_URL = db_url.replace("postgresql://", "postgresql+asyncpg://", 1)
else:
    ASYNC_DATABASE_URL = "postgresql+asyncpg://postgres:postgres@localhost:5432/postgres"


async def check_db_initialized():
    """Check if the database is initialized with required tables."""
    try:
        engine = create_async_engine(
            ASYNC_DATABASE_URL,
            echo=False,
        )

        # Create session factory
        async_session = sessionmaker(
            engine,
            class_=AsyncSession,
            expire_on_commit=False
        )

        async with async_session() as session:
            # Try to query tables
            # Replace with actual table names once you've defined them
            try:
                # Check if the 'users' table exists
                from sqlalchemy import text
                query = text("SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = 'users')")
                result = await session.execute(query)
                exists = result.scalar()

                if exists:
                    logger.info("Database is initialized.")
                    return True
                else:
                    logger.warning("Database tables are not initialized.")
                    return False
            except Exception as e:
                logger.error(f"Error checking tables: {e}")
                return False
    except Exception as e:
        logger.error(f"Failed to connect to database: {e}")
        return False


def initialize_database():
    """Initialize the database with required tables."""
    try:
        # Call the init_db.py script
        logger.info("Initializing database...")

        # Get the current directory
        current_dir = os.path.dirname(os.path.abspath(__file__))
        script_path = os.path.join(current_dir, "scripts", "init_db.py")

        # Run the script using the current Python interpreter
        result = subprocess.run([sys.executable, script_path], capture_output=True, text=True)

        if result.returncode == 0:
            logger.info("Database initialized successfully.")
            logger.debug(result.stdout)
            return True
        else:
            logger.error(f"Failed to initialize database: {result.stderr}")
            return False
    except Exception as e:
        logger.error(f"Error initializing database: {e}")
        return False


def ensure_database_initialized():
    """Ensure the database is initialized with required tables."""
    is_initialized = asyncio.run(check_db_initialized())

    if not is_initialized:
        return initialize_database()

    return True
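A typical call site for this module is application startup; a minimal sketch (the error handling is illustrative):

    from src.database_init import ensure_database_initialized

    # Returns False when neither the check nor the init script succeeds.
    if not ensure_database_initialized():
        raise SystemExit("Database could not be initialized; check DATABASE_URL.")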
src/models/__init__.py
ADDED
@@ -0,0 +1,3 @@
"""
Package initialization for models.
"""
src/models/alert.py
ADDED
@@ -0,0 +1,68 @@
"""
Model for storing alerts generated from threats and dark web mentions.
"""
from sqlalchemy import Column, String, Text, Integer, DateTime, ForeignKey, Enum, Boolean
from sqlalchemy.orm import relationship
import enum
from datetime import datetime
from typing import Optional

from src.models.base import BaseModel
from src.models.threat import ThreatSeverity

class AlertCategory(enum.Enum):
    """Categories of alerts."""
    THREAT_DETECTED = "Threat Detected"
    MENTION_DETECTED = "Mention Detected"
    CREDENTIAL_LEAK = "Credential Leak"
    DATA_BREACH = "Data Breach"
    VULNERABILITY = "Vulnerability"
    MALWARE = "Malware"
    PHISHING = "Phishing"
    SUSPICIOUS_ACTIVITY = "Suspicious Activity"
    SYSTEM = "System Alert"
    OTHER = "Other"


class AlertStatus(enum.Enum):
    """Status of alerts."""
    NEW = "New"
    ASSIGNED = "Assigned"
    INVESTIGATING = "Investigating"
    RESOLVED = "Resolved"
    FALSE_POSITIVE = "False Positive"
    IGNORED = "Ignored"


class Alert(BaseModel):
    """Model for alerts generated from threats and mentions."""
    __tablename__ = "alerts"

    # Alert details
    title = Column(String(255), nullable=False)
    description = Column(Text, nullable=False)
    severity = Column(Enum(ThreatSeverity), nullable=False)
    status = Column(Enum(AlertStatus), nullable=False, default=AlertStatus.NEW)
    category = Column(Enum(AlertCategory), nullable=False)

    # Alert metadata
    generated_at = Column(DateTime, default=datetime.utcnow)
    source_url = Column(String(1024))
    is_read = Column(Boolean, default=False)

    # Relationships
    threat_id = Column(Integer, ForeignKey("threats.id"))
    threat = relationship("Threat", back_populates="alerts")

    mention_id = Column(Integer, ForeignKey("dark_web_mentions.id"))
    mention = relationship("DarkWebMention", back_populates="alerts")

    # Assignment and resolution
    assigned_to_id = Column(Integer, ForeignKey("users.id"))
    assigned_to = relationship("User")

    action_taken = Column(Text)
    resolved_at = Column(DateTime)

    def __repr__(self):
        return f"<Alert(id={self.id}, title={self.title}, severity={self.severity}, status={self.status})>"
src/models/base.py
ADDED
@@ -0,0 +1,20 @@
"""
Base model for all database models.
"""
from datetime import datetime

from sqlalchemy import Column, Integer, DateTime
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class BaseModel(Base):
    """
    Base model for all database models.
    Provides common fields like id, created_at, updated_at.
    """
    __abstract__ = True

    id = Column(Integer, primary_key=True, index=True, autoincrement=True)
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
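Every concrete model in this commit follows the same pattern on top of BaseModel; a hypothetical minimal example:

    from sqlalchemy import Column, String
    from src.models.base import BaseModel

    class Note(BaseModel):
        """Hypothetical model: inherits id/created_at/updated_at from BaseModel."""
        __tablename__ = "notes"

        text = Column(String(255), nullable=False)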
src/models/dark_web_content.py
ADDED
@@ -0,0 +1,93 @@
"""
Models for storing dark web content and mentions.
"""
from sqlalchemy import Column, String, Text, Integer, Float, DateTime, ForeignKey, Enum, Boolean
from sqlalchemy.orm import relationship
import enum
from datetime import datetime

from src.models.base import BaseModel

class ContentType(enum.Enum):
    """Type of dark web content."""
    FORUM_POST = "Forum Post"
    MARKETPLACE_LISTING = "Marketplace Listing"
    BLOG_ARTICLE = "Blog Article"
    CHAT_LOG = "Chat Log"
    PASTE = "Paste"
    DOCUMENT = "Document"
    IMAGE = "Image"
    VIDEO = "Video"
    SOURCE_CODE = "Source Code"
    OTHER = "Other"


class ContentStatus(enum.Enum):
    """Status of dark web content."""
    NEW = "New"
    ANALYZING = "Analyzing"
    ANALYZED = "Analyzed"
    RELEVANT = "Relevant"
    IRRELEVANT = "Irrelevant"
    ARCHIVED = "Archived"


class DarkWebContent(BaseModel):
    """Model for storing dark web content."""
    __tablename__ = "dark_web_contents"

    # Content source
    url = Column(String(1024), nullable=False)
    domain = Column(String(255))

    # Content metadata
    title = Column(String(500))
    content = Column(Text, nullable=False)
    content_type = Column(Enum(ContentType), default=ContentType.OTHER)
    content_status = Column(Enum(ContentStatus), default=ContentStatus.NEW)

    # Source information
    source_name = Column(String(255))
    source_type = Column(String(100))
    language = Column(String(10))
    scraped_at = Column(DateTime, default=datetime.utcnow)

    # Analysis results
    relevance_score = Column(Float, default=0.0)
    sentiment_score = Column(Float, default=0.0)
    entity_data = Column(Text)  # JSON storage for extracted entities

    # Relationships
    mentions = relationship("DarkWebMention", back_populates="content", cascade="all, delete-orphan")
    search_results = relationship("SearchResult", back_populates="content")

    def __repr__(self):
        return f"<DarkWebContent(id={self.id}, url={self.url}, content_type={self.content_type})>"


class DarkWebMention(BaseModel):
    """Model for storing mentions of monitored entities in dark web content."""
    __tablename__ = "dark_web_mentions"

    # Relationship to content
    content_id = Column(Integer, ForeignKey("dark_web_contents.id"), nullable=False)
    content = relationship("DarkWebContent", back_populates="mentions")

    # Mention details
    keyword = Column(String(100), nullable=False)
    keyword_category = Column(String(50))

    # Extracted context
    context = Column(Text)
    snippet = Column(Text)

    # Mention metadata
    mention_type = Column(String(50))  # Type of mention (e.g., "brand", "employee", "product")
    confidence = Column(Float, default=0.0)
    is_verified = Column(Boolean, default=False)

    # Relationships
    alerts = relationship("Alert", back_populates="mention", cascade="all, delete-orphan")

    def __repr__(self):
        return f"<DarkWebMention(id={self.id}, keyword={self.keyword}, content_id={self.content_id})>"
src/models/indicator.py
ADDED
@@ -0,0 +1,49 @@
"""
Model for storing indicators of compromise (IOCs) and other threat indicators.
"""
from sqlalchemy import Column, String, Text, Integer, Float, DateTime, ForeignKey, Enum, Boolean
from sqlalchemy.orm import relationship
import enum
from datetime import datetime

from src.models.base import BaseModel

class IndicatorType(enum.Enum):
    """Type of indicator."""
    IP_ADDRESS = "IP Address"
    DOMAIN = "Domain"
    URL = "URL"
    HASH = "Hash"
    EMAIL = "Email"
    FILE = "File"
    REGISTRY = "Registry"
    USER_AGENT = "User Agent"
    CVE = "CVE"
    SOFTWARE = "Software"
    KEYWORD = "Keyword"
    OTHER = "Other"


class Indicator(BaseModel):
    """Model for indicators related to threats."""
    __tablename__ = "indicators"

    # Indicator details
    value = Column(String(1024), nullable=False)
    indicator_type = Column(Enum(IndicatorType), nullable=False)
    description = Column(Text)
    is_verified = Column(Boolean, default=False)
    context = Column(Text)
    source = Column(String(255))

    # Relationship to threat
    threat_id = Column(Integer, ForeignKey("threats.id"))
    threat = relationship("Threat", back_populates="indicators")

    # Confidence and metadata
    confidence_score = Column(Float, default=0.0)
    first_seen = Column(DateTime, default=datetime.utcnow)
    last_seen = Column(DateTime, default=datetime.utcnow)

    def __repr__(self):
        return f"<Indicator(id={self.id}, value={self.value}, type={self.indicator_type})>"
src/models/report.py
ADDED
@@ -0,0 +1,78 @@
"""
Model for storing reports generated from threats and analysis.
"""
from sqlalchemy import Column, String, Text, Integer, DateTime, ForeignKey, Enum, Table
from sqlalchemy.orm import relationship
import enum
from datetime import datetime
from typing import List

from src.models.base import BaseModel
from src.models.threat import ThreatSeverity

# Many-to-many relationship table for reports and threats
report_threats = Table(
    "report_threats",
    BaseModel.metadata,
    Column("report_id", Integer, ForeignKey("reports.id"), primary_key=True),
    Column("threat_id", Integer, ForeignKey("threats.id"), primary_key=True),
)


class ReportType(enum.Enum):
    """Type of report."""
    THREAT_DIGEST = "Threat Digest"
    DARK_WEB_ANALYSIS = "Dark Web Analysis"
    VULNERABILITY_ASSESSMENT = "Vulnerability Assessment"
    INCIDENT_RESPONSE = "Incident Response"
    THREAT_INTELLIGENCE = "Threat Intelligence"
    EXECUTIVE_SUMMARY = "Executive Summary"
    TECHNICAL_ANALYSIS = "Technical Analysis"
    WEEKLY_SUMMARY = "Weekly Summary"
    MONTHLY_SUMMARY = "Monthly Summary"
    CUSTOM = "Custom"


class ReportStatus(enum.Enum):
    """Status of report."""
    DRAFT = "Draft"
    REVIEW = "In Review"
    APPROVED = "Approved"
    PUBLISHED = "Published"
    ARCHIVED = "Archived"


class Report(BaseModel):
    """Model for reports on threats and analysis."""
    __tablename__ = "reports"

    # Report metadata
    report_id = Column(String(50), unique=True, nullable=False)
    title = Column(String(255), nullable=False)
    summary = Column(Text, nullable=False)
    content = Column(Text, nullable=False)
    report_type = Column(Enum(ReportType), nullable=False)
    status = Column(Enum(ReportStatus), nullable=False, default=ReportStatus.DRAFT)
    severity = Column(Enum(ThreatSeverity))

    # Report scheduling and timing
    publish_date = Column(DateTime)
    time_period_start = Column(DateTime)
    time_period_end = Column(DateTime)

    # Keywords for searchability
    keywords = Column(String(500))

    # Related entities
    author_id = Column(Integer, ForeignKey("users.id"))
    author = relationship("User")

    # Many-to-many relationship with threats
    threats = relationship(
        "Threat",
        secondary=report_threats,
        backref="reports"
    )

    def __repr__(self):
        return f"<Report(id={self.id}, report_id={self.report_id}, title={self.title})>"
src/models/search_history.py
ADDED
@@ -0,0 +1,146 @@
"""
Search History Model

This module defines the search history model for tracking dark web searches and trends.
"""
from datetime import datetime
from typing import Optional, List
from sqlalchemy import Column, Integer, String, DateTime, Boolean, ForeignKey, Text, Float
from sqlalchemy.orm import relationship

from src.models.base import Base

class SearchHistory(Base):
    """
    Model for tracking search history and trends in dark web content.

    Attributes:
        id: Unique identifier for the search
        query: The search query or term
        timestamp: When the search was performed
        user_id: ID of the user who performed the search (optional)
        result_count: Number of results returned
        category: Category of the search (e.g., "marketplace", "forum", "paste", etc.)
        is_saved: Whether this is a saved/favorited search
        notes: Optional notes about this search
        tags: Tags associated with this search
    """
    __tablename__ = "search_history"

    id = Column(Integer, primary_key=True, index=True)
    query = Column(String(255), nullable=False, index=True)
    timestamp = Column(DateTime, default=datetime.utcnow, nullable=False, index=True)
    user_id = Column(Integer, ForeignKey("users.id"), nullable=True)
    result_count = Column(Integer, default=0)
    category = Column(String(50), nullable=True)
    is_saved = Column(Boolean, default=False)
    notes = Column(Text, nullable=True)
    tags = Column(String(255), nullable=True)  # Comma-separated tags

    # Relationships
    user = relationship("User", back_populates="searches")
    search_results = relationship("SearchResult", back_populates="search", cascade="all, delete-orphan")

    def __repr__(self):
        return f"<SearchHistory(id={self.id}, query='{self.query}', timestamp={self.timestamp})>"


class SearchResult(Base):
    """
    Model for individual search results associated with a search query.

    Attributes:
        id: Unique identifier for the search result
        search_id: ID of the parent search
        content_id: ID of the content found (if in our database)
        url: URL of the result
        title: Title of the result
        snippet: Text snippet from the result
        source: Source of the result (e.g., "dark web forum", "marketplace", etc.)
        relevance_score: Score indicating relevance to the search query
        timestamp: When this result was found
    """
    __tablename__ = "search_results"

    id = Column(Integer, primary_key=True, index=True)
    search_id = Column(Integer, ForeignKey("search_history.id"), nullable=False)
    content_id = Column(Integer, ForeignKey("dark_web_contents.id"), nullable=True)
    url = Column(String(1024), nullable=True)
    title = Column(String(255), nullable=True)
    snippet = Column(Text, nullable=True)
    source = Column(String(100), nullable=True)
    relevance_score = Column(Float, default=0.0)
    timestamp = Column(DateTime, default=datetime.utcnow, nullable=False)

    # Relationships
    search = relationship("SearchHistory", back_populates="search_results")
    content = relationship("DarkWebContent", back_populates="search_results")

    def __repr__(self):
        return f"<SearchResult(id={self.id}, search_id={self.search_id}, title='{self.title}')>"


class SavedSearch(Base):
    """
    Model for saved searches with custom parameters for periodic monitoring.

    Attributes:
        id: Unique identifier for the saved search
        name: Name of the saved search
        query: The search query or term
        user_id: ID of the user who created the saved search
        created_at: When this saved search was created
        last_run_at: When this saved search was last executed
        frequency: How often to run this search (in hours, 0 for manual only)
        notification_enabled: Whether to send notifications for new results
        is_active: Whether this saved search is active
        threshold: Threshold for notifications (e.g., min number of new results)
    """
    __tablename__ = "saved_searches"

    id = Column(Integer, primary_key=True, index=True)
    name = Column(String(100), nullable=False)
    query = Column(String(255), nullable=False)
    user_id = Column(Integer, ForeignKey("users.id"), nullable=False)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    last_run_at = Column(DateTime, nullable=True)
    frequency = Column(Integer, default=24)  # In hours, 0 for manual only
    notification_enabled = Column(Boolean, default=True)
    is_active = Column(Boolean, default=True)
    threshold = Column(Integer, default=1)  # Min number of new results for notification
    category = Column(String(50), nullable=True)

    # Relationships
    user = relationship("User", back_populates="saved_searches")

    def __repr__(self):
        return f"<SavedSearch(id={self.id}, name='{self.name}', query='{self.query}')>"


class TrendTopic(Base):
    """
    Model for tracking trending topics on the dark web.

    Attributes:
        id: Unique identifier for the trend topic
        topic: The topic or term
        first_seen: When this topic was first detected
        last_seen: When this topic was last detected
        mention_count: Number of mentions of this topic
        growth_rate: Rate of growth in mentions (percentage)
        category: Category of the trend (e.g., "ransomware", "data breach", etc.)
        is_active: Whether this trend is currently active
    """
    __tablename__ = "trend_topics"

    id = Column(Integer, primary_key=True, index=True)
    topic = Column(String(100), nullable=False, index=True)
    first_seen = Column(DateTime, default=datetime.utcnow, nullable=False)
    last_seen = Column(DateTime, default=datetime.utcnow, nullable=False)
    mention_count = Column(Integer, default=1)
    growth_rate = Column(Float, default=0.0)
    category = Column(String(50), nullable=True)
    is_active = Column(Boolean, default=True)

    def __repr__(self):
        return f"<TrendTopic(id={self.id}, topic='{self.topic}', mention_count={self.mention_count})>"
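A sketch of how a search and its results would be persisted with these models (session handling as in hf_database.py; all values are illustrative):

    from src.models.search_history import SearchHistory, SearchResult

    search = SearchHistory(query="ransomware", category="forum", result_count=1)
    search.search_results.append(SearchResult(
        url="http://example.onion/thread/123",
        title="Example thread",
        snippet="...ransomware affiliate recruitment...",
        source="dark web forum",
        relevance_score=0.82
    ))
    session.add(search)
    session.commit()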
src/models/subscription.py
ADDED
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Subscription models for the application.
|
3 |
+
|
4 |
+
This module defines database models for subscription management.
|
5 |
+
"""
|
6 |
+
from enum import Enum
|
7 |
+
from sqlalchemy import Column, Integer, String, Float, Boolean, DateTime, ForeignKey, Enum as SQLAlchemyEnum
|
8 |
+
from sqlalchemy.orm import relationship
|
9 |
+
from sqlalchemy.sql import func

from src.models.base import Base


class SubscriptionTier(str, Enum):
    """Subscription tier enum."""
    FREE = "FREE"
    BASIC = "BASIC"
    PROFESSIONAL = "PROFESSIONAL"
    ENTERPRISE = "ENTERPRISE"


class BillingPeriod(str, Enum):
    """Billing period enum."""
    MONTHLY = "MONTHLY"
    ANNUALLY = "ANNUALLY"
    CUSTOM = "CUSTOM"


class SubscriptionPlan(Base):
    """Subscription plan model."""
    __tablename__ = "subscription_plans"

    id = Column(Integer, primary_key=True, index=True)
    name = Column(String(100), nullable=False)
    tier = Column(SQLAlchemyEnum(SubscriptionTier), nullable=False)
    description = Column(String(500))
    price_monthly = Column(Float, nullable=False)
    price_annually = Column(Float, nullable=False)
    is_active = Column(Boolean, default=True)

    # Features
    max_alerts = Column(Integer, default=10)
    max_reports = Column(Integer, default=5)
    max_searches_per_day = Column(Integer, default=20)
    max_monitoring_keywords = Column(Integer, default=10)
    max_data_retention_days = Column(Integer, default=30)
    supports_api_access = Column(Boolean, default=False)
    supports_live_feed = Column(Boolean, default=False)
    supports_dark_web_monitoring = Column(Boolean, default=False)
    supports_export = Column(Boolean, default=False)
    supports_advanced_analytics = Column(Boolean, default=False)

    # Stripe product ID (for integration with Stripe)
    stripe_product_id = Column(String(100))
    stripe_monthly_price_id = Column(String(100))
    stripe_annual_price_id = Column(String(100))

    # Timestamps
    created_at = Column(DateTime(timezone=True), server_default=func.now())
    updated_at = Column(DateTime(timezone=True), onupdate=func.now())

    # Relationships
    subscriptions = relationship("UserSubscription", back_populates="plan")

    def __repr__(self):
        return f"<SubscriptionPlan(id={self.id}, name={self.name}, tier={self.tier})>"


class SubscriptionStatus(str, Enum):
    """Subscription status enum."""
    ACTIVE = "ACTIVE"
    PAST_DUE = "PAST_DUE"
    CANCELED = "CANCELED"
    TRIALING = "TRIALING"
    INCOMPLETE = "INCOMPLETE"
    INCOMPLETE_EXPIRED = "INCOMPLETE_EXPIRED"


class UserSubscription(Base):
    """User subscription model."""
    __tablename__ = "user_subscriptions"

    id = Column(Integer, primary_key=True, index=True)
    user_id = Column(Integer, ForeignKey("users.id"), nullable=False)
    plan_id = Column(Integer, ForeignKey("subscription_plans.id"), nullable=False)
    status = Column(SQLAlchemyEnum(SubscriptionStatus), nullable=False, default=SubscriptionStatus.ACTIVE)

    # Billing details
    billing_period = Column(SQLAlchemyEnum(BillingPeriod), nullable=False, default=BillingPeriod.MONTHLY)
    current_period_start = Column(DateTime(timezone=True))
    current_period_end = Column(DateTime(timezone=True))

    # Stripe subscription ID
    stripe_subscription_id = Column(String(100))
    stripe_customer_id = Column(String(100))

    # Timestamps
    created_at = Column(DateTime(timezone=True), server_default=func.now())
    updated_at = Column(DateTime(timezone=True), onupdate=func.now())
    canceled_at = Column(DateTime(timezone=True))

    # Relationships
    user = relationship("User", back_populates="subscriptions")
    plan = relationship("SubscriptionPlan", back_populates="subscriptions")
    payment_history = relationship("PaymentHistory", back_populates="subscription")

    def __repr__(self):
        return f"<UserSubscription(id={self.id}, user_id={self.user_id}, plan_id={self.plan_id})>"


class PaymentStatus(str, Enum):
    """Payment status enum."""
    SUCCEEDED = "SUCCEEDED"
    PENDING = "PENDING"
    FAILED = "FAILED"
    REFUNDED = "REFUNDED"


class PaymentHistory(Base):
    """Payment history model."""
    __tablename__ = "payment_history"

    id = Column(Integer, primary_key=True, index=True)
    user_id = Column(Integer, ForeignKey("users.id"), nullable=False)
    subscription_id = Column(Integer, ForeignKey("user_subscriptions.id"), nullable=False)

    amount = Column(Float, nullable=False)
    currency = Column(String(3), default="USD")
    status = Column(SQLAlchemyEnum(PaymentStatus), nullable=False)

    # Stripe payment intent ID
    stripe_payment_intent_id = Column(String(100))
    stripe_invoice_id = Column(String(100))

    # Timestamps
    payment_date = Column(DateTime(timezone=True), server_default=func.now())

    # Relationships
    user = relationship("User")
    subscription = relationship("UserSubscription", back_populates="payment_history")

    def __repr__(self):
        return f"<PaymentHistory(id={self.id}, user_id={self.user_id}, amount={self.amount}, status={self.status})>"
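Each plan stores one Stripe price ID per billing period, so callers need a small dispatch when creating checkout sessions. A minimal sketch of that lookup; the helper name is ours, not part of this commit:

from src.models.subscription import BillingPeriod, SubscriptionPlan

def stripe_price_id_for(plan: SubscriptionPlan, period: BillingPeriod):
    """Hypothetical helper: pick the Stripe price ID matching the billing period."""
    if period == BillingPeriod.MONTHLY:
        return plan.stripe_monthly_price_id
    if period == BillingPeriod.ANNUALLY:
        return plan.stripe_annual_price_id
    return None  # CUSTOM periods are priced outside the stored Stripe price objects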
src/models/threat.py
ADDED
@@ -0,0 +1,76 @@
"""
Model for storing threat information discovered in dark web monitoring.
"""
from sqlalchemy import Column, String, Text, Integer, Float, DateTime, Enum
from sqlalchemy.orm import relationship
import enum
from datetime import datetime

from src.models.base import BaseModel

class ThreatSeverity(enum.Enum):
    """Severity levels for threats."""
    CRITICAL = "Critical"
    HIGH = "High"
    MEDIUM = "Medium"
    LOW = "Low"
    INFORMATIONAL = "Informational"


class ThreatCategory(enum.Enum):
    """Categories of threats."""
    DATA_BREACH = "Data Breach"
    CREDENTIAL_LEAK = "Credential Leak"
    VULNERABILITY = "Vulnerability"
    MALWARE = "Malware"
    PHISHING = "Phishing"
    IDENTITY_THEFT = "Identity Theft"
    RANSOMWARE = "Ransomware"
    DARK_WEB_MENTION = "Dark Web Mention"
    SOCIAL_ENGINEERING = "Social Engineering"
    INSIDER_THREAT = "Insider Threat"
    APT = "Advanced Persistent Threat"
    OTHER = "Other"


class ThreatStatus(enum.Enum):
    """Status of a threat."""
    NEW = "New"
    INVESTIGATING = "Investigating"
    CONFIRMED = "Confirmed"
    MITIGATED = "Mitigated"
    RESOLVED = "Resolved"
    FALSE_POSITIVE = "False Positive"


class Threat(BaseModel):
    """Model for threats discovered in dark web monitoring."""
    __tablename__ = "threats"

    # Threat metadata
    title = Column(String(255), nullable=False)
    description = Column(Text, nullable=False)
    severity = Column(Enum(ThreatSeverity), nullable=False)
    category = Column(Enum(ThreatCategory), nullable=False)
    status = Column(Enum(ThreatStatus), nullable=False, default=ThreatStatus.NEW)

    # Source information
    source_url = Column(String(1024))
    source_name = Column(String(255))
    source_type = Column(String(100))
    discovered_at = Column(DateTime, default=datetime.utcnow)

    # Affected entity
    affected_entity = Column(String(255))
    affected_entity_type = Column(String(100))

    # Risk assessment
    confidence_score = Column(Float, default=0.0)
    risk_score = Column(Float, default=0.0)

    # Relationships
    indicators = relationship("Indicator", back_populates="threat", cascade="all, delete-orphan")
    alerts = relationship("Alert", back_populates="threat", cascade="all, delete-orphan")

    def __repr__(self):
        return f"<Threat(id={self.id}, title={self.title}, severity={self.severity})>"
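Because the enum values are human-readable strings, threat records read naturally when constructed. A quick illustrative sketch (field values invented, session handling elided):

from src.models.threat import Threat, ThreatSeverity, ThreatCategory, ThreatStatus

# Illustrative record only; persist it through a database session in real code.
threat = Threat(
    title="Credential dump mentioning example.com",
    description="Paste listing roughly 500 corporate email/password pairs.",
    severity=ThreatSeverity.HIGH,
    category=ThreatCategory.CREDENTIAL_LEAK,
    status=ThreatStatus.NEW,
    confidence_score=0.8,
    risk_score=0.7,
)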
src/models/user.py
ADDED
@@ -0,0 +1,32 @@
"""
Model for users of the application.
"""
from sqlalchemy import Column, String, Boolean
from sqlalchemy.orm import relationship
from typing import List

from src.models.base import BaseModel

class User(BaseModel):
    """
    User model for authentication and authorization.
    """
    __tablename__ = "users"

    username = Column(String(50), unique=True, index=True, nullable=False)
    email = Column(String(100), unique=True, index=True, nullable=False)
    full_name = Column(String(100))
    hashed_password = Column(String(100), nullable=False)
    is_active = Column(Boolean, default=True)
    is_superuser = Column(Boolean, default=False)
    avatar_url = Column(String(255), nullable=True)
    bio = Column(String, nullable=True)
    last_login = Column(String(255), nullable=True)

    # Relationships
    searches = relationship("SearchHistory", back_populates="user")
    saved_searches = relationship("SavedSearch", back_populates="user")
    subscriptions = relationship("UserSubscription", back_populates="user")

    def __repr__(self):
        return f"<User(id={self.id}, username={self.username}, email={self.email})>"
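The model stores only hashed_password; hashing itself is left to the caller. A sketch of how account creation might prepare the fields, assuming passlib is available (it is not shipped in this commit):

from passlib.context import CryptContext

pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")

def new_user_fields(username: str, email: str, plain_password: str) -> dict:
    """Hypothetical helper: keyword arguments for User(...) with the password hashed."""
    return {
        "username": username,
        "email": email,
        "hashed_password": pwd_context.hash(plain_password),
    }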
src/streamlit_database.py
ADDED
@@ -0,0 +1,850 @@
"""
Database integration for Streamlit application.

This module provides functions to interact with the database for the Streamlit frontend.
It wraps the async database functions in sync functions for Streamlit compatibility.
"""
import os
import asyncio
import pandas as pd
from typing import List, Dict, Any, Optional, Union, Tuple
from datetime import datetime, timedelta

from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession

# Import database models
from src.models.threat import Threat, ThreatSeverity, ThreatStatus, ThreatCategory
from src.models.indicator import Indicator, IndicatorType
from src.models.dark_web_content import DarkWebContent, DarkWebMention, ContentType, ContentStatus
from src.models.alert import Alert, AlertStatus, AlertCategory
from src.models.report import Report, ReportType, ReportStatus

# Import service functions
from src.api.services.dark_web_content_service import (
    create_content, get_content_by_id, get_contents, count_contents,
    create_mention, get_mentions, create_threat_from_content
)
from src.api.services.alert_service import (
    create_alert, get_alert_by_id, get_alerts, count_alerts,
    update_alert_status, mark_alert_as_read, get_alert_counts_by_severity
)
from src.api.services.threat_service import (
    create_threat, get_threat_by_id, get_threats, count_threats,
    update_threat, add_indicator_to_threat, get_threat_statistics
)
from src.api.services.report_service import (
    create_report, get_report_by_id, get_reports, count_reports,
    update_report, add_threat_to_report, publish_report
)

# Import schemas
from src.api.schemas import PaginationParams

# Get database URL from environment
db_url = os.getenv("DATABASE_URL", "")
if db_url.startswith("postgresql://"):
    # Remove the sslmode parameter if present, since it causes issues with asyncpg
    if "?" in db_url:
        base_url, params = db_url.split("?", 1)
        param_list = params.split("&")
        filtered_params = [p for p in param_list if not p.startswith("sslmode=")]
        if filtered_params:
            db_url = f"{base_url}?{'&'.join(filtered_params)}"
        else:
            db_url = base_url

    ASYNC_DATABASE_URL = db_url.replace("postgresql://", "postgresql+asyncpg://", 1)
else:
    ASYNC_DATABASE_URL = "postgresql+asyncpg://postgres:postgres@localhost:5432/postgres"

# Create async engine
engine = create_async_engine(
    ASYNC_DATABASE_URL,
    echo=False,
    future=True,
    pool_size=5,
    max_overflow=10
)

# Create async session factory
async_session = sessionmaker(
    engine,
    class_=AsyncSession,
    expire_on_commit=False
)
+
def run_async(coro):
|
79 |
+
"""Run an async function in a sync context."""
|
80 |
+
try:
|
81 |
+
loop = asyncio.get_event_loop()
|
82 |
+
except RuntimeError:
|
83 |
+
loop = asyncio.new_event_loop()
|
84 |
+
asyncio.set_event_loop(loop)
|
85 |
+
|
86 |
+
return loop.run_until_complete(coro)
|
87 |
+
|
88 |
+
|
89 |
+
async def get_session():
|
90 |
+
"""Get an async database session."""
|
91 |
+
async with async_session() as session:
|
92 |
+
yield session
|
93 |
+
|
94 |
+
|
95 |
+
def get_db_session():
|
96 |
+
"""Get a database session for use in Streamlit."""
|
97 |
+
try:
|
98 |
+
session_gen = get_session().__aiter__()
|
99 |
+
return run_async(session_gen.__anext__())
|
100 |
+
except StopAsyncIteration:
|
101 |
+
return None
|
102 |
+
|
103 |
+
|
104 |
+
async def get_async_session():
|
105 |
+
"""
|
106 |
+
Async context manager for database sessions.
|
107 |
+
|
108 |
+
Usage:
|
109 |
+
async with get_async_session() as session:
|
110 |
+
# Use session here
|
111 |
+
"""
|
112 |
+
session = async_session()
|
113 |
+
try:
|
114 |
+
yield session
|
115 |
+
await session.commit()
|
116 |
+
except Exception as e:
|
117 |
+
await session.rollback()
|
118 |
+
raise e
|
119 |
+
finally:
|
120 |
+
await session.close()
|
121 |
+
|
122 |
+
|
# Dark Web Content functions
def get_dark_web_contents(
    page: int = 1,
    size: int = 10,
    content_type: Optional[List[ContentType]] = None,
    content_status: Optional[List[ContentStatus]] = None,
    source_name: Optional[str] = None,
    search_query: Optional[str] = None,
    from_date: Optional[datetime] = None,
    to_date: Optional[datetime] = None,
) -> pd.DataFrame:
    """
    Get dark web contents as a DataFrame.

    Args:
        page: Page number
        size: Page size
        content_type: Filter by content type
        content_status: Filter by content status
        source_name: Filter by source name
        search_query: Search in title and content
        from_date: Filter by scraped_at >= from_date
        to_date: Filter by scraped_at <= to_date

    Returns:
        pd.DataFrame: DataFrame with dark web contents
    """
    session = get_db_session()

    if not session:
        return pd.DataFrame()

    contents = run_async(get_contents(
        db=session,
        pagination=PaginationParams(page=page, size=size),
        content_type=content_type,
        content_status=content_status,
        source_name=source_name,
        search_query=search_query,
        from_date=from_date,
        to_date=to_date,
    ))

    if not contents:
        return pd.DataFrame()

    # Convert to DataFrame
    data = []
    for content in contents:
        data.append({
            "id": content.id,
            "url": content.url,
            "title": content.title,
            "content_type": content.content_type.value if content.content_type else None,
            "content_status": content.content_status.value if content.content_status else None,
            "source_name": content.source_name,
            "source_type": content.source_type,
            "language": content.language,
            "scraped_at": content.scraped_at,
            "relevance_score": content.relevance_score,
            "sentiment_score": content.sentiment_score,
        })

    return pd.DataFrame(data)


def add_dark_web_content(
    url: str,
    content: str,
    title: Optional[str] = None,
    content_type: ContentType = ContentType.OTHER,
    source_name: Optional[str] = None,
    source_type: Optional[str] = None,
) -> Optional[DarkWebContent]:
    """
    Add a new dark web content.

    Args:
        url: URL of the content
        content: Text content
        title: Title of the content
        content_type: Type of content
        source_name: Name of the source
        source_type: Type of source

    Returns:
        Optional[DarkWebContent]: Created content or None
    """
    session = get_db_session()

    if not session:
        return None

    return run_async(create_content(
        db=session,
        url=url,
        content=content,
        title=title,
        content_type=content_type,
        source_name=source_name,
        source_type=source_type,
    ))


def get_dark_web_mentions(
    page: int = 1,
    size: int = 10,
    keyword: Optional[str] = None,
    content_id: Optional[int] = None,
    is_verified: Optional[bool] = None,
    from_date: Optional[datetime] = None,
    to_date: Optional[datetime] = None,
) -> pd.DataFrame:
    """
    Get dark web mentions as a DataFrame.

    Args:
        page: Page number
        size: Page size
        keyword: Filter by keyword
        content_id: Filter by content ID
        is_verified: Filter by verification status
        from_date: Filter by created_at >= from_date
        to_date: Filter by created_at <= to_date

    Returns:
        pd.DataFrame: DataFrame with dark web mentions
    """
    session = get_db_session()

    if not session:
        return pd.DataFrame()

    mentions = run_async(get_mentions(
        db=session,
        pagination=PaginationParams(page=page, size=size),
        keyword=keyword,
        content_id=content_id,
        is_verified=is_verified,
        from_date=from_date,
        to_date=to_date,
    ))

    if not mentions:
        return pd.DataFrame()

    # Convert to DataFrame
    data = []
    for mention in mentions:
        data.append({
            "id": mention.id,
            "content_id": mention.content_id,
            "keyword": mention.keyword,
            "snippet": mention.snippet,
            "mention_type": mention.mention_type,
            "confidence": mention.confidence,
            "is_verified": mention.is_verified,
            "created_at": mention.created_at,
        })

    return pd.DataFrame(data)


def add_dark_web_mention(
    content_id: int,
    keyword: str,
    context: Optional[str] = None,
    snippet: Optional[str] = None,
) -> Optional[DarkWebMention]:
    """
    Add a new dark web mention.

    Args:
        content_id: ID of the content where the mention was found
        keyword: Keyword that was mentioned
        context: Text surrounding the mention
        snippet: Extract of text containing the mention

    Returns:
        Optional[DarkWebMention]: Created mention or None
    """
    session = get_db_session()

    if not session:
        return None

    return run_async(create_mention(
        db=session,
        content_id=content_id,
        keyword=keyword,
        context=context,
        snippet=snippet,
    ))
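Each query wrapper above returns a plain DataFrame, so filtered pulls compose directly with Streamlit widgets. A sketch of a filtered call (parameter values invented):

from datetime import datetime, timedelta

from src.streamlit_database import get_dark_web_contents

# Hypothetical call: last week's scraped content mentioning "ransomware".
df = get_dark_web_contents(
    page=1,
    size=25,
    search_query="ransomware",
    from_date=datetime.utcnow() - timedelta(days=7),
)
if not df.empty:
    print(df[["title", "source_name", "scraped_at"]].head())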
# Alerts functions
def get_alerts_df(
    page: int = 1,
    size: int = 10,
    severity: Optional[List[ThreatSeverity]] = None,
    status: Optional[List[AlertStatus]] = None,
    category: Optional[List[AlertCategory]] = None,
    is_read: Optional[bool] = None,
    search_query: Optional[str] = None,
    from_date: Optional[datetime] = None,
    to_date: Optional[datetime] = None,
) -> pd.DataFrame:
    """
    Get alerts as a DataFrame.

    Args:
        page: Page number
        size: Page size
        severity: Filter by severity
        status: Filter by status
        category: Filter by category
        is_read: Filter by read status
        search_query: Search in title and description
        from_date: Filter by generated_at >= from_date
        to_date: Filter by generated_at <= to_date

    Returns:
        pd.DataFrame: DataFrame with alerts
    """
    session = get_db_session()

    if not session:
        return pd.DataFrame()

    alerts = run_async(get_alerts(
        db=session,
        pagination=PaginationParams(page=page, size=size),
        severity=severity,
        status=status,
        category=category,
        is_read=is_read,
        search_query=search_query,
        from_date=from_date,
        to_date=to_date,
    ))

    if not alerts:
        return pd.DataFrame()

    # Convert to DataFrame
    data = []
    for alert in alerts:
        data.append({
            "id": alert.id,
            "title": alert.title,
            "description": alert.description,
            "severity": alert.severity.value if alert.severity else None,
            "status": alert.status.value if alert.status else None,
            "category": alert.category.value if alert.category else None,
            "generated_at": alert.generated_at,
            "source_url": alert.source_url,
            "is_read": alert.is_read,
            "threat_id": alert.threat_id,
            "mention_id": alert.mention_id,
            "assigned_to_id": alert.assigned_to_id,
            "action_taken": alert.action_taken,
            "resolved_at": alert.resolved_at,
        })

    return pd.DataFrame(data)


def add_alert(
    title: str,
    description: str,
    severity: ThreatSeverity,
    category: AlertCategory,
    source_url: Optional[str] = None,
    threat_id: Optional[int] = None,
    mention_id: Optional[int] = None,
) -> Optional[Alert]:
    """
    Add a new alert.

    Args:
        title: Alert title
        description: Alert description
        severity: Alert severity
        category: Alert category
        source_url: Source URL for the alert
        threat_id: ID of related threat
        mention_id: ID of related dark web mention

    Returns:
        Optional[Alert]: Created alert or None
    """
    session = get_db_session()

    if not session:
        return None

    return run_async(create_alert(
        db=session,
        title=title,
        description=description,
        severity=severity,
        category=category,
        source_url=source_url,
        threat_id=threat_id,
        mention_id=mention_id,
    ))


def update_alert(
    alert_id: int,
    status: AlertStatus,
    action_taken: Optional[str] = None,
) -> Optional[Alert]:
    """
    Update alert status.

    Args:
        alert_id: Alert ID
        status: New status
        action_taken: Description of action taken

    Returns:
        Optional[Alert]: Updated alert or None
    """
    session = get_db_session()

    if not session:
        return None

    return run_async(update_alert_status(
        db=session,
        alert_id=alert_id,
        status=status,
        action_taken=action_taken,
    ))


def get_alert_severity_counts(
    from_date: Optional[datetime] = None,
    to_date: Optional[datetime] = None,
) -> Dict[str, int]:
    """
    Get count of alerts by severity.

    Args:
        from_date: Filter by generated_at >= from_date
        to_date: Filter by generated_at <= to_date

    Returns:
        Dict[str, int]: Mapping of severity to count
    """
    session = get_db_session()

    if not session:
        return {}

    return run_async(get_alert_counts_by_severity(
        db=session,
        from_date=from_date,
        to_date=to_date,
    ))
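Together these wrappers cover the alert lifecycle: create, triage, resolve. A sketch of that flow; titles are invented and the enum members marked below are assumptions (the exact members live in src/models/alert.py, outside this excerpt):

from src.streamlit_database import add_alert, update_alert
from src.models.threat import ThreatSeverity
from src.models.alert import AlertCategory, AlertStatus

alert = add_alert(
    title="Credentials for example.com offered for sale",
    description="Marketplace listing spotted by keyword monitoring; needs triage.",
    severity=ThreatSeverity.HIGH,
    category=AlertCategory.DARK_WEB_MENTION,  # assumed member; see src/models/alert.py
)
if alert:
    update_alert(
        alert_id=alert.id,
        status=AlertStatus.RESOLVED,  # assumed member; see src/models/alert.py
        action_taken="Forced password resets for the listed accounts.",
    )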
# Threats functions
def get_threats_df(
    page: int = 1,
    size: int = 10,
    severity: Optional[List[ThreatSeverity]] = None,
    status: Optional[List[ThreatStatus]] = None,
    category: Optional[List[ThreatCategory]] = None,
    search_query: Optional[str] = None,
    from_date: Optional[datetime] = None,
    to_date: Optional[datetime] = None,
) -> pd.DataFrame:
    """
    Get threats as a DataFrame.

    Args:
        page: Page number
        size: Page size
        severity: Filter by severity
        status: Filter by status
        category: Filter by category
        search_query: Search in title and description
        from_date: Filter by discovered_at >= from_date
        to_date: Filter by discovered_at <= to_date

    Returns:
        pd.DataFrame: DataFrame with threats
    """
    session = get_db_session()

    if not session:
        return pd.DataFrame()

    threats = run_async(get_threats(
        db=session,
        pagination=PaginationParams(page=page, size=size),
        severity=severity,
        status=status,
        category=category,
        search_query=search_query,
        from_date=from_date,
        to_date=to_date,
    ))

    if not threats:
        return pd.DataFrame()

    # Convert to DataFrame
    data = []
    for threat in threats:
        data.append({
            "id": threat.id,
            "title": threat.title,
            "description": threat.description,
            "severity": threat.severity.value if threat.severity else None,
            "status": threat.status.value if threat.status else None,
            "category": threat.category.value if threat.category else None,
            "source_url": threat.source_url,
            "source_name": threat.source_name,
            "source_type": threat.source_type,
            "discovered_at": threat.discovered_at,
            "affected_entity": threat.affected_entity,
            "affected_entity_type": threat.affected_entity_type,
            "confidence_score": threat.confidence_score,
            "risk_score": threat.risk_score,
        })

    return pd.DataFrame(data)


def add_threat(
    title: str,
    description: str,
    severity: ThreatSeverity,
    category: ThreatCategory,
    status: ThreatStatus = ThreatStatus.NEW,
    source_url: Optional[str] = None,
    source_name: Optional[str] = None,
    source_type: Optional[str] = None,
    affected_entity: Optional[str] = None,
    affected_entity_type: Optional[str] = None,
    confidence_score: float = 0.0,
    risk_score: float = 0.0,
) -> Optional[Threat]:
    """
    Add a new threat.

    Args:
        title: Threat title
        description: Threat description
        severity: Threat severity
        category: Threat category
        status: Threat status
        source_url: URL of the source
        source_name: Name of the source
        source_type: Type of source
        affected_entity: Name of affected entity
        affected_entity_type: Type of affected entity
        confidence_score: Confidence score (0-1)
        risk_score: Risk score (0-1)

    Returns:
        Optional[Threat]: Created threat or None
    """
    session = get_db_session()

    if not session:
        return None

    return run_async(create_threat(
        db=session,
        title=title,
        description=description,
        severity=severity,
        category=category,
        status=status,
        source_url=source_url,
        source_name=source_name,
        source_type=source_type,
        affected_entity=affected_entity,
        affected_entity_type=affected_entity_type,
        confidence_score=confidence_score,
        risk_score=risk_score,
    ))


def add_indicator(
    threat_id: int,
    value: str,
    indicator_type: IndicatorType,
    description: Optional[str] = None,
    is_verified: bool = False,
    context: Optional[str] = None,
    source: Optional[str] = None,
) -> Optional[Indicator]:
    """
    Add an indicator to a threat.

    Args:
        threat_id: Threat ID
        value: Indicator value
        indicator_type: Indicator type
        description: Indicator description
        is_verified: Whether the indicator is verified
        context: Context of the indicator
        source: Source of the indicator

    Returns:
        Optional[Indicator]: Created indicator or None
    """
    session = get_db_session()

    if not session:
        return None

    return run_async(add_indicator_to_threat(
        db=session,
        threat_id=threat_id,
        value=value,
        indicator_type=indicator_type,
        description=description,
        is_verified=is_verified,
        context=context,
        source=source,
    ))


def get_threat_stats(
    from_date: Optional[datetime] = None,
    to_date: Optional[datetime] = None,
) -> Dict[str, Any]:
    """
    Get threat statistics.

    Args:
        from_date: Filter by discovered_at >= from_date
        to_date: Filter by discovered_at <= to_date

    Returns:
        Dict[str, Any]: Threat statistics
    """
    session = get_db_session()

    if not session:
        return {}

    return run_async(get_threat_statistics(
        db=session,
        from_date=from_date,
        to_date=to_date,
    ))
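Recording a threat plus its observables is a two-step write with these wrappers. A sketch; the values are invented and the IndicatorType member is an assumption (the enum is defined in src/models/indicator.py, outside this excerpt):

from src.streamlit_database import add_threat, add_indicator
from src.models.threat import ThreatSeverity, ThreatCategory
from src.models.indicator import IndicatorType

threat = add_threat(
    title="Phishing kit impersonating example.com",
    description="Kit archive shared on a paste site.",
    severity=ThreatSeverity.MEDIUM,
    category=ThreatCategory.PHISHING,
)
if threat:
    add_indicator(
        threat_id=threat.id,
        value="login-example.com",
        indicator_type=IndicatorType.DOMAIN,  # assumed member
        description="Landing domain used by the kit",
    )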
# Reports functions
def get_reports_df(
    page: int = 1,
    size: int = 10,
    report_type: Optional[List[ReportType]] = None,
    status: Optional[List[ReportStatus]] = None,
    severity: Optional[List[ThreatSeverity]] = None,
    search_query: Optional[str] = None,
    from_date: Optional[datetime] = None,
    to_date: Optional[datetime] = None,
) -> pd.DataFrame:
    """
    Get reports as a DataFrame.

    Args:
        page: Page number
        size: Page size
        report_type: Filter by report type
        status: Filter by status
        severity: Filter by severity
        search_query: Search in title and summary
        from_date: Filter by created_at >= from_date
        to_date: Filter by created_at <= to_date

    Returns:
        pd.DataFrame: DataFrame with reports
    """
    session = get_db_session()

    if not session:
        return pd.DataFrame()

    reports = run_async(get_reports(
        db=session,
        pagination=PaginationParams(page=page, size=size),
        report_type=report_type,
        status=status,
        severity=severity,
        search_query=search_query,
        from_date=from_date,
        to_date=to_date,
    ))

    if not reports:
        return pd.DataFrame()

    # Convert to DataFrame
    data = []
    for report in reports:
        data.append({
            "id": report.id,
            "report_id": report.report_id,
            "title": report.title,
            "summary": report.summary,
            "report_type": report.report_type.value if report.report_type else None,
            "status": report.status.value if report.status else None,
            "severity": report.severity.value if report.severity else None,
            "publish_date": report.publish_date,
            "created_at": report.created_at,
            "time_period_start": report.time_period_start,
            "time_period_end": report.time_period_end,
            "author_id": report.author_id,
        })

    return pd.DataFrame(data)


def add_report(
    title: str,
    summary: str,
    content: str,
    report_type: ReportType,
    report_id: str,
    status: ReportStatus = ReportStatus.DRAFT,
    severity: Optional[ThreatSeverity] = None,
    publish_date: Optional[datetime] = None,
    time_period_start: Optional[datetime] = None,
    time_period_end: Optional[datetime] = None,
    keywords: Optional[List[str]] = None,
    author_id: Optional[int] = None,
) -> Optional[Report]:
    """
    Add a new report.

    Args:
        title: Report title
        summary: Report summary
        content: Report content
        report_type: Type of report
        report_id: Custom ID for the report
        status: Report status
        severity: Report severity
        publish_date: Publication date
        time_period_start: Start of time period covered
        time_period_end: End of time period covered
        keywords: List of keywords related to the report
        author_id: ID of the report author

    Returns:
        Optional[Report]: Created report or None
    """
    session = get_db_session()

    if not session:
        return None

    return run_async(create_report(
        db=session,
        title=title,
        summary=summary,
        content=content,
        report_type=report_type,
        report_id=report_id,
        status=status,
        severity=severity,
        publish_date=publish_date,
        time_period_start=time_period_start,
        time_period_end=time_period_end,
        keywords=keywords,
        author_id=author_id,
    ))
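Report creation follows the same shape; only report_id is caller-supplied. A sketch in which the ReportType member and the ID scheme are our assumptions, not part of this commit:

from src.streamlit_database import add_report
from src.models.report import ReportType

report = add_report(
    title="Weekly dark web summary",
    summary="Key mentions and credential leaks observed this week.",
    content="Full report body goes here.",
    report_type=ReportType.WEEKLY,  # assumed member; see src/models/report.py
    report_id="RPT-2025-W01",       # invented ID scheme
)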
# Helper functions
def get_time_range_dates(time_range: str) -> Tuple[datetime, datetime]:
    """
    Get start and end dates for a time range.

    Args:
        time_range: Time range string (e.g., "Last 7 Days")

    Returns:
        Tuple[datetime, datetime]: (start_date, end_date)
    """
    end_date = datetime.utcnow()

    if time_range == "Last 24 Hours":
        start_date = end_date - timedelta(days=1)
    elif time_range == "Last 7 Days":
        start_date = end_date - timedelta(days=7)
    elif time_range == "Last 30 Days":
        start_date = end_date - timedelta(days=30)
    elif time_range == "Last Quarter":
        start_date = end_date - timedelta(days=90)
    else:  # Default to last 30 days
        start_date = end_date - timedelta(days=30)

    return start_date, end_date


# Initialize database connection
def init_db_connection():
    """Initialize database connection and check if tables exist."""
    session = get_db_session()

    if not session:
        return False

    try:
        # Check if tables exist by querying information_schema via SQLAlchemy text()
        from sqlalchemy import text
        query = text("SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = 'users')")
        result = run_async(session.execute(query))
        exists = result.scalar()

        return exists
    except Exception as e:
        # Tables might not exist yet
        print(f"Error checking database: {e}")
        return False
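The range helper pairs directly with the query wrappers above, which is how the dashboard components are expected to consume it; for example:

from src.streamlit_database import get_time_range_dates, get_threats_df

start, end = get_time_range_dates("Last 7 Days")
recent_threats = get_threats_df(page=1, size=50, from_date=start, to_date=end)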
src/streamlit_subscription_services.py
ADDED
@@ -0,0 +1,450 @@
"""
Streamlit integration for subscription services.
"""
import os
import asyncio
import pandas as pd
from typing import List, Dict, Any, Optional, Union
from datetime import datetime

import stripe
from sqlalchemy.ext.asyncio import AsyncSession

from src.models.subscription import (
    SubscriptionPlan, UserSubscription, PaymentHistory,
    SubscriptionTier, BillingPeriod, SubscriptionStatus, PaymentStatus
)
from src.api.services.subscription_service import (
    get_subscription_plans, get_subscription_plan_by_id, get_subscription_plan_by_tier,
    create_subscription_plan, update_subscription_plan,
    get_user_subscription, get_user_subscription_by_id,
    create_user_subscription, cancel_user_subscription
)

from src.streamlit_database import run_async, get_db_session

# Set up the Stripe publishable key for client-side usage
STRIPE_PUBLISHABLE_KEY = os.environ.get("STRIPE_PUBLISHABLE_KEY")
def get_subscription_plans_df(active_only: bool = True) -> pd.DataFrame:
    """
    Get all subscription plans as a DataFrame.

    Args:
        active_only: If True, only return active plans

    Returns:
        DataFrame containing subscription plans
    """
    session = get_db_session()

    if not session:
        return pd.DataFrame()

    plans = run_async(get_subscription_plans(session, active_only))

    if not plans:
        return pd.DataFrame()

    data = []
    for plan in plans:
        data.append({
            "id": plan.id,
            "name": plan.name,
            "tier": plan.tier.value if plan.tier else None,
            "description": plan.description,
            "price_monthly": plan.price_monthly,
            "price_annually": plan.price_annually,
            "max_alerts": plan.max_alerts,
            "max_reports": plan.max_reports,
            "max_searches_per_day": plan.max_searches_per_day,
            "max_monitoring_keywords": plan.max_monitoring_keywords,
            "max_data_retention_days": plan.max_data_retention_days,
            "supports_api_access": plan.supports_api_access,
            "supports_live_feed": plan.supports_live_feed,
            "supports_dark_web_monitoring": plan.supports_dark_web_monitoring,
            "supports_export": plan.supports_export,
            "supports_advanced_analytics": plan.supports_advanced_analytics,
            "is_active": plan.is_active,
        })

    return pd.DataFrame(data)


def get_subscription_plan(plan_id: int) -> Optional[Dict[str, Any]]:
    """
    Get a subscription plan by ID.

    Args:
        plan_id: ID of the plan to get

    Returns:
        Dictionary containing plan details or None if not found
    """
    session = get_db_session()

    if not session:
        return None

    plan = run_async(get_subscription_plan_by_id(session, plan_id))

    if not plan:
        return None

    return {
        "id": plan.id,
        "name": plan.name,
        "tier": plan.tier.value if plan.tier else None,
        "description": plan.description,
        "price_monthly": plan.price_monthly,
        "price_annually": plan.price_annually,
        "max_alerts": plan.max_alerts,
        "max_reports": plan.max_reports,
        "max_searches_per_day": plan.max_searches_per_day,
        "max_monitoring_keywords": plan.max_monitoring_keywords,
        "max_data_retention_days": plan.max_data_retention_days,
        "supports_api_access": plan.supports_api_access,
        "supports_live_feed": plan.supports_live_feed,
        "supports_dark_web_monitoring": plan.supports_dark_web_monitoring,
        "supports_export": plan.supports_export,
        "supports_advanced_analytics": plan.supports_advanced_analytics,
        "is_active": plan.is_active,
        "stripe_product_id": plan.stripe_product_id,
        "stripe_monthly_price_id": plan.stripe_monthly_price_id,
        "stripe_annual_price_id": plan.stripe_annual_price_id,
    }


def get_user_current_subscription(user_id: int) -> Optional[Dict[str, Any]]:
    """
    Get a user's current subscription.

    Args:
        user_id: ID of the user

    Returns:
        Dictionary containing subscription details or None if not found
    """
    session = get_db_session()

    if not session:
        return None

    subscription = run_async(get_user_subscription(session, user_id))

    if not subscription:
        return None

    plan = subscription.plan

    return {
        "id": subscription.id,
        "user_id": subscription.user_id,
        "plan_id": subscription.plan_id,
        "plan_name": plan.name if plan else None,
        "plan_tier": plan.tier.value if plan and plan.tier else None,
        "status": subscription.status.value if subscription.status else None,
        "billing_period": subscription.billing_period.value if subscription.billing_period else None,
        "current_period_start": subscription.current_period_start,
        "current_period_end": subscription.current_period_end,
        "stripe_subscription_id": subscription.stripe_subscription_id,
        "stripe_customer_id": subscription.stripe_customer_id,
        "created_at": subscription.created_at,
        "canceled_at": subscription.canceled_at,
    }
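Because the getters return DataFrames and dicts, a pricing page can render them directly. A minimal Streamlit sketch of that consumption:

import streamlit as st

from src.streamlit_subscription_services import get_subscription_plans_df

plans = get_subscription_plans_df()
if plans.empty:
    st.info("No subscription plans configured yet.")
else:
    st.dataframe(plans[["name", "tier", "price_monthly", "price_annually"]])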
def create_new_subscription_plan(
    name: str,
    tier: str,
    description: str,
    price_monthly: float,
    price_annually: float,
    max_alerts: int = 10,
    max_reports: int = 5,
    max_searches_per_day: int = 20,
    max_monitoring_keywords: int = 10,
    max_data_retention_days: int = 30,
    supports_api_access: bool = False,
    supports_live_feed: bool = False,
    supports_dark_web_monitoring: bool = False,
    supports_export: bool = False,
    supports_advanced_analytics: bool = False,
    create_stripe_product: bool = True
) -> Optional[Dict[str, Any]]:
    """
    Create a new subscription plan.

    Args:
        name: Name of the plan
        tier: Tier of the plan (must be one of "free", "basic", "professional", "enterprise")
        description: Description of the plan
        price_monthly: Monthly price of the plan
        price_annually: Annual price of the plan
        max_alerts: Maximum number of alerts allowed
        max_reports: Maximum number of reports allowed
        max_searches_per_day: Maximum number of searches per day
        max_monitoring_keywords: Maximum number of monitoring keywords
        max_data_retention_days: Maximum number of days to retain data
        supports_api_access: Whether the plan supports API access
        supports_live_feed: Whether the plan supports live feed
        supports_dark_web_monitoring: Whether the plan supports dark web monitoring
        supports_export: Whether the plan supports data export
        supports_advanced_analytics: Whether the plan supports advanced analytics
        create_stripe_product: Whether to create a Stripe product for this plan

    Returns:
        Dictionary containing plan details or None if creation failed
    """
    session = get_db_session()

    if not session:
        return None

    try:
        # Convert the tier string to its enum; the enum values are uppercase (e.g. "FREE")
        tier_enum = SubscriptionTier(tier.upper())
    except ValueError:
        return None

    plan = run_async(create_subscription_plan(
        db=session,
        name=name,
        tier=tier_enum,
        description=description,
        price_monthly=price_monthly,
        price_annually=price_annually,
        max_alerts=max_alerts,
        max_reports=max_reports,
        max_searches_per_day=max_searches_per_day,
        max_monitoring_keywords=max_monitoring_keywords,
        max_data_retention_days=max_data_retention_days,
        supports_api_access=supports_api_access,
        supports_live_feed=supports_live_feed,
        supports_dark_web_monitoring=supports_dark_web_monitoring,
        supports_export=supports_export,
        supports_advanced_analytics=supports_advanced_analytics,
        create_stripe_product=create_stripe_product
    ))

    if not plan:
        return None

    return {
        "id": plan.id,
        "name": plan.name,
        "tier": plan.tier.value if plan.tier else None,
        "description": plan.description,
        "price_monthly": plan.price_monthly,
        "price_annually": plan.price_annually,
        "max_alerts": plan.max_alerts,
        "max_reports": plan.max_reports,
        "max_searches_per_day": plan.max_searches_per_day,
        "max_monitoring_keywords": plan.max_monitoring_keywords,
        "max_data_retention_days": plan.max_data_retention_days,
        "supports_api_access": plan.supports_api_access,
        "supports_live_feed": plan.supports_live_feed,
        "supports_dark_web_monitoring": plan.supports_dark_web_monitoring,
        "supports_export": plan.supports_export,
        "supports_advanced_analytics": plan.supports_advanced_analytics,
        "is_active": plan.is_active,
        "stripe_product_id": plan.stripe_product_id,
        "stripe_monthly_price_id": plan.stripe_monthly_price_id,
        "stripe_annual_price_id": plan.stripe_annual_price_id,
    }
def subscribe_user_to_plan(
    user_id: int,
    plan_id: int,
    billing_period: str = "monthly",
    create_stripe_subscription: bool = True,
    payment_method_id: Optional[str] = None
) -> Optional[Dict[str, Any]]:
    """
    Subscribe a user to a plan.

    Args:
        user_id: ID of the user
        plan_id: ID of the plan
        billing_period: Billing period ("monthly" or "annually")
        create_stripe_subscription: Whether to create a Stripe subscription
        payment_method_id: ID of the payment method to use (required if create_stripe_subscription is True)

    Returns:
        Dictionary containing subscription details or None if creation failed
    """
    session = get_db_session()

    if not session:
        return None

    try:
        # Convert the billing period string to its enum; the enum values are uppercase (e.g. "MONTHLY")
        billing_period_enum = BillingPeriod(billing_period.upper())
    except ValueError:
        return None

    subscription = run_async(create_user_subscription(
        db=session,
        user_id=user_id,
        plan_id=plan_id,
        billing_period=billing_period_enum,
        create_stripe_subscription=create_stripe_subscription,
        payment_method_id=payment_method_id
    ))

    if not subscription:
        return None

    plan = subscription.plan

    return {
        "id": subscription.id,
        "user_id": subscription.user_id,
        "plan_id": subscription.plan_id,
        "plan_name": plan.name if plan else None,
        "plan_tier": plan.tier.value if plan and plan.tier else None,
        "status": subscription.status.value if subscription.status else None,
        "billing_period": subscription.billing_period.value if subscription.billing_period else None,
        "current_period_start": subscription.current_period_start,
        "current_period_end": subscription.current_period_end,
        "stripe_subscription_id": subscription.stripe_subscription_id,
        "stripe_customer_id": subscription.stripe_customer_id,
        "created_at": subscription.created_at,
    }


def cancel_subscription(
    subscription_id: int,
    cancel_stripe_subscription: bool = True
) -> Optional[Dict[str, Any]]:
    """
    Cancel a subscription.

    Args:
        subscription_id: ID of the subscription to cancel
        cancel_stripe_subscription: Whether to cancel the Stripe subscription

    Returns:
        Dictionary containing subscription details or None if cancellation failed
    """
    session = get_db_session()

    if not session:
        return None

    subscription = run_async(cancel_user_subscription(
        db=session,
        subscription_id=subscription_id,
        cancel_stripe_subscription=cancel_stripe_subscription
    ))

    if not subscription:
        return None

    plan = subscription.plan

    return {
        "id": subscription.id,
        "user_id": subscription.user_id,
        "plan_id": subscription.plan_id,
        "plan_name": plan.name if plan else None,
        "plan_tier": plan.tier.value if plan and plan.tier else None,
        "status": subscription.status.value if subscription.status else None,
        "billing_period": subscription.billing_period.value if subscription.billing_period else None,
        "current_period_start": subscription.current_period_start,
        "current_period_end": subscription.current_period_end,
        "stripe_subscription_id": subscription.stripe_subscription_id,
        "stripe_customer_id": subscription.stripe_customer_id,
        "created_at": subscription.created_at,
        "canceled_at": subscription.canceled_at,
    }
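A subscribe/cancel round-trip with Stripe disabled is useful for local development. A sketch (user and plan IDs invented):

from src.streamlit_subscription_services import subscribe_user_to_plan, cancel_subscription

sub = subscribe_user_to_plan(
    user_id=1,   # invented
    plan_id=2,   # invented
    billing_period="annually",
    create_stripe_subscription=False,  # skip Stripe while developing locally
)
if sub:
    cancel_subscription(sub["id"], cancel_stripe_subscription=False)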
def initialize_default_plans():
    """Initialize default subscription plans if they don't exist."""
    # Get existing plans
    plans_df = get_subscription_plans_df(active_only=False)

    if not plans_df.empty:
        # Plans already exist
        return

    # Create default plans
    # Free tier
    create_new_subscription_plan(
        name="Free",
        tier="free",
        description="Basic access to the platform with limited features. Perfect for individuals or small teams starting with OSINT.",
        price_monthly=0.0,
        price_annually=0.0,
        max_alerts=5,
        max_reports=2,
        max_searches_per_day=10,
        max_monitoring_keywords=5,
        max_data_retention_days=7,
        supports_api_access=False,
        supports_live_feed=False,
        supports_dark_web_monitoring=False,
        supports_export=False,
        supports_advanced_analytics=False,
        create_stripe_product=False  # No need to create Stripe product for free tier
    )

    # Basic tier
    create_new_subscription_plan(
        name="Basic",
        tier="basic",
        description="Enhanced access with more features. Ideal for small businesses and security teams requiring regular threat intelligence.",
        price_monthly=29.99,
        price_annually=299.99,
        max_alerts=20,
        max_reports=10,
        max_searches_per_day=50,
        max_monitoring_keywords=25,
        max_data_retention_days=30,
        supports_api_access=False,
        supports_live_feed=True,
        supports_dark_web_monitoring=True,
        supports_export=True,
        supports_advanced_analytics=False
    )

    # Professional tier
    create_new_subscription_plan(
        name="Professional",
        tier="professional",
        description="Comprehensive access for professional users. Perfect for medium-sized organizations requiring advanced threat intelligence capabilities.",
        price_monthly=99.99,
        price_annually=999.99,
        max_alerts=100,
        max_reports=50,
        max_searches_per_day=200,
        max_monitoring_keywords=100,
        max_data_retention_days=90,
        supports_api_access=True,
        supports_live_feed=True,
        supports_dark_web_monitoring=True,
        supports_export=True,
        supports_advanced_analytics=True
    )

    # Enterprise tier
    create_new_subscription_plan(
        name="Enterprise",
        tier="enterprise",
        description="Full access to all features with unlimited usage. Designed for large organizations with sophisticated threat intelligence requirements.",
        price_monthly=249.99,
        price_annually=2499.99,
        max_alerts=0,  # Unlimited
        max_reports=0,  # Unlimited
        max_searches_per_day=0,  # Unlimited
        max_monitoring_keywords=0,  # Unlimited
        max_data_retention_days=365,
        supports_api_access=True,
        supports_live_feed=True,
        supports_dark_web_monitoring=True,
        supports_export=True,
        supports_advanced_analytics=True
    )
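The seeding function is safe to call on every run, since it returns early when plans already exist. In the Streamlit entrypoint it could be wrapped so it executes once per server process; a sketch (the cache wrapper is our suggestion, not part of this commit):

import streamlit as st

from src.streamlit_subscription_services import initialize_default_plans

@st.cache_resource
def _seed_plans() -> bool:
    """Runs once per process; initialize_default_plans() no-ops if plans exist."""
    initialize_default_plans()
    return True

_seed_plans()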