sajjadahmad committed
Commit 059192a · verified
1 Parent(s): 3f3b728

Update app.py

Files changed (1)
app.py +371 -0
app.py CHANGED
@@ -0,0 +1,371 @@
+ import streamlit as st
+ import numpy as np
+ import torch
+ import cv2
+ import tempfile
+ import time
+ from PIL import Image
+ from ultralytics import YOLO
+ from transformers import AutoImageProcessor, AutoModelForImageClassification, pipeline
+ from torchvision import transforms
+ from torchvision.models import mobilenet_v3_large, MobileNet_V3_Large_Weights
+ import torch.nn.functional as F
+
+ # Load Models
+ species_processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
+ species_model = AutoModelForImageClassification.from_pretrained("microsoft/resnet-50").eval()
+ yolo_model = YOLO("yolov8x.pt")
+ threat_model = pipeline("image-classification", model="nateraw/vit-base-beans")
+
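+ # NOTE: all four models are created at import time, and Streamlit re-runs this
+ # script on every interaction, so each rerun repeats the model construction.
+ # Wrapping the loaders in functions decorated with @st.cache_resource would
+ # build them once per server process. The resnet-50 processor/model pair is
+ # loaded here but never referenced by the UI paths below, which use
+ # MobileNetV3 and YOLO instead.
+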
+ # Habitat Analysis Model
+ class HabitatAnalyzer:
+     def __init__(self):
+         self.CLASSES = ['vegetation', 'water', 'urban', 'barren']
+
+     def analyze_vegetation(self, image_array):
+         # NDVI = (NIR - Red) / (NIR + Red), assuming a 4-band image with red
+         # in band 0 and near-infrared in band 3; 1e-8 guards against /0.
+         ndvi = (image_array[:, :, 3] - image_array[:, :, 0]) / (image_array[:, :, 3] + image_array[:, :, 0] + 1e-8)
+         return ndvi
+
+     def detect_land_changes(self, image1, image2):
+         return cv2.absdiff(image1, image2)
+
+ class SpeciesMonitoringSystem:
+     def __init__(self):
+         self.detection_model = mobilenet_v3_large(weights=MobileNet_V3_Large_Weights.DEFAULT)
+         self.detection_model.eval()
+
+         self.species_classes = [
+             'deer', 'elk', 'moose', 'bear', 'wolf', 'mountain lion', 'bobcat',
+             'lynx', 'bighorn sheep', 'bison', 'wild boar', 'caribou', 'antelope',
+             'coyote', 'jaguar', 'leopard', 'tiger', 'lion', 'gorilla', 'chimpanzee',
+             'fox', 'raccoon', 'beaver', 'badger', 'otter', 'wolverine', 'porcupine',
+             'skunk', 'opossum', 'armadillo', 'wild cat', 'jackal', 'hyena',
+             'marten', 'fisher', 'weasel', 'mink', 'coati', 'monkey', 'lemur',
+             'rabbit', 'squirrel', 'chipmunk', 'rat', 'mouse', 'vole', 'mole',
+             'shrew', 'bat', 'hedgehog', 'gopher', 'prairie dog', 'muskrat',
+             'hamster', 'guinea pig', 'ferret', 'chinchilla', 'dormouse',
+             'eagle', 'hawk', 'falcon', 'owl', 'vulture', 'condor', 'crow', 'raven',
+             'woodpecker', 'duck', 'goose', 'swan', 'heron', 'crane', 'stork',
+             'pelican', 'flamingo', 'penguin', 'ostrich', 'emu', 'kiwi', 'peacock',
+             'pheasant', 'quail', 'grouse', 'turkey', 'cardinal', 'bluejay',
+             'sparrow', 'finch', 'warbler', 'thrush', 'swallow', 'hummingbird',
+             'snake', 'lizard', 'turtle', 'tortoise', 'alligator', 'crocodile',
+             'iguana', 'gecko', 'monitor lizard', 'chameleon', 'python', 'cobra',
+             'viper', 'rattlesnake', 'boa', 'anaconda', 'skink', 'bearded dragon',
+             'frog', 'toad', 'salamander', 'newt', 'axolotl', 'caecilian',
+             'tree frog', 'bullfrog', 'fire salamander', 'spotted salamander',
+             'salmon', 'trout', 'bass', 'pike', 'catfish', 'carp', 'perch',
+             'tuna', 'swordfish', 'marlin', 'shark', 'ray', 'eel', 'sturgeon',
+             'barracuda', 'grouper', 'snapper', 'cod', 'halibut', 'flounder',
+             'whale', 'dolphin', 'porpoise', 'seal', 'sea lion', 'walrus',
+             'orca', 'narwhal', 'beluga', 'manatee', 'dugong', 'sea otter',
+             'butterfly', 'moth', 'beetle', 'ant', 'bee', 'wasp', 'spider',
+             'scorpion', 'centipede', 'millipede', 'crab', 'lobster', 'shrimp',
+             'octopus', 'squid', 'jellyfish', 'starfish', 'sea urchin', 'coral',
+             'snail', 'slug', 'earthworm', 'leech'
+         ]
+
+         # Standard ImageNet preprocessing expected by MobileNetV3
+         self.transform = transforms.Compose([
+             transforms.Resize((224, 224)),
+             transforms.ToTensor(),
+             transforms.Normalize(mean=[0.485, 0.456, 0.406],
+                                  std=[0.229, 0.224, 0.225])
+         ])
+
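+     # NOTE: MobileNetV3 is an ImageNet classifier with 1000 generic classes,
+     # not a wildlife model. detect_species folds its predicted class index
+     # into the species list above via a modulo, so the reported species names
+     # are illustrative placeholders rather than true identifications; a
+     # checkpoint fine-tuned on wildlife imagery would be needed for real
+     # monitoring.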
+     def detect_species(self, image):
+         img_tensor = self.transform(image).unsqueeze(0)
+
+         with torch.no_grad():
+             outputs = self.detection_model(img_tensor)
+             probabilities = F.softmax(outputs, dim=1)
+
+         top_prob, top_class = torch.topk(probabilities, 3)
+         results = []
+
+         for i in range(3):
+             species = self.species_classes[top_class[0][i] % len(self.species_classes)]
+             confidence = top_prob[0][i].item() * 100
+             results.append((species, confidence))
+
+         return results
+
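+     # NOTE: the population count below is a brightness-threshold/contour
+     # heuristic: it counts every high-contrast blob in the frame, so busy
+     # backgrounds inflate the figure. Treat the result as a rough estimate.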
+     def count_population(self, image):
+         gray = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
+         blur = cv2.GaussianBlur(gray, (5, 5), 0)
+         _, thresh = cv2.threshold(blur, 127, 255, cv2.THRESH_BINARY)
+         contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+
+         img_with_contours = np.array(image).copy()
+         cv2.drawContours(img_with_contours, contours, -1, (0, 255, 0), 2)
+
+         return len(contours), Image.fromarray(img_with_contours)
+
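+     # NOTE: the health "score" is a heuristic built from mean color, pixel
+     # standard deviation, and channel variation with hand-picked 0.4/0.3/0.3
+     # weights; it has no veterinary grounding and is best read as an
+     # image-statistics proxy.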
+     def assess_health(self, image):
+         img_array = np.array(image)
+         avg_color = np.mean(img_array, axis=(0, 1))
+         texture_measure = np.std(img_array)
+         color_variation = np.std(avg_color)
+
+         color_score = np.mean(avg_color) / 255 * 100
+         texture_score = min(100, texture_measure / 2)
+         variation_score = min(100, color_variation * 2)
+
+         health_score = (color_score * 0.4 + texture_score * 0.3 + variation_score * 0.3)
+
+         if health_score > 80:
+             status = "Excellent"
+         elif health_score > 60:
+             status = "Good"
+         elif health_score > 40:
+             status = "Fair"
+         else:
+             status = "Poor"
+
+         indicators = {
+             "Color Vibrancy": color_score,
+             "Texture Complexity": texture_score,
+             "Pattern Variation": variation_score
+         }
+
+         return status, health_score, indicators
+
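+ # NOTE: threat_model above is "nateraw/vit-base-beans", a demo ViT trained to
+ # classify bean-leaf disease, so its labels cannot match wildlife threats; it
+ # is a placeholder. detect_threat is also never called from main(), where the
+ # Threat Detection page relies on the YOLO detector instead.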
+ def detect_threat(image, labels):
+     results = threat_model(image)
+     for result in results:
+         if result['label'] in labels and result['score'] > 0.5:
+             return f"{result['label']} Detected with confidence {result['score']:.2f}"
+     return "No Threat Detected"
+
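+ # NOTE: despite the *_path parameter names, main() passes Streamlit
+ # UploadedFile objects here; PIL.Image.open accepts both paths and file-like
+ # objects, so either works. The ">50" pixel-difference cutoff used for the
+ # changed-area percentage is an arbitrary threshold.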
+ def detect_land_changes(image1_path, image2_path):
+     image1 = Image.open(image1_path)
+     image2 = Image.open(image2_path)
+
+     image_array1 = np.array(image1)
+     image_array2 = np.array(image2)
+
+     if image_array1.shape != image_array2.shape:
+         # Show the error in the UI; the caller discards the return value.
+         st.error("Images must be the same size.")
+         return None
+
+     changes = cv2.absdiff(image_array1, image_array2)
+
+     col1, col2, col3 = st.columns(3)
+     with col1:
+         st.image(image1, caption="Image 1")
+     with col2:
+         st.image(image2, caption="Image 2")
+     with col3:
+         st.image(changes, caption="Changes Detected")
+
+     change_percent = np.sum(changes > 50) / changes.size * 100
+     st.write(f"Changed Area: {change_percent:.2f}%")
+
+     return changes
+
+ def main():
+     habitat_analyzer = HabitatAnalyzer()
+
+     st.sidebar.title("Navigation")
+     option = st.sidebar.radio("Select an Analysis Type:",
+                               ["Species Monitoring", "Land Change Detection", "Animal Monitoring", "Threat Detection"])
+
+     if option == "Species Monitoring":
+         st.title("Species Identification")
+         monitoring_system = SpeciesMonitoringSystem()
+
+         uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
+
+         if uploaded_file is not None:
+             image = Image.open(uploaded_file)
+             st.image(image, caption="Uploaded Image", use_column_width=True)
+
+             progress_bar = st.progress(0)
+
+             with st.spinner("Analyzing image..."):
+                 col1, col2, col3 = st.columns(3)
+
+                 progress_bar.progress(30)
+                 species_results = monitoring_system.detect_species(image)
+
+                 progress_bar.progress(60)
+                 count, marked_image = monitoring_system.count_population(image)
+
+                 progress_bar.progress(90)
+                 health_status, health_score, health_indicators = monitoring_system.assess_health(image)
+
+                 with col1:
+                     st.subheader("🔍 Species Detection")
+                     for species, confidence in species_results:
+                         st.write(f"**{species.title()}**")
+                         st.progress(confidence/100)
+                         st.caption(f"Confidence: {confidence:.1f}%")
+
+                 with col2:
+                     st.subheader("👥 Population Count")
+                     st.write(f"**Detected Animals:** {count}")
+                     st.image(marked_image, caption="Detection Visualization", use_column_width=True)
+
+                 with col3:
+                     st.subheader("💪 Health Assessment")
+                     st.write(f"**Status:** {health_status}")
+                     st.write(f"**Overall Score:** {health_score:.1f}/100")
+
+                     for indicator, value in health_indicators.items():
+                         st.write(f"**{indicator}:**")
+                         st.progress(value/100)
+                         st.caption(f"{value:.1f}%")
+
+                 progress_bar.progress(100)
+
+             st.sidebar.markdown("---")
+             st.sidebar.markdown("### Analysis Details")
+             st.sidebar.text(f"Analyzed at: {time.strftime('%Y-%m-%d %H:%M:%S')}")
+             st.sidebar.text(f"Image size: {image.size}")
+
+             st.markdown("---")
+             st.subheader("📊 Export Results")
+
+             summary = f"""Wildlife Monitoring Analysis Report
+ Date: {time.strftime('%Y-%m-%d %H:%M:%S')}
+
+ Species Detection Results:
+ {'-' * 30}
+ """
+             for species, confidence in species_results:
+                 summary += f"\n{species.title()}: {confidence:.1f}% confidence"
+
+             summary += f"""\n\nPopulation Count:
+ {'-' * 30}
+ Total detected: {count} individuals
+
+ Health Assessment:
+ {'-' * 30}
+ Status: {health_status}
+ Overall Score: {health_score:.1f}/100
+ """
+             for indicator, value in health_indicators.items():
+                 summary += f"\n{indicator}: {value:.1f}%"
+
+             st.download_button(
+                 label="Download Analysis Report",
+                 data=summary,
+                 file_name="wildlife_analysis_report.txt",
+                 mime="text/plain"
+             )
+
+     elif option == "Land Change Detection":
+         st.title("🌍 Land Change Detection")
+         uploaded_file2 = st.file_uploader("Upload first image", type=['tif', 'png', 'jpg'])
+         uploaded_file3 = st.file_uploader("Upload second image", type=['tif', 'png', 'jpg'])
+
+         if uploaded_file2 is not None and uploaded_file3 is not None:
+             detect_land_changes(uploaded_file2, uploaded_file3)
+
+     elif option == "Animal Monitoring":
+         st.title("Animal Monitoring")
+         uploaded_file4 = st.file_uploader("Upload Image/Video", type=["jpg", "jpeg", "png", "mp4"])
+
+         if uploaded_file4:
+             if uploaded_file4.type.startswith("image"):
+                 file_bytes = np.asarray(bytearray(uploaded_file4.read()), dtype=np.uint8)
+                 image = cv2.imdecode(file_bytes, 1)
+
+                 if image is None:
+                     st.error("Error loading image. Please upload a valid image file.")
+                 else:
+                     results = yolo_model(image)
+                     for result in results:
+                         for box in result.boxes.xyxy:
+                             x1, y1, x2, y2 = map(int, box[:4])
+                             cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
+
+                     st.image(image, caption="Detected Animals", channels="BGR")
+                     st.write(f"Estimated Count: {len(results[0].boxes)}")
+
+             elif uploaded_file4.type.startswith("video"):
+                 tfile = tempfile.NamedTemporaryFile(delete=False)
+                 tfile.write(uploaded_file4.read())
+                 tfile.close()  # flush the bytes to disk before OpenCV opens the file
+                 cap = cv2.VideoCapture(tfile.name)
+
+                 if not cap.isOpened():
+                     st.error("Error loading video. Please upload a valid video file.")
+                 else:
+                     stframe = st.empty()
+                     st.write("Processing video...")
+
+                     while cap.isOpened():
+                         ret, frame = cap.read()
+                         if not ret:
+                             break
+
+                         frame = cv2.resize(frame, (640, 480))
+                         results = yolo_model(frame)
+
+                         for result in results:
+                             for box in result.boxes.xyxy:
+                                 x1, y1, x2, y2 = map(int, box[:4])
+                                 cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
+
+                         stframe.image(frame, channels="BGR")
+                         time.sleep(0.03)
+
+                     cap.release()
+
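+     # NOTE: the poaching check below reuses the general-purpose YOLOv8 weights,
+     # which are trained on COCO. COCO contains 'person', 'car', 'truck', and
+     # 'knife' but no 'gun' class, so the 'gun' label can never fire; a detector
+     # fine-tuned on weapons imagery would be needed for real poaching alerts.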
+     elif option == "Threat Detection":
+         st.title("Threat Detection and Prevention")
+         st.sidebar.header("Choose Threat Detection")
+         detection_option = st.sidebar.selectbox(
+             "Select an option",
+             ["Poaching Alerts"]
+         )
+
+         if detection_option in ["Poaching Alerts"]:
+             uploaded_file7 = st.file_uploader("Upload Image", type=['jpg', 'jpeg', 'png'])
+
+             if uploaded_file7:
+                 image = Image.open(uploaded_file7)
+                 st.image(image, caption="Uploaded Image", use_column_width=True)
+
+                 if detection_option == "Poaching Alerts":
+                     st.subheader("🎯 Poaching Activity Detection")
+
+                     with st.spinner("Analyzing image for potential poaching activities..."):
+                         results = yolo_model(image)
+
+                         poaching_objects = ['person', 'gun', 'knife', 'truck', 'car']
+                         detections = {}
+
+                         for result in results:
+                             for box in result.boxes:
+                                 cls = int(box.cls[0])
+                                 conf = float(box.conf[0])
+                                 label = result.names[cls]
+
+                                 if label in poaching_objects and conf > 0.3:
+                                     detections[label] = conf
+
+                         if detections:
+                             for obj, conf in detections.items():
+                                 st.progress(conf)
+                                 st.write(f"{obj.title()}: {conf*100:.1f}% confidence")
+
+                             annotated_img = np.array(image)
+                             for result in results:
+                                 for box in result.boxes:
+                                     x1, y1, x2, y2 = map(int, box.xyxy[0])
+                                     cv2.rectangle(annotated_img, (x1, y1), (x2, y2), (255, 0, 0), 2)
+
+                             st.image(annotated_img, caption="Detected Objects", use_column_width=True)
+
+                             if any(conf > 0.7 for conf in detections.values()):
+                                 st.error("⚠️ High-risk poaching activity detected! Alert sent to authorities.")
+                         else:
+                             st.success("No suspicious activities detected.")
+
+ if __name__ == "__main__":
+     main()