SRUTHI123 committed
Commit bdfe3f7 · verified · 1 parent: ddfb74b

Upload 3 files

Files changed (3):
  1. app.py +35 -0
  2. requirements.txt +4 -0
  3. yolo11l.pt.py +578 -0
app.py ADDED
@@ -0,0 +1,35 @@
+ import streamlit as st
+ from ultralytics import YOLO
+ import numpy as np
+ import cv2
+ from PIL import Image
+
+ st.title("🔍 Suspicious Activity Detection with YOLOv11")
+
+ # Load the model once and cache it across Streamlit reruns
+ @st.cache_resource
+ def load_model():
+     return YOLO("yolo11l.pt")
+
+ model = load_model()
+
+ uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
+
+ if uploaded_file:
+     image = Image.open(uploaded_file)
+     st.image(image, caption="Uploaded Image", use_column_width=True)
+
+     if st.button("Detect Activity"):
+         img_array = np.array(image.convert("RGB"))[..., ::-1]  # Convert RGB to BGR
+         results = model.predict(img_array)
+
+         for r in results:
+             plotted = r.plot()[..., ::-1]  # r.plot() returns BGR; flip back to RGB for st.image
+             st.image(plotted, caption="Detections", use_column_width=True)
+
+             st.subheader("Detected Objects:")
+             for box in r.boxes:
+                 conf = float(box.conf[0])
+                 cls = int(box.cls[0])
+                 cls_name = model.names[cls]
+                 st.write(f"- {cls_name} (Confidence: {conf:.2f})")
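To try the app locally (assuming the yolo11l.pt weights file sits next to app.py, as the hard-coded path implies):

    pip install -r requirements.txt
    streamlit run app.py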
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ streamlit
+ ultralytics
+ opencv-python
+ pillow
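Note: the notebook script below additionally imports kagglehub, matplotlib, seaborn, tqdm, and scikit-learn, none of which are pinned here. Outside Colab (which preinstalls most of them), something like `pip install kagglehub matplotlib seaborn tqdm scikit-learn` would also be needed.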
yolo11l.pt.py ADDED
@@ -0,0 +1,578 @@
+ # -*- coding: utf-8 -*-
+ """yolo11l.pt
+
+ Automatically generated by Colab.
+
+ Original file is located at
+     https://colab.research.google.com/drive/1IAyIjPN1J_9s5MhJ0uCsccxfcA0WfXl2
+ """
+
+ # IMPORTANT: RUN THIS CELL IN ORDER TO IMPORT YOUR KAGGLE DATA SOURCES,
+ # THEN FEEL FREE TO DELETE THIS CELL.
+ # NOTE: THIS NOTEBOOK ENVIRONMENT DIFFERS FROM KAGGLE'S PYTHON
+ # ENVIRONMENT SO THERE MAY BE MISSING LIBRARIES USED BY YOUR
+ # NOTEBOOK.
+ import kagglehub
+ manhnh123_action_detectionnormalstealingpeakingsneaking_path = kagglehub.dataset_download('manhnh123/action-detectionnormalstealingpeakingsneaking')
+ nirmalgaud_cctvfootage_path = kagglehub.dataset_download('nirmalgaud/cctvfootage')
+ ultralytics_yolo11_pytorch_default_1_path = kagglehub.model_download('ultralytics/yolo11/PyTorch/default/1')
+ nadeemkaggle123_yolov8n_pt_other_default_1_path = kagglehub.model_download('nadeemkaggle123/yolov8n.pt/Other/default/1')
+
+ print('Data source import complete.')
+
+ from google.colab import drive
+ drive.mount('/content/drive')
+
+ """# **Import Libraries**"""
+
+ # Shell command from the original Colab cell (not valid in a plain .py file);
+ # run it in a notebook cell, or `pip install ultralytics` in a terminal:
+ # !pip install ultralytics
+
+ # Import all necessary libraries
+ from ultralytics import YOLO
+ import cv2
+ import os
+ import numpy as np
+ import matplotlib.pyplot as plt
+ from PIL import Image
+ import seaborn as sns
+ from tqdm.notebook import tqdm
+
+ """# **Setup and Configuration**"""
+
+ print("\n========== SECTION 1: Setup and Configuration ==========")
+
+ class Config:
+     DATASET_PATH = '/content/drive/MyDrive/archive'
+     TRAIN_DIR = os.path.join(DATASET_PATH, 'train')
+     TEST_DIR = os.path.join(DATASET_PATH, 'test')
+     CLASSES = ['Normal', 'Peaking', 'Sneaking', 'Stealing']
+     CONF_THRESHOLD = 0.25
+     BATCH_SIZE = 16
+     IMG_SIZE = 640
+
+ model = YOLO('/content/yolo11l.pt')
+ print("Model loaded successfully!")
+
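+ # Directory layout assumed by the loops below (inferred from Config and the
+ # os.listdir calls; the notebook never states it explicitly):
+ #   /content/drive/MyDrive/archive/
+ #       train/{Normal, Peaking, Sneaking, Stealing}/*.jpg
+ #       test/{Normal, Peaking, Sneaking, Stealing}/*.jpg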
+ """# **Data Exploration**"""
+
+ def explore_dataset():
+     """Explore and visualize the dataset"""
+     class_counts = {}
+     total_train_images = 0
+     total_test_images = 0
+
+     print("\nDataset Distribution:")
+
+     for class_name in Config.CLASSES:
+         train_count = len(os.listdir(os.path.join(Config.TRAIN_DIR, class_name)))
+         test_count = len(os.listdir(os.path.join(Config.TEST_DIR, class_name)))
+         class_counts[class_name] = {'train': train_count, 'test': test_count}
+         total_train_images += train_count
+         total_test_images += test_count
+         print(f"{class_name:8} - Train: {train_count:4} images, Test: {test_count:4} images")
+
+     total_images = total_train_images + total_test_images
+     print(f"\nTotal images in dataset: {total_images}")
+
+     plt.figure(figsize=(12, 6))
+     x = np.arange(len(Config.CLASSES))
+     width = 0.35
+
+     plt.bar(x - width/2, [counts['train'] for counts in class_counts.values()], width, label='Train', color='skyblue')
+     plt.bar(x + width/2, [counts['test'] for counts in class_counts.values()], width, label='Test', color='salmon')
+
+     plt.xlabel('Classes')
+     plt.ylabel('Number of Images')
+     plt.title('Dataset Distribution - Bar Plot')
+     plt.xticks(x, Config.CLASSES)
+     plt.legend()
+     plt.tight_layout()
+     plt.show()
+
+     pie_labels = [f"{class_name} (Train)" for class_name in Config.CLASSES] + \
+                  [f"{class_name} (Test)" for class_name in Config.CLASSES]
+     pie_sizes = [counts['train'] for counts in class_counts.values()] + \
+                 [counts['test'] for counts in class_counts.values()]
+     pie_colors = plt.cm.tab20.colors[:len(pie_sizes)]
+
+     plt.figure(figsize=(12, 8))
+     plt.pie(pie_sizes, labels=pie_labels, autopct='%1.1f%%', startangle=140, colors=pie_colors)
+     plt.title('Dataset Distribution - Pie Chart')
+     plt.axis('equal')
+     plt.tight_layout()
+     plt.show()
+
+ explore_dataset()
+
+ """# **Display sample Images**"""
+
+ def show_sample_images(num_samples=3):
+     """Display sample images from each class"""
+     num_classes = len(Config.CLASSES)
+     total_images = num_classes * num_samples
+     cols = num_samples
+     rows = (total_images + cols - 1) // cols
+
+     plt.figure(figsize=(15, rows * 4))
+
+     for idx, class_name in enumerate(Config.CLASSES):
+         class_path = os.path.join(Config.TRAIN_DIR, class_name)
+         images = os.listdir(class_path)
+
+         for sample_idx in range(num_samples):
+             img_path = os.path.join(class_path, np.random.choice(images))
+             img = Image.open(img_path)
+
+             subplot_idx = idx * num_samples + sample_idx + 1
+             plt.subplot(rows, cols, subplot_idx)
+             plt.imshow(img)
+             plt.title(f'{class_name}\nSample {sample_idx + 1}', fontsize=10)
+             plt.axis('off')
+
+     plt.suptitle('Sample Images from Each Class', fontsize=18, y=1.02)
+     plt.tight_layout()
+     plt.show()
+
+ print("\nDisplaying sample images...")
+ show_sample_images(num_samples=3)
+
+ """# **Model Predictions**"""
+
+ print("\n========== SECTION 3: Model Predictions ==========")
+
+ def predict_and_display(image_path, conf_threshold=Config.CONF_THRESHOLD):
+     """Make and display predictions on a single image"""
+     img = cv2.imread(image_path)
+     img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+
+     results = model.predict(
+         source=img,
+         conf=conf_threshold,
+         show=False
+     )
+
+     plt.figure(figsize=(12, 8))
+     for r in results:
+         im_array = r.plot()
+         plt.imshow(cv2.cvtColor(im_array, cv2.COLOR_BGR2RGB))
+         plt.title(f"Predictions: {os.path.basename(image_path)}")
+         plt.axis('off')
+
+         for box in r.boxes:
+             conf = float(box.conf[0])
+             cls = int(box.cls[0])
+             cls_name = model.names[cls]
+             print(f"Detected {cls_name} (Confidence: {conf:.2f})")
+
+     plt.show()
+
+ confidence_thresholds = [0.25, 0.5, 0.75]
+
+ print("\nTesting one image per class with different confidence thresholds...")
+ for conf in confidence_thresholds:
+     print(f"\nConfidence Threshold: {conf}")
+     for class_name in Config.CLASSES:
+         test_class_path = os.path.join(Config.TEST_DIR, class_name)
+         if os.path.exists(test_class_path):
+             images = os.listdir(test_class_path)
+             if images:
+                 sample_image = os.path.join(test_class_path, np.random.choice(images))
+                 print(f"\nProcessing class '{class_name}' with image '{os.path.basename(sample_image)}':")
+                 predict_and_display(sample_image, conf)
+
+ """# **Batch Processing**"""
+
+ print("\n========== SECTION 4: Batch Processing ==========")
+
+ def process_batch(directory, batch_size=Config.BATCH_SIZE):
+     """Process multiple images in a batch and display predictions"""
+     image_paths = []
+
+     for class_name in Config.CLASSES:
+         class_path = os.path.join(directory, class_name)
+         if os.path.exists(class_path):
+             class_images = os.listdir(class_path)
+             image_paths.extend(
+                 [os.path.join(class_path, img) for img in class_images[:batch_size]]
+             )
+
+     if not image_paths:
+         print("No images found for batch processing.")
+         return
+
+     results = model(image_paths, conf=Config.CONF_THRESHOLD)
+
+     num_images = len(image_paths)
+     grid_cols = 4
+     grid_rows = int(np.ceil(num_images / grid_cols))
+     plt.figure(figsize=(20, 5 * grid_rows))
+
+     for idx, r in enumerate(results):
+         plt.subplot(grid_rows, grid_cols, idx + 1)
+         im_array = r.plot()
+         plt.imshow(cv2.cvtColor(im_array, cv2.COLOR_BGR2RGB))
+         plt.axis('off')
+         plt.title(f"{os.path.basename(image_paths[idx])}")
+
+     plt.tight_layout()
+     plt.show()
+     print(f"\nProcessed {num_images} images.")
+
+ print("\nProcessing a batch of test images...")
+ process_batch(Config.TEST_DIR, batch_size=8)
+
+ """# **Analysis**"""
+
+ import matplotlib.pyplot as plt
+ import seaborn as sns
+ from collections import Counter
+
+ # Hard-coded per-image detection summaries (apparently pasted from YOLO's
+ # verbose output on the Stealing test images). NOTE: this list shadows the
+ # `results` returned by the batch run above.
+ results = [
+     "1 person, 1 toilet",
+     "1 person, 1 backpack",
+     "1 person, 1 backpack",
+     "1 person, 2 backpacks",
+     "1 person, 1 parking meter, 1 backpack",
+     "1 person, 1 backpack, 1 refrigerator",
+     "1 person, 1 parking meter, 1 backpack",
+     "1 person, 2 backpacks",
+     "1 person, 1 backpack",
+     "1 person, 1 backpack",
+     "1 person, 1 backpack",
+     "1 person, 1 backpack",
+     "1 person, 1 backpack",
+     "1 person, 1 backpack",
+     "1 person, 1 backpack",
+     "1 person",
+     "1 person",
+     "1 person",
+     "1 person",
+     "1 person",
+     "1 person, 1 backpack",
+     "1 person",
+     "1 person",
+     "1 person, 1 handbag, 1 refrigerator",
+     "1 person, 1 backpack, 1 refrigerator",
+     "1 person, 2 backpacks",
+     "2 persons, 1 backpack, 1 suitcase, 1 refrigerator",
+     "1 person, 1 backpack",
+     "1 person, 1 backpack, 1 refrigerator",
+     "1 person, 1 backpack",
+     "2 persons, 1 backpack, 1 suitcase, 1 refrigerator"
+ ]
+
+ def analyze_stealing_detections():
+     detections = {
+         'person': 0,
+         'backpack': 0,
+         'handbag': 0,
+         'suitcase': 0,
+         'refrigerator': 0,
+         'multiple_persons': 0
+     }
+
+     for line in results:
+         if 'persons' in line:
+             detections['multiple_persons'] += 1
+         if 'person' in line:
+             detections['person'] += 1
+         if 'backpack' in line:
+             detections['backpack'] += 1
+         if 'handbag' in line:
+             detections['handbag'] += 1
+         if 'suitcase' in line:  # also matches the plural 'suitcases'
+             detections['suitcase'] += 1
+         if 'refrigerator' in line:
+             detections['refrigerator'] += 1
+
+     plt.figure(figsize=(12, 6))
+     plt.bar(detections.keys(), detections.values(), color='skyblue')
+     plt.title('Common Objects Detected in Stealing Scenes', pad=20)
+     plt.xticks(rotation=45)
+     plt.ylabel('Frequency')
+     for i, v in enumerate(detections.values()):
+         plt.text(i, v + 0.5, str(v), ha='center')
+     plt.tight_layout()
+     plt.show()
+
+     print("\nDetection Statistics:")
+     total_images = len(results)
+     print(f"Total images analyzed: {total_images}")
+     for obj, count in detections.items():
+         percentage = (count / total_images) * 100
+         print(f"{obj}: {count} occurrences ({percentage:.1f}%)")
+
+     print("\nCommon Patterns:")
+     backpack_with_person = sum(1 for line in results if 'person' in line and 'backpack' in line)
+     handbag_with_person = sum(1 for line in results if 'person' in line and 'handbag' in line)
+     refrigerator_scenes = sum(1 for line in results if 'refrigerator' in line)
+
+     print(f"- Person with backpack: {backpack_with_person} scenes")
+     print(f"- Person with handbag: {handbag_with_person} scenes")
+     print(f"- Scenes with refrigerator: {refrigerator_scenes} scenes")
+
+ def classify_stealing_scenes():
+     scene_types = {
+         'shop_theft': 0,
+         'baggage_theft': 0,
+         'other_theft': 0
+     }
+
+     for line in results:
+         if 'refrigerator' in line:
+             scene_types['shop_theft'] += 1
+         elif any(item in line for item in ['backpack', 'handbag', 'suitcase']):
+             scene_types['baggage_theft'] += 1
+         else:
+             scene_types['other_theft'] += 1
+
+     plt.figure(figsize=(10, 6))
+     colors = ['lightcoral', 'lightblue', 'lightgreen']
+     plt.pie(scene_types.values(), labels=scene_types.keys(), autopct='%1.1f%%',
+             colors=colors, explode=(0.1, 0, 0))
+     plt.title('Distribution of Stealing Scene Types')
+     plt.axis('equal')
+     plt.show()
+
+     print("\nScene Type Analysis:")
+     for scene_type, count in scene_types.items():
+         print(f"{scene_type}: {count} scenes")
+
+ print("\n========== SECTION 5: Detection Analysis ==========")
+ analyze_stealing_detections()
+
+ print("\n========== SECTION 6: Scene Classification ==========")
+ classify_stealing_scenes()
+
+ print("\nAnalysis completed!")
+
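+ # The Counter imported above is never used in the original cell; as a small
+ # sketch (assuming every summary line keeps the "N label" comma-separated
+ # format seen above), it could tally total object counts directly:
+ object_counts = Counter()
+ for line in results:
+     for part in line.split(', '):
+         count, label = part.split(' ', 1)
+         object_counts[label.rstrip('s')] += int(count)  # crude plural folding
+ print(object_counts.most_common())
+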
+ """# **Live Test on New Image**"""
+
+ import urllib.request
+ import os
+ import numpy as np
+ import cv2
+ import matplotlib.pyplot as plt
+
+ def detect_action(model, image_path):
+     results = model.predict(source=image_path, conf=0.25, save=False)
+     result = results[0]
+
+     detections = [
+         (model.names[int(box.cls[0])], float(box.conf[0]))
+         for box in result.boxes
+     ]
+
+     def classify_action(detections):
+         detected_objects = [d[0] for d in detections]
+
+         action_scores = {
+             'Stealing': 0.0,
+             'Sneaking': 0.0,
+             'Peaking': 0.0,
+             'Normal': 0.0
+         }
+
+         if 'person' in detected_objects:
+             if any(obj in detected_objects for obj in ['backpack', 'handbag', 'suitcase']):
+                 action_scores['Stealing'] += 0.4
+             if 'refrigerator' in detected_objects:
+                 action_scores['Stealing'] += 0.3
+             if [conf for obj, conf in detections if obj == 'person'][0] < 0.6:
+                 action_scores['Sneaking'] += 0.5
+             if len(detected_objects) <= 2:
+                 action_scores['Peaking'] += 0.5
+
+         if not any(score > 0.3 for score in action_scores.values()):
+             action_scores['Normal'] = 0.4
+
+         return action_scores
+
+     action_scores = classify_action(detections)
+
+     plt.figure(figsize=(15, 7))
+
+     plt.subplot(1, 2, 1)
+     # result.plot() returns a BGR array; convert to RGB for matplotlib
+     plt.imshow(cv2.cvtColor(result.plot(), cv2.COLOR_BGR2RGB))
+     plt.title('Object Detections')
+     plt.axis('off')
+
+     plt.subplot(1, 2, 2)
+     actions = list(action_scores.keys())
+     scores = list(action_scores.values())
+     colors = ['red' if score == max(scores) else 'blue' for score in scores]
+
+     plt.barh(actions, scores, color=colors)
+     plt.title('Action Probability Scores')
+     plt.xlabel('Confidence Score')
+     plt.xlim(0, 1)
+
+     plt.tight_layout()
+     plt.show()
+
+     print("\nDetected Objects:")
+     for obj, conf in detections:
+         print(f"- {obj}: {conf:.2%} confidence")
+
+     print("\nAction Analysis:")
+     predicted_action = max(action_scores.items(), key=lambda x: x[1])
+     print(f"Predicted Action: {predicted_action[0]} ({predicted_action[1]:.2%} confidence)")
+     print("\nAll Action Scores:")
+     for action, score in action_scores.items():
+         print(f"- {action}: {score:.2%}")
+
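+ # A worked example of the scoring heuristic above, on a hypothetical
+ # detection list (not an actual model output):
+ #   detections = [('person', 0.55), ('backpack', 0.91)]
+ #   - 'backpack' alongside 'person'         -> Stealing += 0.4
+ #   - first 'person' confidence 0.55 < 0.6  -> Sneaking += 0.5
+ #   - only two boxes detected               -> Peaking  += 0.5
+ #   Scores: Stealing 0.40, Sneaking 0.50, Peaking 0.50, Normal 0.00.
+ #   max() keeps the first of the tied keys, so the predicted action
+ #   is 'Sneaking'.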
+ test_urls = {
+     'suspicious_action1': 'https://static1.bigstockphoto.com/1/5/2/large1500/251756563.jpg',
+     'suspicious_action2': 'https://img.freepik.com/free-photo/portrait-shocked-man-peeking_329181-19905.jpg',
+     'suspicious_action3': 'https://st2.depositphotos.com/13108546/49983/i/1600/depositphotos_499831894-stock-photo-man-hiding-face-in-mask.jpg',
+     'suspicious_action4': 'https://img.freepik.com/free-photo/businessman-working-laptop_23-2147839979.jpg?t=st=1745582205~exp=1745585805~hmac=85c61ef30f0b655c75c1d8cfdc7adca2e7676d105c2dd87ade27b37db32849e6&w=1380'
+ }
+
+ for name, url in test_urls.items():
+     try:
+         print(f"\nTesting {name}:")
+         image_path = f'test_{name}.jpg'
+
+         # Some hosts reject Python's default user agent; spoof a browser UA
+         opener = urllib.request.build_opener()
+         opener.addheaders = [('User-Agent', 'Mozilla/5.0')]
+         urllib.request.install_opener(opener)
+
+         urllib.request.urlretrieve(url, image_path)
+         print("Image downloaded successfully")
+
+         detect_action(model, image_path)
+
+         os.remove(image_path)
+
+     except Exception as e:
+         print(f"Error processing {url}: {str(e)}")
+
+ print("\nAction detection testing completed!")
+
+ # The Colab notebook re-defined detect_action here verbatim; the earlier
+ # definition is reused instead of repeating it.
+
+ test_paths = {
+     'Normal': '/content/drive/MyDrive/archive/test/Normal/Normal_10.jpg',
+     'Peaking': '/content/drive/MyDrive/archive/test/Peaking/Peaking_10.jpg',
+     'Sneaking': '/content/drive/MyDrive/archive/test/Sneaking/Sneaking_10.jpg',
+     'Stealing': '/content/drive/MyDrive/archive/test/Stealing/Stealing_10.jpg'
+ }
+
+ for action, image_path in test_paths.items():
+     try:
+         print(f"\nTesting {action}:")
+         detect_action(model, image_path)
+     except Exception as e:
+         print(f"Error processing {image_path}: {str(e)}")
+
+ print("\nAction detection testing completed!")
+
+ import matplotlib.pyplot as plt
+ import numpy as np
+ import seaborn as sns
+ from sklearn.metrics import confusion_matrix, classification_report
+
+ # Example true labels and predicted labels for the 4 classes.
+ true_labels = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
+ # Hand-crafted predictions with a few deliberate errors; these are
+ # illustrative placeholders, not actual model outputs.
+ predicted_labels = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 0, 3, 0, 1, 2, 3, 0, 1, 3, 3]
+
+ def calculate_accuracy(true_labels, predicted_labels):
+     """Calculates the accuracy of predictions."""
+     correct_predictions = sum(1 for true, pred in zip(true_labels, predicted_labels) if true == pred)
+     total_predictions = len(true_labels)
+     accuracy = correct_predictions / total_predictions
+     return accuracy
+
+ accuracy = calculate_accuracy(true_labels, predicted_labels)
+ print(f"Accuracy: {accuracy:.2f}")
+
+ # Generate and print classification report
+ report = classification_report(true_labels, predicted_labels, target_names=['Normal', 'Peaking', 'Sneaking', 'Stealing'])
+ print("\nClassification Report:\n", report)
+
+ def plot_confusion_matrix(true_labels, predicted_labels, classes):
+     """Plots the confusion matrix."""
+     cm = confusion_matrix(true_labels, predicted_labels)
+     plt.figure(figsize=(8, 6))
+     sns.heatmap(cm, annot=True, fmt="d", cmap="Blues", xticklabels=classes, yticklabels=classes)
+     plt.xlabel("Predicted Labels")
+     plt.ylabel("True Labels")
+     plt.title("Confusion Matrix")
+     plt.show()
+
+ # Classes for the 4-class problem
+ classes = ['Normal', 'Peaking', 'Sneaking', 'Stealing']
+
+ plot_confusion_matrix(true_labels, predicted_labels, classes)
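
The hand-rolled calculate_accuracy above duplicates scikit-learn's built-in; a quick cross-check on the same label lists (both report 22/24 ≈ 0.92 here):

    from sklearn.metrics import accuracy_score
    print(accuracy_score(true_labels, predicted_labels))  # matches calculate_accuracy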