Update app.py
app.py CHANGED
@@ -6,12 +6,13 @@ from PIL import Image, ImageEnhance
 from ultralytics import YOLO
 import json
 
-
 model_path = "best.pt"
 model = YOLO(model_path)
 
 def preprocess_image(image):
-    image
+    """Preprocesses the image: enhances sharpness, contrast, brightness, and resizes it."""
+    if isinstance(image, np.ndarray):  # Ensure it's a PIL image
+        image = Image.fromarray(image)
 
     image = ImageEnhance.Sharpness(image).enhance(2.0)  # Increase sharpness
     image = ImageEnhance.Contrast(image).enhance(1.5)  # Increase contrast
@@ -25,12 +26,8 @@ def preprocess_image(image):
 
     return image
 
-def imageRotation(image):
-    """Dummy function for now."""
-    return image
-
 def vision_ai_api(image, label):
-    """Dummy function simulating API call. Returns dummy JSON response."""
+    """Dummy function simulating an API call. Returns dummy JSON response."""
     return {
         "label": label,
         "extracted_data": {
@@ -41,19 +38,29 @@ def vision_ai_api(image, label):
     }
 
 def predict(image):
-
-
-
+    """Runs YOLO object detection on the input image and processes detected regions."""
+    # Ensure image is PIL format before preprocessing
+    if isinstance(image, np.ndarray):
+        image = Image.fromarray(image)
 
     image = preprocess_image(image)  # Apply preprocessing
 
-
+    # Convert image to NumPy array for YOLO model
+    image_np = np.array(image)
 
+    # Run YOLO prediction
+    results = model(image_np, conf=0.80)
+
     detected_classes = set()
     labels = []
     cropped_images = {}
 
+    # Ensure results contain boxes
     for result in results:
+        if result.boxes is None or len(result.boxes) == 0:
+            print("No objects detected.")
+            continue
+
         for box in result.boxes:
             x1, y1, x2, y2 = map(int, box.xyxy[0])
             conf = box.conf[0]
@@ -65,16 +72,17 @@ def predict(image):
             detected_classes.add(class_name)
             labels.append(f"{class_name} {conf:.2f}")
 
-            # Ensure bounding boxes are within the image
-            height, width =
+            # Ensure bounding boxes are within the image dimensions
+            height, width = image_np.shape[:2]
             x1, y1, x2, y2 = max(0, x1), max(0, y1), min(width, x2), min(height, y2)
 
             if x1 >= x2 or y1 >= y2:
                 print("Invalid bounding box, skipping.")
                 continue
 
-
-
+            # Crop the detected region
+            cropped = image_np[y1:y2, x1:x2]
+            cropped_pil = Image.fromarray(cropped)  # Convert to PIL for API
 
             # Call API
             api_response = vision_ai_api(cropped_pil, class_name)
@@ -83,6 +91,7 @@ def predict(image):
                 "api_response": json.dumps(api_response, indent=4)
             }
 
+    # Ensure outputs exist even if no detections were made
     if not cropped_images:
         return None, "No front detected", None, "No back detected", ["No valid detections"]
 
@@ -95,11 +104,9 @@ def predict(image):
     )
 
 
-
-# Gradio Interface
 iface = gr.Interface(
     fn=predict,
-    inputs="
+    inputs=gr.Image(type="pil"),  # Ensure input is PIL image
     outputs=["image", "text"],
     title="License Field Detection (Front & Back Card)",
     description="Detect front & back of a license card, crop the images, and call Vision AI API separately for each."
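
The recurring pattern in this commit is the PIL/NumPy round-trip: Gradio may hand predict() a NumPy array, which the isinstance guards normalize to PIL for the ImageEnhance calls, before np.array() converts it back for YOLO inference and crop slicing. A standalone sketch of that round-trip, illustrative only (the 100x200 frame is made up):

import numpy as np
from PIL import Image

frame = np.zeros((100, 200, 3), dtype=np.uint8)  # hypothetical 100x200 RGB frame
pil_img = Image.fromarray(frame)                 # ndarray -> PIL, as in the isinstance guards
round_trip = np.array(pil_img)                   # PIL -> ndarray, as before model()
height, width = round_trip.shape[:2]             # shape[:2] is (height, width), as in the clamp code
assert (height, width) == (100, 200)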
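
The hunks end at the description argument, so the close of gr.Interface(...) and the server start are not visible here; presumably the file finishes with iface.launch(). For a quick check of predict() without the UI, a sketch along these lines could be appended — sample.jpg is a placeholder path, and the five-part return shape is inferred from the no-detection branch above:

from PIL import Image

if __name__ == "__main__":
    test_image = Image.open("sample.jpg").convert("RGB")  # placeholder input path
    front_img, front_text, back_img, back_text, labels = predict(test_image)
    print("Labels:", labels)
    print("Front:", front_text)
    print("Back:", back_text)
    iface.launch()  # assumed launch call; not visible in the hunks shown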