syedfaisalabrar committed
Commit af1f46a · verified · 1 Parent(s): d885a52

Update app.py

Files changed (1): app.py (+14 -6)
app.py CHANGED
@@ -41,8 +41,12 @@ def vision_ai_api(image, label):
     }
 
 def predict(image):
-    image = preprocess_image(image)
-
+    # Convert PIL image to NumPy array
+    if isinstance(image, Image.Image):
+        image = np.array(image)
+
+    image = preprocess_image(image)  # Apply preprocessing
+
     results = model(image, conf=0.80)
 
     detected_classes = set()
@@ -62,19 +66,22 @@ def predict(image):
             labels.append(f"{class_name} {conf:.2f}")
 
             # Ensure bounding boxes are within the image
-            height, width = image.shape[:2]
+            height, width = image.shape[:2]  # Works now that image is a NumPy array
             x1, y1, x2, y2 = max(0, x1), max(0, y1), min(width, x2), min(height, y2)
 
             if x1 >= x2 or y1 >= y2:
                 print("Invalid bounding box, skipping.")
                 continue
 
-            cropped = image[y1:y2, x1:x2]
-            cropped_pil = Image.fromarray(cropped)
+            cropped = image[y1:y2, x1:x2]  # Crop the detected region
+            cropped_pil = Image.fromarray(cropped)  # Convert back to PIL
 
             # Call API
             api_response = vision_ai_api(cropped_pil, class_name)
-            cropped_images[class_name] = {"image": cropped_pil, "api_response": json.dumps(api_response, indent=4)}
+            cropped_images[class_name] = {
+                "image": cropped_pil,
+                "api_response": json.dumps(api_response, indent=4)
+            }
 
     if not cropped_images:
         return None, "No front detected", None, "No back detected", ["No valid detections"]
@@ -88,6 +95,7 @@ def predict(image):
     )
 
 
+
 # Gradio Interface
 iface = gr.Interface(
     fn=predict,
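The substance of the fix: predict previously ran preprocess_image on the raw Gradio input and later relied on image.shape, which fails when the input arrives as a PIL.Image (PIL images expose .size, not .shape); hence the new isinstance check and np.array conversion at the top of the function. A minimal standalone sketch of the difference, using a blank test image rather than anything from app.py:

# Sketch only; assumes the same PIL / NumPy imports app.py already uses
from PIL import Image
import numpy as np

pil_img = Image.new("RGB", (640, 480))      # stand-in for the Gradio input; has .size, not .shape
arr = np.array(pil_img)                     # same conversion the commit adds before preprocessing
height, width = arr.shape[:2]               # (480, 640): shape-based bounding-box clamping now works
cropped = arr[0:height // 2, 0:width // 2]  # NumPy slicing, as in image[y1:y2, x1:x2]
cropped_pil = Image.fromarray(cropped)      # back to PIL before the vision_ai_api call
print(height, width, cropped_pil.size)      # 480 640 (320, 240)

The conversion back with Image.fromarray mirrors the app's own flow: slicing yields a NumPy array, while vision_ai_api appears to expect a PIL image.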