radub23
committed on
Commit
·
aefec06
1
Parent(s):
059897f
Completely rewrite detect_warning_lamp with robust tensor handling and debugging
Browse files
app.py
CHANGED
@@ -41,27 +41,78 @@ def detect_warning_lamp(image, history: list[tuple[str, str]], system_message):
|
|
41 |
Returns:
|
42 |
Updated chat history with prediction results
|
43 |
"""
|
|
|
|
|
|
|
|
|
44 |
try:
|
|
|
|
|
|
|
45 |
# Convert PIL image to FastAI compatible format
|
46 |
img = PILImage(image)
|
|
|
47 |
|
48 |
# Get model prediction
|
|
|
49 |
pred_class, pred_idx, probs = learn_inf.predict(img)
|
50 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
51 |
# Format the prediction results
|
52 |
-
|
53 |
-
response = f"Detected Warning Lamp: {pred_class}\nConfidence: {confidence:.2%}"
|
54 |
|
55 |
# Add probabilities for all classes
|
56 |
response += "\n\nProbabilities for all classes:"
|
57 |
-
|
58 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
59 |
|
60 |
# Update chat history
|
61 |
history.append((None, response))
|
62 |
return history
|
63 |
except Exception as e:
|
64 |
error_msg = f"Error processing image: {str(e)}"
|
|
|
|
|
|
|
65 |
history.append((None, error_msg))
|
66 |
return history
|
67 |
|
|
|
41 |
Returns:
|
42 |
Updated chat history with prediction results
|
43 |
"""
|
44 |
+
if image is None:
|
45 |
+
history.append((None, "Please upload an image first."))
|
46 |
+
return history
|
47 |
+
|
48 |
try:
|
49 |
+
# Print debug info
|
50 |
+
print(f"Image type: {type(image)}")
|
51 |
+
|
52 |
# Convert PIL image to FastAI compatible format
|
53 |
img = PILImage(image)
|
54 |
+
print(f"Converted to PILImage: {type(img)}")
|
55 |
|
56 |
# Get model prediction
|
57 |
+
print("Running prediction...")
|
58 |
pred_class, pred_idx, probs = learn_inf.predict(img)
|
59 |
|
60 |
+
# Print debug info about prediction results
|
61 |
+
print(f"Prediction class type: {type(pred_class)}, value: {pred_class}")
|
62 |
+
print(f"Prediction index type: {type(pred_idx)}, value: {pred_idx}")
|
63 |
+
print(f"Probabilities type: {type(probs)}, shape: {probs.shape if hasattr(probs, 'shape') else 'no shape'}")
|
64 |
+
|
65 |
+
# Safely convert tensors to Python types
|
66 |
+
try:
|
67 |
+
# Handle pred_class (could be string or tensor)
|
68 |
+
if hasattr(pred_class, 'item'):
|
69 |
+
pred_class_str = str(pred_class.item())
|
70 |
+
else:
|
71 |
+
pred_class_str = str(pred_class)
|
72 |
+
|
73 |
+
# Handle pred_idx (convert tensor to int)
|
74 |
+
if hasattr(pred_idx, 'item'):
|
75 |
+
pred_idx_int = pred_idx.item()
|
76 |
+
else:
|
77 |
+
pred_idx_int = int(pred_idx)
|
78 |
+
|
79 |
+
# Get confidence score for predicted class
|
80 |
+
if hasattr(probs[pred_idx_int], 'item'):
|
81 |
+
confidence = probs[pred_idx_int].item()
|
82 |
+
else:
|
83 |
+
confidence = float(probs[pred_idx_int])
|
84 |
+
|
85 |
+
print(f"Converted values - class: {pred_class_str}, index: {pred_idx_int}, confidence: {confidence}")
|
86 |
+
except Exception as conversion_error:
|
87 |
+
print(f"Error during tensor conversion: {conversion_error}")
|
88 |
+
raise
|
89 |
+
|
90 |
# Format the prediction results
|
91 |
+
response = f"Detected Warning Lamp: {pred_class_str}\nConfidence: {confidence:.2%}"
|
|
|
92 |
|
93 |
# Add probabilities for all classes
|
94 |
response += "\n\nProbabilities for all classes:"
|
95 |
+
|
96 |
+
# Safely iterate through probabilities
|
97 |
+
for i, cls in enumerate(learn_inf.dls.vocab):
|
98 |
+
try:
|
99 |
+
if hasattr(probs[i], 'item'):
|
100 |
+
prob_value = probs[i].item()
|
101 |
+
else:
|
102 |
+
prob_value = float(probs[i])
|
103 |
+
response += f"\n- {cls}: {prob_value:.2%}"
|
104 |
+
except Exception as prob_error:
|
105 |
+
print(f"Error processing probability for class {cls}: {prob_error}")
|
106 |
+
response += f"\n- {cls}: Error"
|
107 |
|
108 |
# Update chat history
|
109 |
history.append((None, response))
|
110 |
return history
|
111 |
except Exception as e:
|
112 |
error_msg = f"Error processing image: {str(e)}"
|
113 |
+
print(f"Exception in detect_warning_lamp: {e}")
|
114 |
+
import traceback
|
115 |
+
traceback.print_exc()
|
116 |
history.append((None, error_msg))
|
117 |
return history
|
118 |
|