Update app.py
app.py CHANGED
```diff
@@ -4,27 +4,44 @@ from PIL import Image
 import torch
 import numpy as np
 import spaces
+import logging
+
+# Set up verbose logging
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
+
 # Load model and processor from the Hugging Face Hub
 MODEL_REPO = "Rausda6/autotrain-yh172-uui7d"  # Replace with your actual model repo name
+logger.debug(f"Loading model from: {MODEL_REPO}")
 model = AutoModelForImageClassification.from_pretrained(MODEL_REPO)
 processor = AutoImageProcessor.from_pretrained(MODEL_REPO)
 
 labels = model.config.id2label
+
 @spaces.GPU
 def classify_image(img: Image.Image):
-[13 lines removed: the previous classify_image body is not shown in this view]
+    logger.debug("Received image for classification.")
+    try:
+        inputs = processor(images=img, return_tensors="pt")
+        logger.debug(f"Processed inputs: {inputs}")
+        with torch.no_grad():
+            outputs = model(**inputs)
+        logger.debug(f"Model outputs: {outputs}")
+        logits = outputs.logits
+        probs = torch.nn.functional.softmax(logits, dim=-1)[0]
+        logger.debug(f"Probabilities: {probs}")
+
+        # Build result dictionary with confidence values
+        probs_dict = {labels[i]: float(probs[i]) for i in range(len(probs))}
+        # Sort and format nicely
+        sorted_probs = sorted(probs_dict.items(), key=lambda x: x[1], reverse=True)
+        top_label, top_score = sorted_probs[0]
+
+        logger.debug(f"Top prediction: {top_label} with confidence {top_score:.2%}")
+        return {"Prediction": top_label, "Confidence": f"{top_score:.2%}"}, dict(sorted_probs)
+    except Exception as e:
+        logger.exception("Error during classification")
+        raise e
 
 # Gradio interface
 demo = gr.Interface(
```
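The hunk ends just as the `gr.Interface(` call opens, so the interface definition itself is not part of this view. Since the rewritten `classify_image` now returns two values (a summary dict and the full probability mapping), the interface presumably declares two matching outputs. A minimal sketch under that assumption; the widget choices and labels below are illustrative, not taken from this commit:

```python
# Hypothetical completion of the truncated gr.Interface call; widgets are assumed.
demo = gr.Interface(
    fn=classify_image,
    inputs=gr.Image(type="pil"),  # matches the PIL.Image.Image parameter
    outputs=[
        gr.JSON(label="Top prediction"),        # first return value: {"Prediction": ..., "Confidence": ...}
        gr.Label(label="Class probabilities"),  # second return value: {label: score} dict
    ],
)

demo.launch()
```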
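The `@spaces.GPU` decorator is designed to degrade to a pass-through outside a ZeroGPU Space, so the handler should also be callable directly for a quick smoke test with app.py's globals loaded. A minimal sketch; `sample.jpg` is an illustrative path, not a file in this repo:

```python
# Quick local smoke test; "sample.jpg" is an illustrative path, not part of the repo.
from PIL import Image

img = Image.open("sample.jpg").convert("RGB")
summary, probabilities = classify_image(img)
print(summary)        # {"Prediction": <top label>, "Confidence": "<percent>"}
print(probabilities)  # full {label: score} mapping, highest first
```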
|