Spaces: Running
Commit: Update app.py
app.py (CHANGED)
Old side of the diff (lines removed by this commit are prefixed with "-"):

@@ -10,12 +10,9 @@ MODEL_IDENTIFIER = r"Ateeqq/ai-vs-human-image-detector"
 DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 # --- Suppress specific warnings ---
-# Suppress the specific PIL warning about potential decompression bombs
 warnings.filterwarnings("ignore", message="Possibly corrupt EXIF data.")
-# Suppress transformers warning about loading weights without specifying revision
 warnings.filterwarnings("ignore", message=".*You are using the default legacy behaviour.*")
 
-
 # --- Load Model and Processor (Load once at startup) ---
 print(f"Using device: {DEVICE}")
 print(f"Loading processor from: {MODEL_IDENTIFIER}")
@@ -28,66 +25,43 @@ try:
     print("Model and processor loaded successfully.")
 except Exception as e:
     print(f"FATAL: Error loading model or processor: {e}")
-    # If the model fails to load, we raise an exception to stop the app
     raise gr.Error(f"Failed to load the model: {e}. Cannot start the application.") from e
 
 # --- Prediction Function ---
 def classify_image(image_pil):
-    """
-    Classifies an image as AI-generated or Human-made.
-
-    Args:
-        image_pil (PIL.Image.Image): Input image in PIL format.
-
-    Returns:
-        dict: A dictionary mapping class labels ('ai', 'human') to their
-              confidence scores. Returns an empty dict if input is None.
-    """
     if image_pil is None:
-        # Handle case where the user clears the image input
         print("Warning: No image provided.")
-        return {}
 
     print("Processing image...")
     try:
-        # Ensure image is RGB
         image = image_pil.convert("RGB")
-
-        # Preprocess using the loaded processor
         inputs = processor(images=image, return_tensors="pt").to(DEVICE)
 
-        # Perform inference
         print("Running inference...")
         with torch.no_grad():
             outputs = model(**inputs)
             logits = outputs.logits
 
-
-        # outputs.logits is shape [1, num_labels], softmax over the last dim
-        probabilities = torch.softmax(logits, dim=-1)[0] # Get probabilities for the first (and only) image
-
-        # Create a dictionary of label -> score
         results = {}
         for i, prob in enumerate(probabilities):
             label = model.config.id2label[i]
-            results[label] = round(prob.item(), 4)
 
         print(f"Prediction results: {results}")
         return results
-
     except Exception as e:
         print(f"Error during prediction: {e}")
-        # Return error in the format expected by gr.Label
-        # Provide a user-friendly error message in the output
         return {"Error": f"Processing failed. Please try again or use a different image."}
 
 # --- Define Example Images ---
 example_dir = "examples"
 example_images = []
-if os.path.exists(example_dir) and os.listdir(example_dir):
     for img_name in os.listdir(example_dir):
         if img_name.lower().endswith(('.png', '.jpg', '.jpeg', '.webp')):
-
     if example_images:
         print(f"Found examples: {example_images}")
     else:
@@ -95,92 +69,94 @@ if os.path.exists(example_dir) and os.listdir(example_dir): # Check if dir exist
 else:
     print("No 'examples' directory found or it's empty. Examples will not be shown.")
 
-
-#
 css = """
-body { font-family: 'Inter', sans-serif; }
 
 /* Style the main title */
 #app-title {
     text-align: center;
     font-weight: bold;
-    font-size: 2.5em;
-    margin-bottom: 5px;
-    color
 }
 
 /* Style the description */
 #app-description {
     text-align: center;
     font-size: 1.1em;
-    margin-bottom: 25px;
-    color
 }
-#app-description code { /* Style model name */
     font-weight: bold;
-    background-color:
     padding: 2px 5px;
     border-radius: 4px;
 }
 #app-description strong { /* Style device name */
-    color: #
 }
 
-/* Style the results area */
-#prediction-label .label-name { font-weight: bold; font-size: 1.1em; }
-#prediction-label .confidence { font-size: 1em; }
-
 /* Style the results heading */
 #results-heading {
     text-align: center;
-    font-size: 1.2em;
-    margin-bottom: 10px;
-    color
 }
 
-/*
-.gradio-container .examples-container { padding-top: 15px; }
-.gradio-container .examples-header { font-size: 1.1em; font-weight: bold; margin-bottom: 10px; color: #34495e; }
-
-/* Add a subtle border/shadow to input/output columns for definition */
 #input-column, #output-column {
-    border: 1px solid #
-    border-radius: 12px;
     padding: 20px;
-    box-shadow: 0 2px 8px rgba(0, 0, 0, 0.
-    background-color
 }
 
 /* Footer styling */
 #app-footer {
     margin-top: 40px;
     padding-top: 20px;
-    border-top: 1px solid #
     text-align: center;
     font-size: 0.9em;
-    color
 }
-#app-footer a { color: #3498db; text-decoration: none; }
-#app-footer a:hover { text-decoration: underline; }
 """
 
 # --- Gradio Interface using Blocks and Theme ---
-#
 theme = gr.themes.Soft(
-    primary_hue="emerald",
     secondary_hue="blue",
     neutral_hue="slate",
-    radius_size=gr.themes.sizes.radius_lg,
-    spacing_size=gr.themes.sizes.spacing_lg,
-).
-    # Further fine-tuning
-    body_background_fill="#f8f9fa", # Very light grey background
-    block_radius="12px",
-)
 
 
 with gr.Blocks(theme=theme, css=css) as iface:
-    # Title and Description
     gr.Markdown("# AI vs Human Image Detector", elem_id="app-title")
     gr.Markdown(
         f"Upload an image to classify if it was likely generated by AI or created by a human. "
@@ -188,59 +164,58 @@ with gr.Blocks(theme=theme, css=css) as iface:
         elem_id="app-description"
     )
 
-    # Main layout
-    with gr.Row(variant='panel'): #
         with gr.Column(scale=1, min_width=300, elem_id="input-column"):
             image_input = gr.Image(
                 type="pil",
                 label="🖼️ Upload Your Image",
                 sources=["upload", "webcam", "clipboard"],
-                height=400,
             )
-            submit_button = gr.Button("🔍 Classify Image", variant="primary")
 
         with gr.Column(scale=1, min_width=300, elem_id="output-column"):
-            # Use elem_id and target with CSS for styling
            gr.Markdown("📊 **Prediction Results**", elem_id="results-heading")
            result_output = gr.Label(
                num_top_classes=2,
                label="Classification",
                elem_id="prediction-label"
            )
 
    # Examples Section
-    if example_images:
        gr.Examples(
            examples=example_images,
            inputs=image_input,
            outputs=result_output,
            fn=classify_image,
-            cache_examples=True,
            label="✨ Click an Example to Try!"
        )
 
    # Footer / Article section
-
-
 ---
 This application uses a fine-tuned [SigLIP](https://huggingface.co/docs/transformers/model_doc/siglip) vision model
 specifically trained to differentiate between images generated by Artificial Intelligence and those created by humans.
 
-    You can find the model card here: <a href='https://huggingface.co/{
 
 Fine tuning code available at [https://exnrt.com/blog/ai/fine-tuning-siglip2/](https://exnrt.com/blog/ai/fine-tuning-siglip2/).
-    """
    elem_id="app-footer"
    )
 
-    # Connect
-    # Use api_name for potential API usage later
    submit_button.click(fn=classify_image, inputs=image_input, outputs=result_output, api_name="classify_image_button")
    image_input.change(fn=classify_image, inputs=image_input, outputs=result_output, api_name="classify_image_change")
 
-
 # --- Launch the App ---
 if __name__ == "__main__":
    print("Launching Gradio interface...")
-    iface.launch()
    print("Gradio interface launched.")
New side of the diff (lines added by this commit are prefixed with "+"):

@@ -10,12 +10,9 @@ MODEL_IDENTIFIER = r"Ateeqq/ai-vs-human-image-detector"
 DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 # --- Suppress specific warnings ---
 warnings.filterwarnings("ignore", message="Possibly corrupt EXIF data.")
 warnings.filterwarnings("ignore", message=".*You are using the default legacy behaviour.*")
 
 # --- Load Model and Processor (Load once at startup) ---
 print(f"Using device: {DEVICE}")
 print(f"Loading processor from: {MODEL_IDENTIFIER}")
@@ -28,66 +25,43 @@ try:
     print("Model and processor loaded successfully.")
 except Exception as e:
     print(f"FATAL: Error loading model or processor: {e}")
     raise gr.Error(f"Failed to load the model: {e}. Cannot start the application.") from e
 
 # --- Prediction Function ---
 def classify_image(image_pil):
     if image_pil is None:
         print("Warning: No image provided.")
+        return {}
 
     print("Processing image...")
     try:
         image = image_pil.convert("RGB")
         inputs = processor(images=image, return_tensors="pt").to(DEVICE)
 
         print("Running inference...")
         with torch.no_grad():
             outputs = model(**inputs)
             logits = outputs.logits
 
+        probabilities = torch.softmax(logits, dim=-1)[0]
         results = {}
         for i, prob in enumerate(probabilities):
             label = model.config.id2label[i]
+            results[label] = round(prob.item(), 4)
 
         print(f"Prediction results: {results}")
         return results
     except Exception as e:
         print(f"Error during prediction: {e}")
         return {"Error": f"Processing failed. Please try again or use a different image."}
 
 # --- Define Example Images ---
 example_dir = "examples"
 example_images = []
+if os.path.exists(example_dir) and os.listdir(example_dir):
     for img_name in os.listdir(example_dir):
         if img_name.lower().endswith(('.png', '.jpg', '.jpeg', '.webp')):
+            example_images.append(os.path.join(example_dir, img_name))
     if example_images:
         print(f"Found examples: {example_images}")
     else:
@@ -95,92 +69,94 @@ if os.path.exists(example_dir) and os.listdir(example_dir): # Check if dir exist
 else:
     print("No 'examples' directory found or it's empty. Examples will not be shown.")
 
+# --- Custom CSS for Dark Theme Adjustments ---
+# Minimal CSS - let the dark theme handle most things
 css = """
+body { font-family: 'Inter', sans-serif; }
 
 /* Style the main title */
 #app-title {
     text-align: center;
     font-weight: bold;
+    font-size: 2.5em;
+    margin-bottom: 5px;
+    /* color removed - let theme handle */
 }
 
 /* Style the description */
 #app-description {
     text-align: center;
     font-size: 1.1em;
+    margin-bottom: 25px;
+    /* color removed - let theme handle */
 }
+#app-description code { /* Style model name - theme might handle this, but can force */
     font-weight: bold;
+    background-color: rgba(255, 255, 255, 0.1); /* Slightly lighter background for code */
     padding: 2px 5px;
     border-radius: 4px;
+    color: #c5f7dc; /* Light green text for code block */
 }
 #app-description strong { /* Style device name */
+    color: #2dd4bf; /* Brighter teal/emerald for dark theme */
+    font-weight: bold;
 }
 
 /* Style the results heading */
 #results-heading {
     text-align: center;
+    font-size: 1.2em;
+    margin-bottom: 10px;
+    /* color removed - let theme handle */
 }
 
+/* Add some definition to input/output columns if needed */
 #input-column, #output-column {
+    border: 1px solid #4b5563; /* Darker border for dark theme */
+    border-radius: 12px;
     padding: 20px;
+    box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1); /* Subtle shadow, works on dark too */
+    /* background-color removed - let theme handle */
 }
 
+/* Ensure label text inside columns is readable */
+#prediction-label .label-name { font-weight: bold; font-size: 1.1em; }
+#prediction-label .confidence { font-size: 1em; }
+/* Theme should make label text light, but force if needed: */
+/* #prediction-label { color: #e5e7eb; } */
+
+
 /* Footer styling */
 #app-footer {
     margin-top: 40px;
     padding-top: 20px;
+    border-top: 1px solid #374151; /* Darker border for footer */
     text-align: center;
     font-size: 0.9em;
+    /* color removed - let theme handle */
+}
+#app-footer a {
+    color: #60a5fa; /* Lighter blue for links */
+    text-decoration: none;
+}
+#app-footer a:hover {
+    text-decoration: underline;
 }
 """
 
 # --- Gradio Interface using Blocks and Theme ---
+# Apply .dark() to the theme
 theme = gr.themes.Soft(
+    primary_hue="emerald",
     secondary_hue="blue",
     neutral_hue="slate",
+    radius_size=gr.themes.sizes.radius_lg,
+    spacing_size=gr.themes.sizes.spacing_lg,
+).dark() # <<< APPLY DARK MODE HERE
 
 
 with gr.Blocks(theme=theme, css=css) as iface:
+    # Title and Description
     gr.Markdown("# AI vs Human Image Detector", elem_id="app-title")
     gr.Markdown(
         f"Upload an image to classify if it was likely generated by AI or created by a human. "
@@ -188,59 +164,58 @@ with gr.Blocks(theme=theme, css=css) as iface:
         elem_id="app-description"
     )
 
+    # Main layout
+    with gr.Row(variant='panel'): # Panel might look different in dark theme, adjust if needed
         with gr.Column(scale=1, min_width=300, elem_id="input-column"):
             image_input = gr.Image(
                 type="pil",
                 label="🖼️ Upload Your Image",
                 sources=["upload", "webcam", "clipboard"],
+                height=400,
             )
+            submit_button = gr.Button("🔍 Classify Image", variant="primary")
 
         with gr.Column(scale=1, min_width=300, elem_id="output-column"):
            gr.Markdown("📊 **Prediction Results**", elem_id="results-heading")
            result_output = gr.Label(
                num_top_classes=2,
                label="Classification",
                elem_id="prediction-label"
+                # The theme should now correctly style the label text for dark mode
            )
 
    # Examples Section
+    if example_images:
        gr.Examples(
            examples=example_images,
            inputs=image_input,
            outputs=result_output,
            fn=classify_image,
+            cache_examples=True,
            label="✨ Click an Example to Try!"
+            # Examples appearance will also adapt to the dark theme
        )
 
    # Footer / Article section
+    # Removed explicit model ID formatting from Markdown string, use f-string
+    gr.Markdown(f"""
 ---
 This application uses a fine-tuned [SigLIP](https://huggingface.co/docs/transformers/model_doc/siglip) vision model
 specifically trained to differentiate between images generated by Artificial Intelligence and those created by humans.
 
+You can find the model card here: <a href='https://huggingface.co/{MODEL_IDENTIFIER}' target='_blank'>{MODEL_IDENTIFIER}</a>
 
 Fine tuning code available at [https://exnrt.com/blog/ai/fine-tuning-siglip2/](https://exnrt.com/blog/ai/fine-tuning-siglip2/).
+    """,
    elem_id="app-footer"
    )
 
+    # Connect events
    submit_button.click(fn=classify_image, inputs=image_input, outputs=result_output, api_name="classify_image_button")
    image_input.change(fn=classify_image, inputs=image_input, outputs=result_output, api_name="classify_image_change")
 
 # --- Launch the App ---
 if __name__ == "__main__":
    print("Launching Gradio interface...")
+    iface.launch()
    print("Gradio interface launched.")