# DeepFake Detection Gradio app (Hugging Face Space)
import gradio as gr
from transformers import ViTForImageClassification, ViTImageProcessor
from PIL import Image
import torch
import logging
# Set up module-level logging (INFO level so the label-mapping check below is visible)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Load the model and processor from Hugging Face.
# NOTE: from_pretrained downloads the checkpoint on first run (network side effect
# at import time) and caches it locally afterwards.
model = ViTForImageClassification.from_pretrained("prithivMLmods/Deep-Fake-Detector-v2-Model")
processor = ViTImageProcessor.from_pretrained("prithivMLmods/Deep-Fake-Detector-v2-Model")
# Log model configuration to verify label mapping (which index is Real vs Fake)
logger.info(f"Model label mapping: {model.config.id2label}")
def detect(image, confidence_threshold=0.5):
    """Analyze an uploaded image and report how likely it is to be a deepfake.

    Args:
        image: Filepath of the uploaded image (Gradio ``type='filepath'``),
            or ``None`` when nothing was uploaded.
        confidence_threshold: Fake-probability cutoff in [0, 1]; at or above
            it the verdict is "Fake", otherwise "Real". Defaults to 0.5.

    Returns:
        A 3-tuple of display strings: overall confidence with verdict,
        AI-generated-content likelihood, and face-manipulation likelihood.

    Raises:
        gr.Error: If no image was supplied or the analysis fails.
    """
    if image is None:
        raise gr.Error("Please upload an image to analyze")
    try:
        pil_image = Image.open(image).convert("RGB")
        # ViT checkpoints expect 224x224 input; LANCZOS keeps resampling quality high.
        pil_image = pil_image.resize((224, 224), Image.Resampling.LANCZOS)
        inputs = processor(images=pil_image, return_tensors="pt")
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        probabilities = torch.softmax(logits, dim=1)[0]
        # Derive the class indices from the checkpoint's own label mapping
        # instead of hard-coding 0=Real / 1=Fake, which silently misreports
        # results if the checkpoint orders its labels the other way around.
        id2label = model.config.id2label
        fake_idx = next(
            (i for i, label in id2label.items() if "fake" in label.lower()), 1
        )
        real_idx = 1 - fake_idx  # binary classifier: the other class is "Real"
        confidence_real = probabilities[real_idx].item() * 100
        confidence_fake = probabilities[fake_idx].item() * 100
        predicted_class = torch.argmax(logits, dim=1).item()
        # Log the raw argmax prediction so threshold effects can be audited.
        logger.info("Predicted label: %s (class %d)", id2label[predicted_class], predicted_class)
        threshold_predicted = "Fake" if confidence_fake / 100 >= confidence_threshold else "Real"
        confidence_score = max(confidence_real, confidence_fake)
        # The model emits a single Real/Fake probability; both sub-metrics reuse
        # the Fake probability until dedicated per-category detectors exist.
        aigen_likelihood = confidence_fake
        face_manipulation_likelihood = confidence_fake
        # Format outputs for the three gr.Label components.
        overall = f"{confidence_score:.1f}% Confidence ({threshold_predicted})"
        aigen = f"{aigen_likelihood:.1f}% (AI-Generated Content Likelihood)"
        deepfake = f"{face_manipulation_likelihood:.1f}% (Face Manipulation Likelihood)"
        return overall, aigen, deepfake
    except Exception as e:
        # logger.exception records the full traceback, not just the message.
        logger.exception("Error during analysis")
        raise gr.Error(f"Analysis error: {str(e)}")
# CSS injected into the Gradio Blocks app: page container layout, header styling,
# and an animated gradient "Analyze Image" button (classes referenced via
# elem_classes below).
custom_css = """
.container {
max-width: 1200px;
margin: 0 auto;
padding: 20px;
font-family: 'Arial', sans-serif;
}
.header {
color: #2c3e50;
border-bottom: 2px solid #3498db;
padding-bottom: 10px;
}
.button-gradient {
background: linear-gradient(45deg, #3498db, #2ecc71, #9b59b6);
background-size: 400% 400%;
border: none;
padding: 12px 24px;
font-size: 16px;
font-weight: 600;
color: white;
border-radius: 8px;
cursor: pointer;
transition: all 0.3s ease;
animation: gradientAnimation 3s ease infinite;
box-shadow: 0 2px 8px rgba(52, 152, 219, 0.3);
}
.button-gradient:hover {
transform: translateY(-2px);
box-shadow: 0 4px 12px rgba(52, 152, 219, 0.5);
}
@keyframes gradientAnimation {
0% { background-position: 0% 50%; }
50% { background-position: 100% 50%; }
100% { background-position: 0% 50%; }
}
"""
# Introductory HTML shown above the interface; explains each output metric.
# Model name matches the checkpoint actually loaded above (v2), and the <ul>
# is closed out of the <p> so the HTML is valid.
MARKDOWN0 = """
<div class="header">
<h1>DeepFake Detection System</h1>
<p>Advanced AI-powered analysis for identifying manipulated media<br>
Powered by prithivMLmods/Deep-Fake-Detector-v2-Model (Updated Jan 2025)</p>
<ul>
<li><strong>Confidence Score:</strong> Overall probability the image is Real or Fake (based on threshold).</li>
<li><strong>AI-Generated Content Likelihood:</strong> Probability the image was fully generated by AI.</li>
<li><strong>Face Manipulation Likelihood:</strong> Probability the image contains manipulated faces (e.g., swaps or alterations).</li>
</ul>
<p>Adjust threshold to tune sensitivity; check logs for detailed output</p>
</div>
"""
# Assemble the Gradio UI: inputs (image, threshold, button) in the left
# column, the three result labels in the right column.
with gr.Blocks(css=custom_css, theme=gr.themes.Default()) as demo:
    gr.Markdown(MARKDOWN0)
    with gr.Row(elem_classes="container"):
        with gr.Column(scale=1):
            image = gr.Image(type='filepath', height=400, label="Upload Image")
            threshold = gr.Slider(0, 1, value=0.5, step=0.01, label="Confidence Threshold (Fake)")
            analyze_button = gr.Button("Analyze Image", elem_classes="button-gradient")
        with gr.Column(scale=2):
            overall = gr.Label(label="Confidence Score")
            aigen = gr.Label(label="AI-Generated Content")
            deepfake = gr.Label(label="Face Manipulation")
    # Wire the button to the detector: two inputs in, three labels out.
    analyze_button.click(fn=detect, inputs=[image, threshold], outputs=[overall, aigen, deepfake])
# Launch the application; debug=True surfaces full tracebacks in the console.
# (Removed a stray trailing "|" left over from a copy/paste, which was a SyntaxError.)
demo.launch(debug=True)