# fake-real / app.py — Hugging Face Space "TruthLens", revision d0c4209.
# (The header above and the "raw / history blame / 4.89 kB" lines were
# Hugging Face web-UI residue captured with the source; kept here as a
# comment so the module parses.)
import os
from flask import Flask, request, render_template_string
from PIL import Image
import torch
from torchvision import models, transforms
from transformers import pipeline, CLIPProcessor, CLIPModel
# Flask application serving both the fake-news and AI-image detection UI.
app = Flask(__name__)

# Make sure the destination directory for uploaded images exists up front.
upload_folder = os.path.join("static", "uploads")
os.makedirs(upload_folder, exist_ok=True)
# Updated Fake News Detection Models
# Text classifiers selectable from the form's "model" dropdown; each
# pipeline downloads its checkpoint from the Hugging Face Hub at import
# time, so the first start of this app is slow and needs network access.
news_models = {
    "mrm8488": pipeline("text-classification", model="mrm8488/bert-tiny-finetuned-fake-news-detection"),
    "liam168": pipeline("text-classification", model="liam168/fake-news-bert-base-uncased"),
    # NOTE(review): this checkpoint is SST-2 *sentiment* analysis, not
    # fake-news detection — its POSITIVE/NEGATIVE labels never match the
    # REAL check in /detect, so it always reports FAKE; confirm whether a
    # real fake-news model was intended here.
    "distilbert": pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
}
# Updated Image Models for AI vs. Human Detection
# CLIP encoder + matching preprocessor used by /detect_image to embed
# uploaded images.
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
ai_image_models = {
    "clip-vit-base-patch32": clip_model
}
# Image transformation pipeline
# Standard ImageNet resize/normalize stack.  NOTE(review): `transform` is
# never applied anywhere in this file — /detect_image preprocesses with
# `clip_processor` instead — so this appears to be dead code; confirm
# before removing.
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
# HTML Template with Model Selection
HTML_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>AI & News Detection</title>
<style>
body { font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; background-color: #f5f5f5; padding: 20px; }
.container { background: white; padding: 30px; border-radius: 12px; max-width: 800px; margin: auto; box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1); }
textarea, select { width: 100%; padding: 12px; margin-top: 10px; border-radius: 8px; border: 1px solid #ccc; }
button { background-color: #4CAF50; color: white; border: none; padding: 12px 20px; border-radius: 8px; cursor: pointer; font-size: 16px; margin-top: 10px; }
button:hover { background-color: #45a049; }
.result { background: #e7f3fe; padding: 15px; border-radius: 10px; margin-top: 20px; }
</style>
</head>
<body>
<div class="container">
<h1>πŸ“° Fake News Detection</h1>
<form method="POST" action="/detect">
<textarea name="text" placeholder="Enter news text..." required></textarea>
<label for="model">Select Fake News Model:</label>
<select name="model" required>
<option value="mrm8488">MRM8488 (BERT-Tiny)</option>
<option value="liam168">Liam168 (BERT)</option>
<option value="distilbert">DistilBERT (SST-2)</option>
</select>
<button type="submit">Detect News Authenticity</button>
</form>
{% if news_prediction %}
<div class="result">
<h2>🧠 News Detection Result:</h2>
<p>{{ news_prediction }}</p>
</div>
{% endif %}
<h1>πŸ–ΌοΈ AI vs. Human Image Detection</h1>
<form method="POST" action="/detect_image" enctype="multipart/form-data">
<input type="file" name="image" required>
<button type="submit">Upload and Detect</button>
</form>
{% if image_prediction %}
<div class="result">
<h2>πŸ“· Image Detection Result:</h2>
<p>{{ image_prediction }}</p>
</div>
{% endif %}
</div>
</body>
</html>
"""
@app.route("/", methods=["GET"])
def home():
    """Serve the landing page with both detection forms and no results."""
    page = render_template_string(HTML_TEMPLATE)
    return page
@app.route("/detect", methods=["POST"])
def detect():
    """Classify the submitted text with the user-selected fake-news model.

    Reads `text` and `model` from the POSTed form, runs the matching
    pipeline from `news_models`, and re-renders the page with a
    REAL/FAKE verdict plus the model's confidence.
    """
    submitted_text = request.form.get("text")
    chosen_model = request.form.get("model")
    if chosen_model not in news_models or not submitted_text:
        return render_template_string(HTML_TEMPLATE, news_prediction="Invalid input or model selection.")
    prediction = news_models[chosen_model](submitted_text)[0]
    # NOTE(review): label vocabularies differ per checkpoint ("LABEL_1",
    # "POSITIVE", ...); anything outside {"real", "label_1"} is reported
    # as FAKE — confirm this mapping against each model's card.
    is_real = prediction["label"].lower() in ["real", "label_1"]
    verdict = "REAL" if is_real else "FAKE"
    pct = prediction["score"] * 100
    return render_template_string(
        HTML_TEMPLATE,
        news_prediction=f"News is {verdict} (Confidence: {pct:.2f}%)"
    )
@app.route("/detect_image", methods=["POST"])
def detect_image():
    """Classify an uploaded image as AI-generated or human-created.

    Embeds the image with CLIP's vision tower and thresholds the mean of
    the feature vector at 0.  NOTE(review): CLIP is a similarity model,
    not an AI-vs-human classifier, and the sign of the mean feature is
    not a principled decision rule — confirm whether a purpose-trained
    detector was intended.
    """
    file = request.files.get("image")
    # Guard both a missing multipart field and a selected-but-empty file
    # input (browsers submit an empty filename in that case).
    if file is None or not file.filename:
        return render_template_string(HTML_TEMPLATE, image_prediction="No image uploaded.")
    try:
        img = Image.open(file).convert("RGB")
    except (OSError, ValueError):
        # Non-image or truncated uploads raise UnidentifiedImageError
        # (an OSError subclass); report it instead of returning a 500.
        return render_template_string(HTML_TEMPLATE, image_prediction="Invalid image file.")
    inputs = clip_processor(images=img, return_tensors="pt")
    with torch.no_grad():
        image_features = ai_image_models["clip-vit-base-patch32"].get_image_features(**inputs)
    prediction = "AI-Generated" if torch.mean(image_features).item() > 0 else "Human-Created"
    return render_template_string(
        HTML_TEMPLATE,
        image_prediction=f"Prediction: {prediction}"
    )
if __name__ == "__main__":
    # Bind on all interfaces at port 7860 — the port Hugging Face Spaces
    # expects a containerized Flask app to listen on.
    app.run(host="0.0.0.0", port=7860)  # Suitable for Hugging Face Spaces