TruthLens committed
Commit 723860c · verified · 1 Parent(s): d59602f

Update app.py

Files changed (1)
  1. app.py +24 -70
app.py CHANGED
@@ -2,8 +2,7 @@ import os
 from flask import Flask, request, render_template_string
 from PIL import Image
 import torch
-from torchvision import transforms
-from transformers import pipeline, CLIPProcessor, CLIPModel
+from transformers import CLIPProcessor, CLIPModel
 
 app = Flask(__name__)
 
@@ -11,61 +10,28 @@ app = Flask(__name__)
 upload_folder = os.path.join('static', 'uploads')
 os.makedirs(upload_folder, exist_ok=True)
 
-# Fake News Detection Models
-news_models = {
-    "mrm8488": pipeline("text-classification", model="mrm8488/bert-tiny-finetuned-fake-news-detection"),
-    "google-electra": pipeline("text-classification", model="google/electra-base-discriminator"),
-    "bert-base": pipeline("text-classification", model="bert-base-uncased")
-}
-
-# Image Model for AI vs. Human Detection
+# Load CLIP model and processor
 clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
 clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
 
-# Image transformation pipeline
-transform = transforms.Compose([
-    transforms.Resize((224, 224)),
-    transforms.ToTensor(),
-    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-])
-
-# HTML Template
+# HTML Template with Model Selection and Explanations
 HTML_TEMPLATE = """
 <!DOCTYPE html>
 <html lang="en">
 <head>
     <meta charset="UTF-8">
-    <title>AI & News Detection</title>
+    <title>AI vs. Human Image Detection</title>
     <style>
         body { font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; background-color: #f5f5f5; padding: 20px; }
         .container { background: white; padding: 30px; border-radius: 12px; max-width: 850px; margin: auto; box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1); }
-        textarea, select { width: 100%; padding: 12px; margin-top: 10px; border-radius: 8px; border: 1px solid #ccc; }
-        button { background-color: #4CAF50; color: white; border: none; padding: 12px 20px; border-radius: 8px; cursor: pointer; font-size: 16px; margin-top: 10px; }
+        input[type="file"], button { width: 100%; padding: 12px; margin-top: 10px; border-radius: 8px; border: 1px solid #ccc; }
+        button { background-color: #4CAF50; color: white; border: none; font-size: 16px; cursor: pointer; }
         button:hover { background-color: #45a049; }
        .result { background: #e7f3fe; padding: 15px; border-radius: 10px; margin-top: 20px; }
     </style>
 </head>
 <body>
     <div class="container">
-        <h1>📰 Fake News Detection</h1>
-        <form method="POST" action="/detect">
-            <textarea name="text" placeholder="Enter news text..." required></textarea>
-            <label for="model">Select Fake News Model:</label>
-            <select name="model" required>
-                <option value="mrm8488">MRM8488 (BERT-Tiny)</option>
-                <option value="google-electra">Google Electra (Base Discriminator)</option>
-                <option value="bert-base">BERT-Base Uncased</option>
-            </select>
-            <button type="submit">Detect News Authenticity</button>
-        </form>
-
-        {% if news_prediction %}
-        <div class="result">
-            <h2>🧠 News Detection Result:</h2>
-            <p>{{ news_prediction }}</p>
-        </div>
-        {% endif %}
-
         <h1>🖼️ AI vs. Human Image Detection</h1>
         <form method="POST" action="/detect_image" enctype="multipart/form-data">
             <input type="file" name="image" required>
@@ -76,7 +42,7 @@ HTML_TEMPLATE = """
         <div class="result">
             <h2>📷 Image Detection Result:</h2>
             <p>{{ image_prediction }}</p>
-            <p><strong>Explanation:</strong> The model estimates image complexity by analyzing feature variability. Higher complexity typically indicates human-created content, while smoother, less varied features suggest AI generation.</p>
+            <p><strong>Explanation:</strong> The model compares the image to text prompts representing AI-generated and human-created images. The prediction is based on which prompt the image is more similar to.</p>
         </div>
         {% endif %}
     </div>
@@ -88,23 +54,6 @@ HTML_TEMPLATE = """
 def home():
     return render_template_string(HTML_TEMPLATE)
 
-@app.route("/detect", methods=["POST"])
-def detect():
-    text = request.form.get("text")
-    model_key = request.form.get("model")
-
-    if not text or model_key not in news_models:
-        return render_template_string(HTML_TEMPLATE, news_prediction="Invalid input or model selection.")
-
-    result = news_models[model_key](text)[0]
-    label = "REAL" if result['label'].lower() in ["real", "label_1", "neutral"] else "FAKE"
-    confidence = result['score'] * 100
-
-    return render_template_string(
-        HTML_TEMPLATE,
-        news_prediction=f"News is {label} (Confidence: {confidence:.2f}%)"
-    )
-
 @app.route("/detect_image", methods=["POST"])
 def detect_image():
     if "image" not in request.files:
@@ -112,22 +61,27 @@ def detect_image():
 
     file = request.files["image"]
     img = Image.open(file).convert("RGB")
-    inputs = clip_processor(images=img, return_tensors="pt")
+
+    # Text prompts for comparison
+    prompts = ["AI-generated image", "Human-created image"]
+
+    # Process image and text inputs
+    inputs = clip_processor(text=prompts, images=img, return_tensors="pt", padding=True)
 
     with torch.no_grad():
-        image_features = clip_model.get_image_features(**inputs)
-        feature_variance = torch.var(image_features).item()
-    prediction = "Human-Created" if feature_variance > 0.05 else "AI-Generated"
+        outputs = clip_model(**inputs)
+        logits_per_image = outputs.logits_per_image.squeeze()
+        probs = logits_per_image.softmax(dim=0)
 
-    explanation = (
-        f"Prediction: {prediction} (Feature Variance: {feature_variance:.4f}). "
-        "Higher variance indicates complex, diverse features typical of human-created images, while lower variance suggests smoother, AI-generated patterns."
-    )
+    ai_score, human_score = probs.tolist()
+    prediction = "AI-Generated" if ai_score > human_score else "Human-Created"
 
-    return render_template_string(
-        HTML_TEMPLATE,
-        image_prediction=explanation
+    result_text = (
+        f"Prediction: {prediction} <br>"
+        f"AI Similarity: {ai_score * 100:.2f}% | Human Similarity: {human_score * 100:.2f}%"
    )
 
+    return render_template_string(HTML_TEMPLATE, image_prediction=result_text)
+
 if __name__ == "__main__":
-    app.run(host="0.0.0.0", port=7860)
+    app.run(host="0.0.0.0", port=7860)  # Suitable for public launch
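
For reference, the zero-shot CLIP comparison that the updated detect_image route performs can be reproduced outside Flask with the minimal sketch below. It reuses the model name and prompts from the diff; the image path example.jpg is a placeholder, not part of the commit.

# Minimal standalone sketch (not part of the commit) of the CLIP zero-shot check in app.py.
import torch
from PIL import Image
from transformers import CLIPProcessor, CLIPModel

clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

img = Image.open("example.jpg").convert("RGB")  # placeholder image path
prompts = ["AI-generated image", "Human-created image"]

# Score the image against both prompts and convert the logits to probabilities.
inputs = clip_processor(text=prompts, images=img, return_tensors="pt", padding=True)
with torch.no_grad():
    probs = clip_model(**inputs).logits_per_image.squeeze().softmax(dim=0)

ai_score, human_score = probs.tolist()
print(f"AI-Generated: {ai_score:.2%} | Human-Created: {human_score:.2%}")

With the app running, the same check is exercised end-to-end by uploading an image through the /detect_image form served on port 7860.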