Update app.py
Browse files
app.py
CHANGED
@@ -10,18 +10,14 @@ app = Flask(__name__)
|
|
10 |
upload_folder = os.path.join('static', 'uploads')
|
11 |
os.makedirs(upload_folder, exist_ok=True)
|
12 |
|
13 |
-
# Fake News Detection
|
14 |
-
|
15 |
-
"mrm8488": pipeline("text-classification", model="mrm8488/bert-tiny-finetuned-fake-news-detection"),
|
16 |
-
"google-electra": pipeline("text-classification", model="google/electra-base-discriminator"),
|
17 |
-
"bert-base": pipeline("text-classification", model="bert-base-uncased")
|
18 |
-
}
|
19 |
|
20 |
# Image Detection Model (CLIP-based)
|
21 |
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
|
22 |
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
|
23 |
|
24 |
-
# HTML Template with
|
25 |
HTML_TEMPLATE = """
|
26 |
<!DOCTYPE html>
|
27 |
<html lang="en">
|
@@ -42,19 +38,14 @@ HTML_TEMPLATE = """
|
|
42 |
<h1>π° Fake News Detection</h1>
|
43 |
<form method="POST" action="/detect">
|
44 |
<textarea name="text" placeholder="Enter news text..." required></textarea>
|
45 |
-
<label for="model">Select Fake News Model:</label>
|
46 |
-
<select name="model" required>
|
47 |
-
<option value="mrm8488">MRM8488 (BERT-Tiny)</option>
|
48 |
-
<option value="google-electra">Google Electra (Base Discriminator)</option>
|
49 |
-
<option value="bert-base">BERT-Base Uncased</option>
|
50 |
-
</select>
|
51 |
<button type="submit">Detect News Authenticity</button>
|
52 |
</form>
|
53 |
|
54 |
{% if news_prediction %}
|
55 |
<div class="result">
|
56 |
<h2>π§ News Detection Result:</h2>
|
57 |
-
<p>{{ news_prediction }}</p>
|
|
|
58 |
</div>
|
59 |
{% endif %}
|
60 |
|
@@ -68,7 +59,7 @@ HTML_TEMPLATE = """
|
|
68 |
<div class="result">
|
69 |
<h2>π· Image Detection Result:</h2>
|
70 |
<p>{{ image_prediction|safe }}</p>
|
71 |
-
<p><strong>Explanation:</strong> The model compares the uploaded image against
|
72 |
</div>
|
73 |
{% endif %}
|
74 |
</div>
|
@@ -83,16 +74,17 @@ def home():
|
|
83 |
@app.route("/detect", methods=["POST"])
|
84 |
def detect():
|
85 |
text = request.form.get("text")
|
86 |
-
|
|
|
87 |
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
result = news_models[model_key](text)[0]
|
92 |
-
label = "REAL" if result['label'].lower() in ["real", "label_1", "neutral"] else "FAKE"
|
93 |
confidence = result['score'] * 100
|
94 |
|
95 |
-
prediction_text =
|
|
|
|
|
|
|
96 |
return render_template_string(HTML_TEMPLATE, news_prediction=prediction_text)
|
97 |
|
98 |
@app.route("/detect_image", methods=["POST"])
|
@@ -103,7 +95,6 @@ def detect_image():
|
|
103 |
file = request.files["image"]
|
104 |
img = Image.open(file).convert("RGB")
|
105 |
|
106 |
-
# Compare with AI and Human prompts
|
107 |
prompts = ["AI-generated image", "Human-created image"]
|
108 |
inputs = clip_processor(text=prompts, images=img, return_tensors="pt", padding=True)
|
109 |
|
|
|
10 |
# Ensure the static/uploads directory exists before any image is saved there.
upload_folder = os.path.join("static", "uploads")
os.makedirs(upload_folder, exist_ok=True)
|
12 |
|
13 |
+
# Text classifier used by /detect for the fake-news verdict.
# NOTE(review): this checkpoint is an MNLI (entailment) model, so its labels are
# entailment/neutral/contradiction rather than real/fake — confirm it is the
# intended choice for single-text fake-news classification.
news_model = pipeline("text-classification", model="microsoft/deberta-v3-base-mnli")

# CLIP vision-language model used by /detect_image to score an uploaded image
# against text prompts.
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
|
19 |
|
20 |
+
# HTML Template with enhanced explanations
|
21 |
HTML_TEMPLATE = """
|
22 |
<!DOCTYPE html>
|
23 |
<html lang="en">
|
|
|
38 |
<h1>📰 Fake News Detection</h1>
|
39 |
<form method="POST" action="/detect">
|
40 |
<textarea name="text" placeholder="Enter news text..." required></textarea>
|
|
|
|
|
|
|
|
|
|
|
|
|
41 |
<button type="submit">Detect News Authenticity</button>
|
42 |
</form>
|
43 |
|
44 |
{% if news_prediction %}
|
45 |
<div class="result">
|
46 |
<h2>🧠 News Detection Result:</h2>
|
47 |
+
<p>{{ news_prediction|safe }}</p>
|
48 |
+
<p><strong>Explanation:</strong> The model analyzes the text and classifies it as REAL or FAKE based on linguistic patterns, fact alignment, and contextual cues. High confidence indicates stronger evidence for the classification.</p>
|
49 |
</div>
|
50 |
{% endif %}
|
51 |
|
|
|
59 |
<div class="result">
|
60 |
<h2>📷 Image Detection Result:</h2>
|
61 |
<p>{{ image_prediction|safe }}</p>
|
62 |
+
<p><strong>Explanation:</strong> The model compares the uploaded image against prompts like "AI-generated image" and "Human-created image." Higher similarity to the AI prompt suggests an AI-generated image, and vice versa.</p>
|
63 |
</div>
|
64 |
{% endif %}
|
65 |
</div>
|
|
|
74 |
@app.route("/detect", methods=["POST"])
def detect():
    """Classify submitted news text as REAL or FAKE and re-render the page.

    Reads the "text" form field, runs the module-level ``news_model``
    pipeline on it, and renders ``HTML_TEMPLATE`` with an HTML-formatted
    verdict passed as ``news_prediction``.
    """
    # Strip so whitespace-only submissions are rejected too; a bare
    # `if not text` would forward "   " to the model.
    text = (request.form.get("text") or "").strip()
    if not text:
        return render_template_string(HTML_TEMPLATE, news_prediction="Invalid input.")

    # The pipeline returns a list of {label, score} dicts; take the top one.
    result = news_model(text)[0]
    # NOTE(review): news_model is an MNLI checkpoint, so its labels are
    # entailment/neutral/contradiction; the mapping below treats
    # entailment-or-neutral as REAL — confirm this is the intended mapping.
    label = "REAL" if result['label'].lower() in ["entailment", "neutral"] else "FAKE"
    confidence = result['score'] * 100

    # Rendered with |safe in the template, so keep this markup free of raw
    # user input to avoid XSS.
    prediction_text = (
        f"News is <strong>{label}</strong> (Confidence: {confidence:.2f}%)<br>"
        f"Reasoning: The model evaluated the likelihood that the provided text aligns with factual information versus being contradictory or fabricated."
    )
    return render_template_string(HTML_TEMPLATE, news_prediction=prediction_text)
|
89 |
|
90 |
@app.route("/detect_image", methods=["POST"])
|
|
|
95 |
file = request.files["image"]
|
96 |
img = Image.open(file).convert("RGB")
|
97 |
|
|
|
98 |
prompts = ["AI-generated image", "Human-created image"]
|
99 |
inputs = clip_processor(text=prompts, images=img, return_tensors="pt", padding=True)
|
100 |
|