hackerbyhobby committed
Commit 65494f9 · unverified · 1 Parent(s): 04508b8

tuesday openai

Files changed (3)
  1. app.py +175 -16
  2. app.py.bestoftues +210 -0
  3. requirements.txt +1 -0
app.py CHANGED
@@ -5,6 +5,11 @@ from transformers import pipeline
 import re
 from langdetect import detect
 from deep_translator import GoogleTranslator
+import openai
+import os
+
+# Set up your OpenAI API key
+openai.api_key = os.getenv("OPENAI_API_KEY")
 
 # Translator instance
 translator = GoogleTranslator(source="auto", target="es")
@@ -23,7 +28,7 @@ CANDIDATE_LABELS = ["SMiShing", "Other Scam", "Legitimate"]
 
 def get_keywords_by_language(text: str):
     """
-    Detect language using `langdetect` and translate keywords if needed.
+    Detect language using langdetect and translate keywords if needed.
     """
     snippet = text[:200]
     try:
@@ -88,9 +93,129 @@ def boost_probabilities(probabilities: dict, text: str):
         "detected_lang": detected_lang
     }
 
+def query_llm_for_classification(raw_message: str) -> dict:
+    """
+    First LLM call: asks for a classification (SMiShing, Other Scam, or Legitimate)
+    acting as a cybersecurity expert. Returns label and short reason.
+    """
+    if not raw_message.strip():
+        return {"label": "Unknown", "reason": "No message provided to the LLM."}
+
+    system_prompt = (
+        "You are a cybersecurity expert. You will classify the user's message "
+        "as one of: SMiShing, Other Scam, or Legitimate. Provide a short reason. "
+        "Return only JSON with keys: label, reason."
+    )
+    user_prompt = f"Message: {raw_message}\nClassify it as SMiShing, Other Scam, or Legitimate."
+
+    try:
+        response = openai.ChatCompletion.create(
+            model="gpt-3.5-turbo",
+            messages=[
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": user_prompt}
+            ],
+            temperature=0.2
+        )
+        raw_reply = response["choices"][0]["message"]["content"].strip()
+
+        import json
+        # Expect something like {"label": "...", "reason": "..."}
+        llm_result = json.loads(raw_reply)
+        if "label" not in llm_result or "reason" not in llm_result:
+            return {"label": "Unknown", "reason": f"Unexpected format: {raw_reply}"}
+
+        return llm_result
+
+    except Exception as e:
+        return {"label": "Unknown", "reason": f"LLM error: {e}"}
+
+def incorporate_llm_label(boosted: dict, llm_label: str) -> dict:
+    """
+    Adjust the final probabilities based on the LLM's classification.
+    If LLM says SMiShing, add +0.2 to SMiShing, etc. Then clamp & re-normalize.
+    """
+    if llm_label == "SMiShing":
+        boosted["SMiShing"] += 0.2
+    elif llm_label == "Other Scam":
+        boosted["Other Scam"] += 0.2
+    elif llm_label == "Legitimate":
+        boosted["Legitimate"] += 0.2
+    # else "Unknown" => do nothing
+
+    # clamp
+    for k in boosted:
+        if boosted[k] < 0:
+            boosted[k] = 0.0
+
+    total = sum(boosted.values())
+    if total > 0:
+        for k in boosted:
+            boosted[k] /= total
+    else:
+        # fallback
+        boosted["Legitimate"] = 1.0
+        boosted["SMiShing"] = 0.0
+        boosted["Other Scam"] = 0.0
+
+    return boosted
+
+def query_llm_for_explanation(
+    text: str,
+    final_label: str,
+    final_conf: float,
+    local_label: str,
+    local_conf: float,
+    llm_label: str,
+    llm_reason: str,
+    found_smishing: list,
+    found_other_scam: list,
+    found_urls: list
+) -> str:
+    """
+    Second LLM call: provides a holistic explanation of the final classification.
+    We include the local classification info, the LLM's own classification, and
+    relevant details (keywords, URLs).
+    """
+    system_prompt = (
+        "You are a cybersecurity expert providing a final explanation to the user. "
+        "Combine the local classification, the LLM classification, and the final label "
+        "into one concise explanation. Do not reveal internal code or raw JSON; just give "
+        "a short, user-friendly explanation. End with a final statement of the final label."
+    )
+    user_context = f"""
+User Message:
+{text}
+
+Local Classification => Label: {local_label}, Confidence: {local_conf}
+LLM Classification => Label: {llm_label}, Reason: {llm_reason}
+Final Overall Label => {final_label} (confidence {final_conf})
+
+Suspicious SMiShing Keywords => {found_smishing}
+Suspicious Other Scam Keywords => {found_other_scam}
+URLs => {found_urls}
+"""
+    # The LLM can combine these facts into a short paragraph.
+    try:
+        response = openai.ChatCompletion.create(
+            model="gpt-3.5-turbo",
+            messages=[
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": user_context}
+            ],
+            temperature=0.2
+        )
+        final_explanation = response["choices"][0]["message"]["content"].strip()
+        return final_explanation
+    except Exception as e:
+        return f"Could not generate final explanation due to error: {e}"
+
 def smishing_detector(input_type, text, image):
     """
-    Main detection function combining text (if 'Text') and OCR (if 'Screenshot').
+    Main detection function combining text (if 'Text') & OCR (if 'Screenshot'),
+    plus two LLM calls:
+      1) classification to adjust final probabilities,
+      2) a final explanation summarizing the outcome.
     """
     if input_type == "Text":
         combined_text = text.strip() if text else ""
@@ -105,31 +230,44 @@ def smishing_detector(input_type, text, image):
             "label": "No text provided",
             "confidence": 0.0,
             "keywords_found": [],
-            "urls_found": []
+            "urls_found": [],
+            "llm_label": "Unknown",
+            "llm_reason": "No text to analyze",
+            "final_explanation": "No text provided"
         }
 
-    result = classifier(
+    # 1. Local zero-shot classification
+    local_result = classifier(
        sequences=combined_text,
        candidate_labels=CANDIDATE_LABELS,
        hypothesis_template="This message is {}."
    )
-    original_probs = {k: float(v) for k, v in zip(result["labels"], result["scores"])}
+    original_probs = {k: float(v) for k, v in zip(local_result["labels"], local_result["scores"])}
 
+    # 2. Basic boosting from keywords & URLs
     boosted = boost_probabilities(original_probs, combined_text)
-
-    # Patched snippet begins
-    # 1. Extract language first, preserving it
-    detected_lang = boosted.get("detected_lang", "en")
-    # 2. Remove it so only numeric keys remain
-    boosted.pop("detected_lang", None)
-    # 3. Convert numeric values to float
-    for k, v in boosted.items():
-        boosted[k] = float(v)
-    # Patched snippet ends
-
+    detected_lang = boosted.pop("detected_lang", "en")
+
+    # Convert to float only
+    for k in boosted:
+        boosted[k] = float(boosted[k])
+
+    local_label = max(boosted, key=boosted.get)
+    local_conf = round(boosted[local_label], 3)
+
+    # 3. LLM Classification
+    llm_classification = query_llm_for_classification(combined_text)
+    llm_label = llm_classification.get("label", "Unknown")
+    llm_reason = llm_classification.get("reason", "No reason provided")
+
+    # 4. Incorporate LLM's label into final probabilities
+    boosted = incorporate_llm_label(boosted, llm_label)
+
+    # Now we have updated probabilities
     final_label = max(boosted, key=boosted.get)
     final_confidence = round(boosted[final_label], 3)
 
+    # 5. Gather found keywords & URLs
     lower_text = combined_text.lower()
     smishing_keys, scam_keys, _ = get_keywords_by_language(combined_text)
 
@@ -137,16 +275,34 @@ def smishing_detector(input_type, text, image):
     found_smishing = [kw for kw in smishing_keys if kw in lower_text]
     found_other_scam = [kw for kw in scam_keys if kw in lower_text]
 
+    # 6. Final LLM explanation
+    final_explanation = query_llm_for_explanation(
+        text=combined_text,
+        final_label=final_label,
+        final_conf=final_confidence,
+        local_label=local_label,
+        local_conf=local_conf,
+        llm_label=llm_label,
+        llm_reason=llm_reason,
+        found_smishing=found_smishing,
+        found_other_scam=found_other_scam,
+        found_urls=found_urls
+    )
+
     return {
         "detected_language": detected_lang,
         "text_used_for_classification": combined_text,
         "original_probabilities": {k: round(v, 3) for k, v in original_probs.items()},
-        "boosted_probabilities": {k: round(v, 3) for k, v in boosted.items()},
+        "boosted_probabilities_before_llm": {local_label: local_conf},
+        "llm_label": llm_label,
+        "llm_reason": llm_reason,
+        "boosted_probabilities_after_llm": {k: round(v, 3) for k, v in boosted.items()},
        "label": final_label,
        "confidence": final_confidence,
        "smishing_keywords_found": found_smishing,
        "other_scam_keywords_found": found_other_scam,
        "urls_found": found_urls,
+        "final_explanation": final_explanation,
    }
 
 #
@@ -165,7 +321,7 @@ def toggle_inputs(choice):
         return gr.update(visible=False), gr.update(visible=True)
 
 with gr.Blocks() as demo:
-    gr.Markdown("## SMiShing & Scam Detector (Choose Text or Screenshot)")
+    gr.Markdown("## SMiShing & Scam Detector with LLM-Enhanced Logic")
 
     with gr.Row():
         input_type = gr.Radio(
@@ -207,4 +363,7 @@ with gr.Blocks() as demo:
     )
 
 if __name__ == "__main__":
+    # Warn if openai.api_key not set
+    if not openai.api_key:
+        print("WARNING: OPENAI_API_KEY not set. LLM calls will fail or be skipped.")
     demo.launch()
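
For reference, a minimal sketch of how the updated two-stage flow could be exercised outside the Gradio UI, assuming the keyword files sit next to app.py and OPENAI_API_KEY is already exported; the sample message below is made up for illustration:

# Minimal sketch (assumptions: smishing_keywords.txt / other_scam_keywords.txt exist
# alongside app.py and OPENAI_API_KEY is set; importing app loads the zero-shot model).
from app import smishing_detector

# Hypothetical message, for illustration only
sample = "Your package is on hold. Verify your details at http://example.com/track"

result = smishing_detector(input_type="Text", text=sample, image=None)
print(result["label"], result["confidence"])          # final label after LLM adjustment
print(result["llm_label"], "-", result["llm_reason"]) # first LLM call
print(result["final_explanation"])                    # second LLM call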
app.py.bestoftues ADDED
@@ -0,0 +1,210 @@
+import gradio as gr
+import pytesseract
+from PIL import Image
+from transformers import pipeline
+import re
+from langdetect import detect
+from deep_translator import GoogleTranslator
+
+# Translator instance
+translator = GoogleTranslator(source="auto", target="es")
+
+# 1. Load separate keywords for SMiShing and Other Scam (assumed in English)
+with open("smishing_keywords.txt", "r", encoding="utf-8") as f:
+    SMISHING_KEYWORDS = [line.strip().lower() for line in f if line.strip()]
+
+with open("other_scam_keywords.txt", "r", encoding="utf-8") as f:
+    OTHER_SCAM_KEYWORDS = [line.strip().lower() for line in f if line.strip()]
+
+# 2. Zero-Shot Classification Pipeline
+model_name = "joeddav/xlm-roberta-large-xnli"
+classifier = pipeline("zero-shot-classification", model=model_name)
+CANDIDATE_LABELS = ["SMiShing", "Other Scam", "Legitimate"]
+
+def get_keywords_by_language(text: str):
+    """
+    Detect language using `langdetect` and translate keywords if needed.
+    """
+    snippet = text[:200]
+    try:
+        detected_lang = detect(snippet)
+    except Exception:
+        detected_lang = "en"
+
+    if detected_lang == "es":
+        smishing_in_spanish = [
+            translator.translate(kw).lower() for kw in SMISHING_KEYWORDS
+        ]
+        other_scam_in_spanish = [
+            translator.translate(kw).lower() for kw in OTHER_SCAM_KEYWORDS
+        ]
+        return smishing_in_spanish, other_scam_in_spanish, "es"
+    else:
+        return SMISHING_KEYWORDS, OTHER_SCAM_KEYWORDS, "en"
+
+def boost_probabilities(probabilities: dict, text: str):
+    """
+    Boost probabilities based on keyword matches and presence of URLs.
+    """
+    lower_text = text.lower()
+    smishing_keywords, other_scam_keywords, detected_lang = get_keywords_by_language(text)
+
+    smishing_count = sum(1 for kw in smishing_keywords if kw in lower_text)
+    other_scam_count = sum(1 for kw in other_scam_keywords if kw in lower_text)
+
+    smishing_boost = 0.30 * smishing_count
+    other_scam_boost = 0.30 * other_scam_count
+
+    found_urls = re.findall(r"(https?://[^\s]+)", lower_text)
+    if found_urls:
+        smishing_boost += 0.35
+
+    p_smishing = probabilities.get("SMiShing", 0.0)
+    p_other_scam = probabilities.get("Other Scam", 0.0)
+    p_legit = probabilities.get("Legitimate", 1.0)
+
+    p_smishing += smishing_boost
+    p_other_scam += other_scam_boost
+    p_legit -= (smishing_boost + other_scam_boost)
+
+    # Clamp
+    p_smishing = max(p_smishing, 0.0)
+    p_other_scam = max(p_other_scam, 0.0)
+    p_legit = max(p_legit, 0.0)
+
+    # Re-normalize
+    total = p_smishing + p_other_scam + p_legit
+    if total > 0:
+        p_smishing /= total
+        p_other_scam /= total
+        p_legit /= total
+    else:
+        p_smishing, p_other_scam, p_legit = 0.0, 0.0, 1.0
+
+    return {
+        "SMiShing": p_smishing,
+        "Other Scam": p_other_scam,
+        "Legitimate": p_legit,
+        "detected_lang": detected_lang
+    }
+
+def smishing_detector(input_type, text, image):
+    """
+    Main detection function combining text (if 'Text') and OCR (if 'Screenshot').
+    """
+    if input_type == "Text":
+        combined_text = text.strip() if text else ""
+    else:
+        combined_text = ""
+        if image is not None:
+            combined_text = pytesseract.image_to_string(image, lang="spa+eng").strip()
+
+    if not combined_text:
+        return {
+            "text_used_for_classification": "(none)",
+            "label": "No text provided",
+            "confidence": 0.0,
+            "keywords_found": [],
+            "urls_found": []
+        }
+
+    result = classifier(
+        sequences=combined_text,
+        candidate_labels=CANDIDATE_LABELS,
+        hypothesis_template="This message is {}."
+    )
+    original_probs = {k: float(v) for k, v in zip(result["labels"], result["scores"])}
+
+    boosted = boost_probabilities(original_probs, combined_text)
+
+    # Patched snippet begins
+    # 1. Extract language first, preserving it
+    detected_lang = boosted.get("detected_lang", "en")
+    # 2. Remove it so only numeric keys remain
+    boosted.pop("detected_lang", None)
+    # 3. Convert numeric values to float
+    for k, v in boosted.items():
+        boosted[k] = float(v)
+    # Patched snippet ends
+
+    final_label = max(boosted, key=boosted.get)
+    final_confidence = round(boosted[final_label], 3)
+
+    lower_text = combined_text.lower()
+    smishing_keys, scam_keys, _ = get_keywords_by_language(combined_text)
+
+    found_urls = re.findall(r"(https?://[^\s]+)", lower_text)
+    found_smishing = [kw for kw in smishing_keys if kw in lower_text]
+    found_other_scam = [kw for kw in scam_keys if kw in lower_text]
+
+    return {
+        "detected_language": detected_lang,
+        "text_used_for_classification": combined_text,
+        "original_probabilities": {k: round(v, 3) for k, v in original_probs.items()},
+        "boosted_probabilities": {k: round(v, 3) for k, v in boosted.items()},
+        "label": final_label,
+        "confidence": final_confidence,
+        "smishing_keywords_found": found_smishing,
+        "other_scam_keywords_found": found_other_scam,
+        "urls_found": found_urls,
+    }
+
+#
+# Gradio interface with dynamic visibility
+#
+def toggle_inputs(choice):
+    """
+    Return updates for (text_input, image_input) based on the radio selection.
+    """
+    if choice == "Text":
+        # Show text input, hide image
+        return gr.update(visible=True), gr.update(visible=False)
+    else:
+        # choice == "Screenshot"
+        # Hide text input, show image
+        return gr.update(visible=False), gr.update(visible=True)
+
+with gr.Blocks() as demo:
+    gr.Markdown("## SMiShing & Scam Detector (Choose Text or Screenshot)")
+
+    with gr.Row():
+        input_type = gr.Radio(
+            choices=["Text", "Screenshot"],
+            value="Text",
+            label="Choose Input Type"
+        )
+
+        text_input = gr.Textbox(
+            lines=3,
+            label="Paste Suspicious SMS Text",
+            placeholder="Type or paste the message here...",
+            visible=True  # default
+        )
+
+        image_input = gr.Image(
+            type="pil",
+            label="Upload Screenshot",
+            visible=False  # hidden by default
+        )
+
+    # Whenever input_type changes, toggle which input is visible
+    input_type.change(
+        fn=toggle_inputs,
+        inputs=input_type,
+        outputs=[text_input, image_input],
+        queue=False
+    )
+
+    # Button to run classification
+    analyze_btn = gr.Button("Classify")
+    output_json = gr.JSON(label="Result")
+
+    # On button click, call the smishing_detector
+    analyze_btn.click(
+        fn=smishing_detector,
+        inputs=[input_type, text_input, image_input],
+        outputs=output_json
+    )
+
+if __name__ == "__main__":
+    demo.launch()
requirements.txt CHANGED
@@ -9,3 +9,4 @@ httpx==0.13.3
 sentencepiece==0.1.99
 numpy==1.25.0
 shap==0.41.0
+openai
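
Note that the new code calls the legacy openai.ChatCompletion interface, which was removed in the 1.x releases of the openai package; if the unpinned entry above resolves to a 1.x version, both LLM calls will raise and fall back to their error branches. A pinned entry along these lines (exact version chosen here as an assumption) keeps the dependency on the pre-1.0 API:

openai==0.28.1  # assumption: last pre-1.0 release, compatible with openai.ChatCompletion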