pmkhanh7890 committed on
Commit
1ce1659
·
1 Parent(s): 050e30d

1st version of demo

app.py CHANGED
@@ -1,400 +1,22 @@
1
- import warnings
 
2
 
3
- import torchvision.transforms as transforms
4
- from google_img_source_search import ReverseImageSearcher
5
 
6
- # from src.images.CNN_model_classifier import predict_cnn
7
- # from src.images.diffusion_model_classifier import (
8
- # ImageClassifier,
9
- # predict_single_image,
10
- # )
11
-
12
- warnings.simplefilter(
13
- action="ignore",
14
- category=FutureWarning,
15
- ) # disable FutureWarning
16
-
17
- import gradio as gr # noqa: E402
18
- from transformers import ( # noqa: E402
19
- AutoModelForSequenceClassification,
20
- AutoTokenizer,
21
- pipeline,
22
- )
23
-
24
- from src.texts.MAGE.deployment import ( # noqa: E402
25
- detect,
26
- preprocess,
27
- )
28
- from src.texts.PASTED.pasted_lexicon import Detector # noqa: E402
29
- from src.texts.Search_Text.search import ( # noqa: E402
30
- get_important_sentences,
31
- get_keywords,
32
- is_human_written,
33
- )
34
- from src.images.Search_Image.search import (
35
- compare_images,
36
- get_image_from_path,
37
- get_image_from_url,
38
- )
39
-
40
-
41
- def convert_score_range(score):
42
- """
43
- Converts a score from the range [0, 1] to [-1, 1].
44
-
45
- Args:
46
- score: The original score in the range [0, 1].
47
-
48
- Returns:
49
- The converted score in the range [-1, 1].
50
- """
51
-
52
- return 2 * score - 1
53
-
54
-
55
- def generate_highlighted_text(text_scores):
56
- """
57
- Generates a highlighted text string based on the given text and scores.
58
-
59
- Args:
60
- text_scores: A list of tuples, where each tuple contains a text
61
- segment and its score.
62
-
63
- Returns:
64
- A string of HTML code with highlighted text.
65
- """
66
- highlighted_text = ""
67
- for text, score in text_scores:
68
- # Map score to a color using a gradient
69
- color = f"rgba(255, 0, 0, {1 - score})" # Red to green gradient
70
- highlighted_text += (
71
- f"<span style='background-color: {color}'>{text}</span>" # noqa
72
- )
73
- return highlighted_text
74
-
75
-
76
- def separate_characters_with_mask(text, mask):
77
- """Separates characters in a string and pairs them with a mask sign.
78
-
79
- Args:
80
- text: The input string.
81
-
82
- Returns:
83
- A list of tuples, where each tuple contains a character and a mask.
84
- """
85
-
86
- return [(char, mask) for char in text]
87
-
88
-
89
- def detect_ai_text(model_name, search_engine, text):
90
- if search_engine is True:
91
- keywords = get_keywords(text)
92
- important_sentences = get_important_sentences(text, keywords)
93
- predictions = is_human_written(important_sentences[0])
94
- print("keywords: ", keywords)
95
- print("important_sentences: ", important_sentences)
96
- print("predictions: ", predictions)
97
- if predictions == -1:
98
- caption = "[Found exact match] "
99
- text_scores = list(zip([caption, text], [0, predictions]))
100
- print("text_scores: ", text_scores)
101
- return text_scores
102
-
103
- if model_name == "SimLLM":
104
- tokenize_input = SimLLM_tokenizer(text, return_tensors="pt")
105
- outputs = SimLLM_model(**tokenize_input)
106
- predictions = outputs.logits.argmax(dim=-1).item()
107
- if predictions == 0:
108
- predictions = "human-written"
109
- else:
110
- predictions = "machine-generated"
111
-
112
- elif model_name == "MAGE":
113
- processed_text = preprocess(text)
114
- predictions = detect(
115
- processed_text,
116
- MAGE_tokenizer,
117
- MAGE_model,
118
- device,
119
- )
120
-
121
- elif model_name == "chatgpt-detector-roberta":
122
- predictions = roberta_pipeline_en(text)[0]["label"]
123
- if predictions == "Human":
124
- predictions = "human-written"
125
- else: # ChatGPT
126
- predictions = "machine-generated"
127
- elif model_name == "PASTED-Lexical":
128
- predictions = detector(text)
129
-
130
- if model_name != "PASTED-Lexical":
131
- text_scores = list(zip([text], [predictions]))
132
- else:
133
- text_scores = []
134
- for text, score in predictions:
135
- new_score = convert_score_range(score) # normalize score
136
- text_scores.append((text, new_score))
137
-
138
- return text_scores
139
-
140
-
141
- diffusion_model_path = (
142
- "src/images/Diffusion/model_checkpoints/"
143
- "image-classifier-step=7007-val_loss=0.09.ckpt"
144
- )
145
- cnn_model_path = "src/images/CNN/model_checkpoints/blur_jpg_prob0.5.pth"
146
-
147
-
148
- def detect_ai_image(input_image_path, search_engine):
149
- # if search_engine is True:
150
- # Search image
151
-
152
- rev_img_searcher = ReverseImageSearcher()
153
- search_items = rev_img_searcher.search_by_file(input_image_path)
154
- min_result_difference = 5000
155
- result_image_url = ""
156
- input_image = get_image_from_path(input_image_path)
157
-
158
- for search_item in search_items:
159
- # print(f'Title: {search_item.page_title}')
160
- # print(f'Site: {search_item.page_url}')
161
- # print(f'Img: {search_item.image_url}\n')
162
-
163
- # Compare each search result image with the input image
164
- result_image = get_image_from_url(search_item.image_url)
165
- # input_image = get_image_from_url(search_item.image_url)
166
- result_difference = compare_images(result_image, input_image)
167
-
168
- print(f"Difference with search result: {result_difference}")
169
- print(f"Result image url: {search_item.page_url}\n")
170
-
171
- if min_result_difference > result_difference:
172
- min_result_difference = result_difference
173
- result_image_url = search_item.image_url
174
- result_page_url = search_item.page_url
175
-
176
-
177
- if result_difference == 0:
178
- break
179
-
180
-
181
- if min_result_difference == 0:
182
- result = f"<h1>Input image is LIKELY SIMILAR to image from:</h1>"\
183
- f"<ul>"\
184
- f'<li>\nPage URL: <a href="url">{result_page_url}</a></li>'\
185
- f'<li>\nImage URL: <a href="url">{result_image_url}</a></li>'\
186
- f"<li>\nDifference score: {min_result_difference}</li>"\
187
- f"</ul>"
188
- elif 10 > min_result_difference > 0:
189
- result = f"<h1>Input image is potentially a VARIATRION from:</h1>"\
190
- f"<ul>"\
191
- f'<li>\nPage URL: <a href="url">{result_page_url}</a></li>'\
192
- f'<li>\nImage URL: <a href="url">{result_image_url}</a></li>'\
193
- f"<li>\nDifference score: {min_result_difference}</li>"\
194
- f"</ul>"
195
- elif min_result_difference < 5000:
196
- result = f"<h1>Input image is not similar to any search results.</h1>"\
197
- f"<ul>"\
198
- f'<li>\nPage URL: <a href="url">{result_page_url}</a></li>'\
199
- f'<li>\nImage URL: <a href="url">{result_image_url}</a></li>'\
200
- f"<li>\nDifference score: {min_result_difference}</li>"\
201
- f"</ul>"
202
- else:
203
- result = f"<h1>No search result found.</h1>"\
204
-
205
- return result
206
-
207
- # def get_prediction_diffusion(image):
208
- # model = ImageClassifier.load_from_checkpoint(diffusion_model_path)
209
-
210
- # prediction = predict_single_image(image, model)
211
- # return (prediction >= 0.5, prediction)
212
-
213
- # def get_prediction_cnn(image):
214
- # prediction = predict_cnn(image, cnn_model_path)
215
- # return (prediction >= 0.5, prediction)
216
-
217
- # # Define the transformations for the image
218
- # transform = transforms.Compose(
219
- # [
220
- # transforms.Resize((224, 224)), # Image size expected by ResNet50
221
- # transforms.ToTensor(),
222
- # transforms.Normalize(
223
- # mean=[0.485, 0.456, 0.406],
224
- # std=[0.229, 0.224, 0.225],
225
- # ),
226
- # ],
227
- # )
228
- # image_tensor = transform(inp)
229
- # pred_diff, prob_diff = get_prediction_diffusion(image_tensor)
230
- # pred_cnn, prob_cnn = get_prediction_cnn(image_tensor)
231
- # verdict = (
232
- # "AI Generated" if (pred_diff or pred_cnn) else "No GenAI detected"
233
- # )
234
- # return (
235
- # f"<h1>{verdict}</h1>"
236
- # f"<ul>"
237
- # f"<li>Diffusion detection score: {prob_diff:.1%} "
238
- # f"{'(MATCH)' if pred_diff else ''}</li>"
239
- # f"<li>CNN detection score: {prob_cnn:.1%} "
240
- # f"{'(MATCH)' if pred_cnn else ''}</li>"
241
- # f"</ul>"
242
- # )
243
-
244
-
245
- # Define GPUs
246
- device = "cpu" # use 'cuda:0' if GPU is available
247
-
248
- # init MAGE
249
- model_dir = "yaful/MAGE" # model in huggingface
250
- MAGE_tokenizer = AutoTokenizer.from_pretrained(model_dir)
251
- MAGE_model = AutoModelForSequenceClassification.from_pretrained(model_dir).to(
252
- device,
253
- )
254
-
255
- # init chatgpt-detector-roberta
256
- model_dir = "Hello-SimpleAI/chatgpt-detector-roberta" # model in huggingface
257
- roberta_pipeline_en = pipeline(task="text-classification", model=model_dir)
258
-
259
- # init PASTED
260
- model_dir = "linzw/PASTED-Lexical"
261
- detector = Detector(model_dir, device)
262
-
263
- # init SimLLM
264
- model_path = "./models/single_model_detector"
265
- SimLLM_tokenizer = AutoTokenizer.from_pretrained(model_path)
266
- SimLLM_model = AutoModelForSequenceClassification.from_pretrained(model_path)
267
-
268
- # Init variable for UI
269
- title = """
270
- <center>
271
-
272
- <h1> AI-generated content detection </h1>
273
- <b> Demo by NICT & Tokyo Techies <b>
274
-
275
- </center>
276
- """
277
-
278
- examples = [
279
- [
280
- "SimLLM",
281
- False,
282
- """\
283
- The BBC's long-running consumer rights series Watchdog is to end as a \
284
- standalone programme, instead becoming part of The One Show. Watchdog \
285
- began in 1980 as a strand of Nationwide, but proved so popular it \
286
- became a separate programme in 1985. Co-host Steph McGovern has moved \
287
- to Channel 4, but Matt Allwright and Nikki Fox will stay to front the \
288
- new strand. The BBC said they would investigate viewer complaints all \
289
- year round rather than for two series a year.
290
- """,
291
- ],
292
- [
293
- "chatgpt-detector-roberta",
294
- False,
295
- """\
296
- Artificial intelligence (AI) is the science of making machines \
297
- intelligent. It enables computers to learn from data, recognize \
298
- patterns, and make decisions. AI powers many technologies we use \
299
- daily, from voice assistants to self-driving cars. It's rapidly \
300
- evolving, promising to revolutionize various industries and reshape \
301
- the future.""",
302
- ],
303
- ]
304
-
305
- model_remark = """<left>
306
- Model sources:
307
- <a href="https://github.com/Tokyo-Techies/prj-nict-ai-content-detection">SimLLM</a>,
308
- <a href="https://github.com/yafuly/MAGE">MAGE</a>,
309
- <a href="https://huggingface.co/Hello-SimpleAI/chatgpt-detector-roberta">chatgpt-detector-roberta</a>,
310
- <a href="https://github.com/Linzwcs/PASTED">PASTED-Lexical</a>.
311
- </left>
312
- """ # noqa: E501
313
-
314
- image_samples = [
315
- ["src/images/samples/fake_dalle.jpg", "Generated (Dall-E)"],
316
- ["src/images/samples/fake_midjourney.png", "Generated (MidJourney)"],
317
- ["src/images/samples/fake_stable.jpg", "Generated (Stable Diffusion)"],
318
- ["src/images/samples/fake_cnn.png", "Generated (GAN)"],
319
- ["src/images/samples/real.png", "Organic"],
320
- [
321
- "https://p.potaufeu.asahi.com/1831-p/picture/27695628/89644a996fdd0cfc9e06398c64320fbe.jpg", # noqa E501
322
- "Internet GenAI",
323
- ],
324
- ]
325
- image_samples_path = [i[0] for i in image_samples]
326
-
327
- # UI
328
  with gr.Blocks() as demo:
329
- with gr.Row():
330
- gr.HTML(title)
331
- with gr.Row():
332
- with gr.Tab("Text"):
333
- with gr.Row():
334
- with gr.Column():
335
- model = gr.Dropdown(
336
- [
337
- "SimLLM",
338
- "MAGE",
339
- "chatgpt-detector-roberta",
340
- "PASTED-Lexical",
341
- ],
342
- label="Detection model",
343
- )
344
- search_engine = gr.Checkbox(label="Use search engine")
345
- gr.HTML(model_remark)
346
- with gr.Column():
347
- text_input = gr.Textbox(
348
- label="Input text",
349
- placeholder="Enter text here...",
350
- lines=5,
351
- )
352
-
353
- output = gr.HighlightedText(
354
- label="Detection results",
355
- combine_adjacent=True,
356
- show_legend=True,
357
- color_map={
358
- "human-written": "#7d58cf",
359
- "machine-generated": "#e34242",
360
- },
361
- )
362
-
363
- gr.Examples(
364
- examples=examples,
365
- inputs=[model, search_engine, text_input],
366
- )
367
- model.change(
368
- detect_ai_text,
369
- inputs=[model, search_engine, text_input],
370
- outputs=output,
371
- )
372
- search_engine.change(
373
- detect_ai_text,
374
- inputs=[model, search_engine, text_input],
375
- outputs=output,
376
- )
377
- text_input.change(
378
- detect_ai_text,
379
- inputs=[model, search_engine, text_input],
380
- outputs=output,
381
- )
382
- with gr.Tab("Images"):
383
- with gr.Row():
384
- input_image = gr.Image(type="filepath")
385
- with gr.Column():
386
- output_image = gr.Markdown(height=400)
387
- gr.Examples(
388
- examples=image_samples,
389
- inputs=input_image,
390
- )
391
-
392
- input_image.change(
393
- detect_ai_image,
394
- inputs=input_image,
395
- outputs=output_image,
396
- )
397
-
398
-
399
- # demo.launch(share=True)
400
- demo.launch(allowed_paths=image_samples_path, share=True)
 
1
+ # Display data
2
+ import gradio as gr
3
 
4
+ def data_display(replace_df):
5
+ return "aaaa"
6
 
7
  with gr.Blocks() as demo:
8
+ replace_df = gr.Dataframe(
9
+ # headers=["Find what:", "Replace with:"],
10
+ # datatype=["str", "str"],
11
+ # row_count=(1, "dynamic"),
12
+ # col_count=(2, "fixed"),
13
+ # interactive=True
14
+ )
15
+ replace_button = gr.Button("Replace all")
16
+ news_content = gr.Textbox(label="Content", value="", lines=12)
17
+
18
+
19
+ replace_button.click(data_display,
20
+ inputs=[replace_df],
21
+ outputs=[news_content])
22
+ demo.launch()
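
A note on the wiring above: Gradio passes the gr.Dataframe value to the click callback as a pandas DataFrame by default, so a fuller data_display could walk the find/replace pairs row by row. A minimal sketch, assuming the dataframe has at least two columns (the headers above are still commented out):

import pandas as pd

def data_display(replace_df: pd.DataFrame) -> str:
    # Render each find/replace pair on its own line for the Content textbox.
    lines = []
    for _, row in replace_df.iterrows():
        lines.append(f"{row.iloc[0]} -> {row.iloc[1]}")
    return "\n".join(lines)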
 
application.py CHANGED
@@ -6,13 +6,28 @@ import requests
6
  from PIL import Image
7
  import re
8
 
 
9
  from src.application.url_reader import URLReader
 
10
 
11
- OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
12
- openai.api_key = os.getenv('OPENAI_API_KEY')
13
  GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')
14
  SEARCH_ENGINE_ID = os.getenv('SEARCH_ENGINE_ID')
15
 
 
 
16
  def load_url(url):
17
  """
18
  Load content from the given URL.
@@ -39,99 +54,75 @@ def load_url(url):
39
 
40
  return content.title, content.text, image
41
 
42
-
43
- def replace_terms(text, input_term, destination_term):
44
- # Replace input_term with destination_term in the text
45
- modified_text = re.sub(input_term, destination_term, text)
46
- return modified_text
47
-
48
- def generate_content(model1, model2, title, content):
49
- # Generate text using the selected models
50
- full_content = ""
51
- input_type = ""
52
- if title and content:
53
- full_content = title + "\n" + content
54
- input_type = "title and content"
55
- elif title:
56
- full_content = title
57
- input_type = "title"
58
- elif content:
59
- full_content = title
60
- input_type = "content"
61
-
62
- def generate_text(model, full_context, input_type):
63
- # Generate text using the selected model
64
- if input_type == "":
65
- prompt = "Generate a random fake news article"
66
- else:
67
- prompt = f"Generate a fake news article (title and content) based on the following {input_type}: {full_context}"
68
-
69
- try:
70
- response = openai.ChatCompletion.create(
71
- model=model,
72
- messages=[
73
- {"role": "user", "content": prompt}
74
- ]
75
- )
76
- return response.choices[0].message.content
77
-
78
- except openai.error.OpenAIError as e:
79
- print(f"Error interacting with OpenAI API: {e}")
80
- return "An error occurred while processing your request."
81
 
82
  # Define the GUI
83
  with gr.Blocks() as demo:
84
- gr.Markdown("# Fake News Detection")
85
 
86
  with gr.Row():
 
87
  with gr.Column(scale=1):
88
- gr.Markdown("## Settings")
89
- gr.Markdown("This tool generates fake news by modifying the content of a given URL.")
90
-
91
- with gr.Accordion("1. Enter a URL"):
92
- #gr.Markdown(" 1. Enter a URL.")
93
- url_input = gr.Textbox(
94
- label="URL",
95
- value="https://bbc.com/future/article/20250110-how-often-you-should-wash-your-towels-according-to-science",
96
  )
97
- load_button = gr.Button("Load an URL...")
98
-
99
- with gr.Accordion("2. Select a content-generation model", open=True):
100
- with gr.Row():
101
- model1_dropdown = gr.Dropdown(choices=["GPT 4o", "GPT 4o-mini"], label="Text-generation model")
102
- model2_dropdown = gr.Dropdown(choices=["Dall-e", "Stable Diffusion"], label="Image-generation model")
103
- generate_button = gr.Button("Random generation...")
104
-
105
- with gr.Accordion("3. Replace any terms", open=True):
106
- with gr.Row():
107
- input_term_box = gr.Textbox(label="Input Term")
108
- destination_term_box = gr.Textbox(label="Destination Term")
109
- replace_button = gr.Button("Replace term...")
110
-
111
- process_button = gr.Button("Process")
112
 
113
- with gr.Column(scale=2):
114
- gr.Markdown("## News contents")
115
- title_input = gr.Textbox(label="Title", value="")
116
- with gr.Row():
117
- image_view = gr.Image(label="Image")
118
- content_input = gr.Textbox(label="Content", value="", lines=15)
 
119
 
120
-
 
 
 
 
121
 
122
  # Connect events
123
  load_button.click(
124
  load_url,
125
  inputs=url_input,
126
- outputs=[title_input, content_input, image_view]
127
  )
128
- replace_button.click(replace_terms,
129
- inputs=[content_input, input_term_box, destination_term_box],
130
- outputs=content_input)
131
- process_button.click(generate_text,
132
- inputs=[url_input, model1_dropdown, model2_dropdown, input_term_box, destination_term_box, title_input, content_input],
133
- outputs=[title_input, content_input])
134
-
 
 
 
 
 
 
135
  #url_input.change(load_image, inputs=url_input, outputs=image_view)
136
 
137
  demo.launch()
 
6
  from PIL import Image
7
  import re
8
 
9
+ from src.application.content_detection import generate_analysis_report
10
  from src.application.url_reader import URLReader
11
+ from src.application.content_generation import generate_content, replace_text
12
+
13
+ # from dotenv import load_dotenv
14
+
15
+ # load_dotenv()
16
+ # AZURE_OPENAI_API_KEY = os.getenv('AZURE_OPENAI_API_KEY')
17
+ # AZURE_OPENAI_ENDPOINT = os.getenv('AZURE_OPENAI_ENDPOINT')
18
+ # AZURE_OPENAI_API_VERSION = os.getenv('AZURE_OPENAI_API_VERSION')
19
+
20
+ # client = openai.AzureOpenAI(
21
+ # api_version = AZURE_OPENAI_API_VERSION,
22
+ # api_key = AZURE_OPENAI_API_KEY,
23
+ # azure_endpoint = AZURE_OPENAI_ENDPOINT,
24
+ # )
25
 
 
 
26
  GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')
27
  SEARCH_ENGINE_ID = os.getenv('SEARCH_ENGINE_ID')
28
 
29
+ AZURE_OPENAI_MODEL = ["gpt-4o-mini", "gpt-4o"]
30
+
31
  def load_url(url):
32
  """
33
  Load content from the given URL.
 
54
 
55
  return content.title, content.text, image
56
 
57
+ def show_detailed_analysis(title):
58
+ return f"More details of {title} will be shown here."
 
59
 
60
  # Define the GUI
61
  with gr.Blocks() as demo:
62
+ gr.Markdown("# FAKE NEWS DETECTION")
63
 
64
  with gr.Row():
65
+ # SETTINGS
66
  with gr.Column(scale=1):
67
+ with gr.Accordion("Settings"):
68
+ gr.Markdown("This tool generates fake news by modifying the content of a given URL.")
69
+
70
+ with gr.Accordion("1. Enter a URL"):
71
+ url_input = gr.Textbox(
72
+ label="URL",
73
+ value="https://bbc.com/future/article/20250110-how-often-you-should-wash-your-towels-according-to-science",
74
+ )
75
+ load_button = gr.Button("Load URL")
76
+
77
+ with gr.Accordion("2. Select a content-generation model", open=True):
78
+ with gr.Row():
79
+ text_generation_model = gr.Dropdown(choices=AZURE_OPENAI_MODEL, label="Text-generation model")
80
+ image_generation_model = gr.Dropdown(choices=["Dall-e", "Stable Diffusion"], label="Image-generation model")
81
+ generate_button = gr.Button("Random generation")
82
+
83
+ with gr.Accordion("3. Replace any terms", open=True):
84
+ replace_df = gr.Dataframe(
85
+ headers=["Find what:", "Replace with:"],
86
+ datatype=["str", "str"],
87
+ row_count=(1, "dynamic"),
88
+ col_count=(2, "fixed"),
89
+ interactive=True
90
  )
91
+ replace_button = gr.Button("Replace all")
 
92
 
93
+ # GENERATED CONTENT
94
+ with gr.Column(scale=1):
95
+ with gr.Accordion("Generated News Contents"):
96
+ detection_button = gr.Button("Check for fake news")
97
+ news_title = gr.Textbox(label="Title", value="")
98
+ news_image = gr.Image(label="Image")
99
+ news_content = gr.Textbox(label="Content", value="", lines=12)
100
 
101
+ # FAKE NEWS ANALYSIS REPORT
102
+ with gr.Column(scale=1):
103
+ with gr.Accordion("Fake News Analysis"):
104
+ html_out = gr.HTML()
105
+ detailed_analysis_button = gr.Button("Show detailed analysis...")
106
 
107
  # Connect events
108
  load_button.click(
109
  load_url,
110
  inputs=url_input,
111
+ outputs=[news_title, news_content, news_image]
112
  )
113
+ replace_button.click(replace_text,
114
+ inputs=[news_title, news_content, replace_df],
115
+ outputs=[news_title, news_content])
116
+ generate_button.click(generate_content,
117
+ inputs=[text_generation_model, image_generation_model, news_title, news_content],
118
+ outputs=[news_title, news_content])
119
+ detection_button.click(generate_analysis_report,
120
+ inputs=[news_title, news_content, news_image],
121
+ outputs=html_out)
122
+ detailed_analysis_button.click(show_detailed_analysis,
123
+ inputs=[news_title],
124
+ outputs=[html_out])
125
+ # change Image
126
  #url_input.change(load_image, inputs=url_input, outputs=image_view)
127
 
128
  demo.launch()
requirements.txt CHANGED
@@ -1,7 +1,10 @@
1
  # UI
2
  gradio==5.13.1
3
 
4
- # network
 
 
 
5
  requests==2.31.0
6
  beautifulsoup4==4.12.3
7
  lxml-html-clean==0.4.1
 
1
  # UI
2
  gradio==5.13.1
3
 
4
+ # Read environment variables
5
+ python-dotenv
6
+
7
+ # URL processing
8
  requests==2.31.0
9
  beautifulsoup4==4.12.3
10
  lxml-html-clean==0.4.1
src/application/content_detection.py ADDED
@@ -0,0 +1,112 @@
1
+
2
+
3
+ from src.application.text.model_detection import detect_by_ai_model
4
+ from src.application.text.search_detection import check_human, detect_by_relative_search
5
+
6
+
7
+ def determine_text_origin(title, content):
8
+ """
9
+ Determines the origin of the given text based on paraphrasing detection and human authorship analysis.
10
+
11
+ Args:
12
+ title: The title of the news item.
+ content: The body text to be analyzed.
13
+
14
+ Returns:
15
+ tuple: (prediction_label, prediction_score, referent_url), where prediction_label is:
16
+ - "HUMAN": If the text is likely written by a human.
17
+ - "MACHINE": If the text is likely generated by a machine.
18
+ """
19
+ # Classify by search engine
20
+ text = title + "\n\n" + content
21
+ is_paraphrased, referent_url, aligned_sentences = detect_by_relative_search(text)
22
+ prediction_score = 0.0
23
+ if not is_paraphrased:
24
+ prediction_label = "UNKNOWN"
25
+ else:
26
+ prediction_score = 100.0
27
+ if check_human(aligned_sentences):
28
+ prediction_label = "HUMAN"
29
+ else:
30
+ prediction_label = "MACHINE"
31
+
32
+ if prediction_label == "UNKNOWN":
33
+ # Classify by SOTA model
34
+ prediction_label, prediction_score = detect_by_ai_model(text)
35
+
36
+ return prediction_label, prediction_score, referent_url
37
+
38
+
39
+ def generate_analysis_report(news_title, news_content, news_image):
40
+
41
+ text_prediction_label, text_confidence_score, text_referent_url = determine_text_origin(news_title, news_content)
42
+
43
+ # Analyze text content
44
+ url1 = text_referent_url
45
+ #url2 = "https://example.com/article2"
46
+
47
+ # Forensic analysis
48
+ if text_prediction_label == "MACHINE":
49
+ text_prediction_label = "The text is modified by GPT-4o (AI)"
50
+ else:
51
+ text_prediction_label = "The text is written by HUMAN"
52
+
53
+ image_detection_results = "MACHINE"
54
+ if image_detection_results == "MACHINE":
55
+ image_detection_results = "The image is generated by Dall-e (AI)"
56
+ else:
57
+ image_detection_results = "The image is generated by HUMAN"
58
+ image_confidence_score = 90.5
59
+
60
+ news_detection_results = "MACHINE"
61
+ if news_detection_results == "MACHINE":
62
+ news_detection_results = "The whole news generated by AI"
63
+ else:
64
+ news_detection_results = "The whole news written by HUMAN"
65
+ news_confidence_score = 97.4
66
+
67
+ # Misinformation analysis
68
+ out_of_context_results = "cohesive"
69
+ if out_of_context_results == "cohesive":
70
+ out_of_context_results = "The input news is cohesive (non-out-of-context)"
71
+ else:
72
+ out_of_context_results = "The input news is out-of-context"
73
+ out_of_context_confidence_score = 96.7
74
+
75
+ # Description
76
+ description = "The description should be concise, clear, and aimed at helping general readers understand the case."
77
+
78
+ html_template = f"""
79
+ <h2>Placeholder for results</h2>
80
+
81
+ <div>
82
+ <h3>Originality:</h3>
83
+ <ul>
84
+ <li><a href="{url1}" target="_blank">{url1[:40] + "..."}</a></li>
85
+ </ul>
86
+ </div>
87
+
88
+ <div>
89
+ <h3>Forensic:</h3>
90
+ <b>{news_detection_results} (confidence = {news_confidence_score}%)</b>
91
+ <ul>
92
+ <li>{text_prediction_label} (confidence = {text_confidence_score}%)</li>
93
+ <li>{image_detection_results} (confidence = {image_confidence_score}%)</li>
94
+ </ul>
95
+ </div>
96
+
97
+ <div>
98
+ <h3>Misinformation:</h3>
99
+ <ul>
100
+ <li>{out_of_context_results} (confidence = {out_of_context_confidence_score}%)</li>
101
+ </ul>
102
+ </div>
103
+
104
+ <div>
105
+ <h3>Description (optional):</h3>
106
+ <ul>
107
+ <li>{description}</li>
108
+ </ul>
109
+ </div>
110
+ """
111
+
112
+ return html_template
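
A minimal usage sketch of the report generator above, assuming GOOGLE_API_KEY and SEARCH_ENGINE_ID are configured so the relative-search step can run; the title and content values are illustrative:

from src.application.content_detection import generate_analysis_report

title = "How often you should wash your towels"
content = "Scientists say bath towels should be washed after every three to five uses."
html_report = generate_analysis_report(title, content, news_image=None)
print(html_report)  # HTML string with Originality, Forensic and Misinformation sections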
src/application/content_generation.py ADDED
@@ -0,0 +1,71 @@
1
+ import openai
2
+ from dotenv import load_dotenv
3
+ import os
4
+ import re
5
+
6
+ load_dotenv()
7
+ AZURE_OPENAI_API_KEY = os.getenv('AZURE_OPENAI_API_KEY')
8
+ AZURE_OPENAI_ENDPOINT = os.getenv('AZURE_OPENAI_ENDPOINT')
9
+ AZURE_OPENAI_API_VERSION = os.getenv('AZURE_OPENAI_API_VERSION')
10
+
11
+ client = openai.AzureOpenAI(
12
+ api_version = AZURE_OPENAI_API_VERSION,
13
+ api_key = AZURE_OPENAI_API_KEY,
14
+ azure_endpoint = AZURE_OPENAI_ENDPOINT,
15
+ )
16
+
17
+ def generate_content(text_generation_model, image_generation_model, title, content):
18
+ # Generate text using the selected models
19
+ full_content = ""
20
+ input_type = ""
21
+ if title and content:
22
+ full_content = title + "\n" + content
23
+ input_type = "title and content"
24
+ elif title:
25
+ full_content = title
26
+ input_type = "title"
27
+ elif content:
28
+ full_content = content
29
+ input_type = "content"
30
+
31
+ # Generate text using the text generation model
32
+ generated_text = generate_text(text_generation_model, full_content, input_type)
33
+ return title, generated_text
34
+
35
+ def generate_text(model, full_context, input_type):
36
+ # Generate text using the selected model
37
+ if input_type == "":
38
+ prompt = "Generate a random fake news article"
39
+ else:
40
+ prompt = f"Generate a fake news article (title and content) based on the following {input_type}:\n\n{full_context}"
41
+
42
+ try:
43
+ response = client.chat.completions.create(
44
+ model=model,
45
+ messages = [{"role": "system", "content": prompt}],
46
+ )
47
+
48
+ print("Response from OpenAI API: ", response.choices[0].message.content)
49
+ return response.choices[0].message.content
50
+
51
+ except openai.OpenAIError as e:
52
+ print(f"Error interacting with OpenAI API: {e}")
53
+ return "An error occurred while processing your request."
54
+
55
+ def replace_text(news_title, news_content, replace_df):
56
+ """
57
+ Replaces occurrences in the input text based on the provided DataFrame.
58
+
59
+ Args:
60
+ text: The input text.
61
+ replace_df: A pandas DataFrame with two columns: "find_what" and "replace_with".
62
+
63
+ Returns:
64
+ The text after all replacements have been made.
65
+ """
66
+ for _, row in replace_df.iterrows():
67
+ find_what = row["Find what:"]
68
+ replace_with = row["Replace with:"]
69
+ news_content = news_content.replace(find_what, replace_with)
70
+ news_title = news_title.replace(find_what, replace_with)
71
+ return news_title, news_content
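
A small worked example of replace_text, assuming a dataframe shaped like the one defined in application.py (columns "Find what:" and "Replace with:"):

import pandas as pd
from src.application.content_generation import replace_text

replace_df = pd.DataFrame(
    [["BBC", "ABC"], ["towels", "sheets"]],
    columns=["Find what:", "Replace with:"],
)
new_title, new_content = replace_text(
    "BBC: how often to wash towels",
    "The BBC asked scientists about towels.",
    replace_df,
)
# new_title   -> "ABC: how often to wash sheets"
# new_content -> "The ABC asked scientists about sheets."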
src/application/text/helper.py ADDED
@@ -0,0 +1,153 @@
1
+ from collections import Counter
2
+ import re
3
+ import string
4
+ from sklearn.feature_extraction.text import TfidfVectorizer
5
+ from nltk.tokenize import word_tokenize
6
+ from nltk.util import ngrams
7
+
8
+
9
+ def clean_text(text):
10
+ """Doc cleaning"""
11
+ punctuations = r"""!"#$%&'()*+-/:;<=>?@[\]^_`{|}~""" # ',' and '.' are kept because they can appear inside numbers
12
+ # Lowering text
13
+ text = text.lower()
14
+
15
+ # Removing punctuation
16
+ text = "".join([c for c in text if c not in punctuations])
17
+
18
+ # Removing whitespace and newlines
19
+ text = re.sub(r'\s+',' ',text)
20
+
21
+ text = text.replace("£", " * ")
22
+
23
+ words = text.split()
24
+ text = ' '.join(words[:18]) # Join the first 18 words back into a string
25
+
26
+ return text
27
+
28
+ def remove_punctuation(text):
29
+ """Remove punctuation from a given text."""
30
+ punctuation_without_dot = string.punctuation.replace(".", "")
31
+ translator = str.maketrans('', '', punctuation_without_dot)
32
+ return text.translate(translator)
33
+
34
+ def get_keywords(text, num_keywords=5):
35
+ """Return top k keywords from a doc using TF-IDF method"""
36
+
37
+ # Create a TF-IDF Vectorizer
38
+ vectorizer = TfidfVectorizer(stop_words='english')
39
+
40
+ # Fit and transform the text
41
+ tfidf_matrix = vectorizer.fit_transform([text])
42
+
43
+ # Get feature names (words)
44
+ feature_names = vectorizer.get_feature_names_out()
45
+
46
+ # Get TF-IDF scores
47
+ tfidf_scores = tfidf_matrix.toarray()[0]
48
+
49
+ # Sort words by TF-IDF score
50
+ word_scores = list(zip(feature_names, tfidf_scores))
51
+ word_scores.sort(key=lambda x: x[1], reverse=True)
52
+
53
+ # Return top keywords
54
+ return [word for word, score in word_scores[:num_keywords]]
55
+
56
+ """
57
+ # Example usage
58
+ text = "Artificial intelligence (AI) is intelligence demonstrated by machines, as opposed to natural intelligence displayed by animals including humans. Leading AI textbooks define the field as the study of "intelligent agents": any system that perceives its environment and takes actions that maximize its chance of achieving its goals. Some popular accounts use the term "artificial intelligence" to describe machines that mimic "cognitive" functions that humans associate with the human mind, such as "learning" and "problem solving", however this definition is rejected by major AI researchers."
59
+ print(f"\n# Input text:\n'{text}'")
60
+ print("\n----------------------\n")
61
+
62
+ keywords = get_keywords(text)
63
+ print("# Top keywords:", keywords)
64
+ print("\n----------------------\n")
65
+ """
66
+
67
+ def get_important_sentences(paragraph: str, keywords: list[str], num_sentences: int = 3) -> list[str]:
68
+ """
69
+ Selects important sentences from a given paragraph based on a list of keywords.
70
+
71
+ Args:
72
+ paragraph (str): The input paragraph.
73
+ keywords (list[str]): List of important keywords.
74
+ num_sentences (int): Number of sentences to return (default is 3).
75
+
76
+ Returns:
77
+ list: A list of important sentences.
78
+ """
79
+ # Clean and split the paragraph into sentences
80
+ sentences = [s.strip() for s in re.split(r'(?<=[.!?])\s+', paragraph) if s.strip()]
81
+
82
+ # Calculate the importance score for each sentence
83
+ sentence_scores = []
84
+ for sentence in sentences:
85
+ processed_sentence = clean_text(sentence)
86
+ score = 0
87
+ words = processed_sentence.lower().split()
88
+ word_count = Counter(words)
89
+
90
+ for keyword in keywords:
91
+ if keyword.lower() in word_count:
92
+ score += word_count[keyword.lower()]
93
+
94
+ sentence_scores.append((sentence, score))
95
+
96
+ # Sort sentences by their scores in descending order
97
+ sentence_scores.sort(key=lambda x: x[1], reverse=True)
98
+
99
+ # Return the top N sentences
100
+ return [sentence for sentence, score in sentence_scores[:num_sentences]]
101
+
102
+ """# Example usage
103
+ keywords = get_keywords(paragraph)
104
+ important_sentences = get_important_sentences(paragraph, keywords)
105
+
106
+ print("# Important sentences:")
107
+ for i, sentence in enumerate(important_sentences, 1):
108
+ print(f"{i}. {sentence}")
109
+ print("\n----------------------\n")
110
+ """
111
+
112
+ def extract_important_phrases(paragraph: str, keywords: list[str], phrase_length: int = 5) -> list[str]:
113
+ """
114
+ Extracts important phrases from a given paragraph based on a list of keywords.
115
+ Phrase length is auto-determined, and overlapped parts are less than 20%.
116
+
117
+ Args:
118
+ paragraph (str): The input paragraph.
119
+ keywords (list[str]): List of important keywords.
120
+ phrase_length (int): The length of phrases to extract (default is 5 words).
121
+
122
+ Returns:
123
+ list: A list of important phrases.
124
+ """
125
+ # Tokenize the paragraph into words
126
+ words = word_tokenize(paragraph.lower())
127
+
128
+ # Determine phrase length (between 3 and 7 words)
129
+ phrase_length = min(max(len(words) // 10, 5), 7)
130
+
131
+ # Generate n-grams (phrases) from the paragraph
132
+ phrases = list(ngrams(words, phrase_length))
133
+
134
+ important_phrases = []
135
+ used_indices = set()
136
+
137
+ for i, phrase in enumerate(phrases):
138
+ # Check if the phrase contains any keyword
139
+ if any(keyword.lower() in phrase for keyword in keywords):
140
+ # Check overlap with previously selected phrases
141
+ if not any(abs(i - j) < phrase_length * 0.8 for j in used_indices):
142
+ important_phrases.append(clean_text(" ".join(phrase)))
143
+ used_indices.add(i)
144
+
145
+ return important_phrases
146
+
147
+ """# Example usage
148
+ keywords = get_keywords(paragraph)
149
+ important_phrases = extract_important_phrases(paragraph, keywords)
150
+
151
+ print("# Important phrases:")
152
+ for i, phrase in enumerate(important_phrases[:5], 1): # Print top 5 phrases
153
+ print(f"{i}. {phrase}")"""
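
A short end-to-end sketch of the keyword and sentence helpers above; the exact keywords depend on the TF-IDF fit, so the values in the comments are only indicative:

from src.application.text.helper import get_keywords, get_important_sentences

paragraph = (
    "Artificial intelligence is transforming newsrooms. "
    "Editors use artificial intelligence to draft headlines. "
    "Readers still prefer human-written analysis."
)
keywords = get_keywords(paragraph, num_keywords=3)  # e.g. ['artificial', 'intelligence', ...]
top_sentences = get_important_sentences(paragraph, keywords, num_sentences=2)
print(top_sentences)  # the two sentences with the highest keyword counts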
src/application/text/identity.py ADDED
@@ -0,0 +1,63 @@
1
+ from transformers import pipeline
2
+
3
+ ner_pipeline = pipeline("ner")
4
+
5
+ def extract_entities(text):
6
+ output = ner_pipeline(text)
7
+ words = extract_words(output)
8
+ words = combine_subwords(words)
9
+
10
+ # Deduplicate the extracted entity words while preserving their order
11
+ entities = []
12
+ for entity in words:
13
+ if entity not in entities:
14
+ entities.append(entity)
15
+
16
+ return entities
17
+
18
+
19
+ def extract_words(entities):
20
+ """
21
+ Extracts the words from a list of entities.
22
+
23
+ Args:
24
+ entities: A list of entities.
25
+
26
+ Returns:
27
+ A list of words extracted from the entities.
28
+ """
29
+ words = []
30
+ for entity in entities:
31
+ words.append(entity["word"])
32
+ return words
33
+
34
+
35
+ def combine_subwords(word_list):
36
+ """
37
+ Combines subwords (indicated by "##") with the preceding word in a list.
38
+
39
+ Args:
40
+ word_list: A list of words, where subwords are prefixed with "##".
41
+
42
+ Returns:
43
+ A new list with subwords combined with their preceding words.
44
+ """
45
+ result = []
46
+ i = 0
47
+ while i < len(word_list):
48
+ if word_list[i].startswith("##"):
49
+ result[-1] += word_list[i][2:] # Remove "##" and append to the previous word
50
+ elif i < len(word_list) - 2 and word_list[i + 1] == "-": # Combine hyphenated words
51
+ result.append(word_list[i] + word_list[i + 1] + word_list[i + 2])
52
+ i += 2 # Skip the next two words
53
+ else:
54
+ result.append(word_list[i])
55
+ i += 1
56
+ return result
57
+
58
+ if __name__ == "__main__":
59
+ text = "The Saudi authorities, I am told, are currently working flat out" \
60
+ " to collate everything they have on the Magdeburg market suspect," \
61
+ " Taleb al-Abdulmohsen, and to share it with Germany's ongoing" \
62
+ " investigation"
63
+ print(extract_entities(text))
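
A small worked example of the subword merging above, with tokens shaped like the WordPiece output of the NER pipeline; the expected result is shown as a comment:

from src.application.text.identity import combine_subwords

tokens = ["Mag", "##de", "##burg", "Taleb", "al", "-", "Abdulmohsen"]
print(combine_subwords(tokens))
# expected: ['Magdeburg', 'Taleb', 'al-Abdulmohsen']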
src/application/text/model_detection.py ADDED
@@ -0,0 +1,47 @@
1
+ from transformers import pipeline
2
+
3
+ # TODO: move to a config file
4
+ DEFAULT_MODEL = "Hello-SimpleAI/chatgpt-detector-roberta"
5
+
6
+ MODEL_HUMAN_LABEL = {DEFAULT_MODEL: "Human"}
7
+ HUMAN = "HUMAN"
8
+ MACHINE = "MACHINE"
9
+ UNKNOWN = "UNKNOWN"
10
+ PARAPHRASE = "PARAPHRASE"
11
+ NON_PARAPHRASE = "NON_PARAPHRASE"
12
+
13
+
14
+ def detect_by_ai_model(
15
+ input_text: str,
16
+ model: str = DEFAULT_MODEL,
17
+ max_length: int = 512,
18
+ ) -> tuple:
19
+ """
20
+ Model: chatgpt_detector_roberta
21
+ Ref: https://huggingface.co/Hello-SimpleAI/chatgpt-detector-roberta
22
+
23
+ Detects if text is human or machine generated.
24
+
25
+ Returns:
26
+ tuple: (label, confidence_score)
27
+ where label is HUMAN or MACHINE.
28
+ """
29
+ try:
30
+ pipe = pipeline(
31
+ "text-classification",
32
+ model=model,
33
+ tokenizer=model,
34
+ max_length=max_length,
35
+ truncation=True,
36
+ device_map="auto", # good for GPU usage
37
+ )
38
+ result = pipe(input_text)[0]
39
+ confidence_score = result["score"]
40
+ if result["label"] == MODEL_HUMAN_LABEL[model]:
41
+ label = HUMAN
42
+ else:
43
+ label = MACHINE
44
+ return label, confidence_score
45
+ except Exception as e: # Add exception handling
46
+ print(f"Error in Roberta model inference: {e}")
47
+ return UNKNOWN, 0.0 # Return UNKNOWN and 0.0 confidence if error
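
A minimal call of the detector above; the first run downloads the Hello-SimpleAI/chatgpt-detector-roberta weights from the Hub, and the score in the comment is illustrative:

from src.application.text.model_detection import detect_by_ai_model

label, score = detect_by_ai_model(
    "Artificial intelligence is the science of making machines intelligent."
)
print(label, round(score, 3))  # e.g. MACHINE 0.97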
src/application/text/preprocessing.py ADDED
@@ -0,0 +1,22 @@
1
+ from nltk.tokenize import sent_tokenize
2
+
3
+ def split_into_sentences(input_text):
4
+ """
5
+ Splits input text into sentences, treating newlines as paragraph breaks.
6
+
7
+ Args:
8
+ input_text: The input text as a string.
9
+
10
+ Returns:
11
+ A list of sentences. Returns an empty list if input is not valid.
12
+ """
13
+ if not isinstance(input_text, str):
14
+ return []
15
+
16
+ paragraphs = input_text.splitlines()
17
+ sentences = []
18
+ for paragraph in paragraphs:
19
+ paragraph = paragraph.strip()
20
+ if paragraph:
21
+ sentences.extend(sent_tokenize(paragraph))
22
+ return sentences
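
A quick example of the splitter above; it relies on the NLTK punkt data, which search_detection.py downloads at import time:

from src.application.text.preprocessing import split_into_sentences

text = "First paragraph. It has two sentences.\nSecond paragraph here."
print(split_into_sentences(text))
# ['First paragraph.', 'It has two sentences.', 'Second paragraph here.']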
src/application/text/search.py ADDED
@@ -0,0 +1,171 @@
1
+ from collections import Counter
2
+ import os
3
+ import string
4
+ import requests
5
+ from dotenv import load_dotenv
6
+ from nltk.corpus import stopwords
7
+ from nltk.tokenize import sent_tokenize, word_tokenize
8
+ from sklearn.feature_extraction.text import TfidfVectorizer
9
+
10
+ from src.application.text.identity import extract_entities
11
+
12
+ load_dotenv()
13
+ GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
14
+ SEARCH_ENGINE_ID = os.getenv("SEARCH_ENGINE_ID")
15
+
16
+ def search_by_google(
17
+ query,
18
+ num_results=10,
19
+ is_exact_terms = False
20
+ ) -> dict:
21
+ """
22
+ Searches the Google Custom Search Engine for the given query.
23
+
24
+ Args:
25
+ query: The search query.
26
+ is_exact_terms: Whether to use exact terms search (True) or regular search (False).
27
+ num_results: The number of results to return (default: 10).
28
+
29
+ Returns:
30
+ A dictionary containing the search results or None if there was an error.
31
+ """
32
+
33
+ url = "https://www.googleapis.com/customsearch/v1"
34
+ params = {
35
+ "key": GOOGLE_API_KEY,
36
+ "cx": SEARCH_ENGINE_ID,
37
+ "num": num_results,
38
+ }
39
+ if is_exact_terms:
40
+ params["exactTerms"] = query
41
+ else:
42
+ params["q"] = query.replace('"', "")
43
+
44
+ response = requests.get(url, params=params)
45
+ if response.status_code == 200:
46
+ return response.json()
47
+ else:
48
+ print(f"Error: {response.status_code}, {response.text}")
49
+ return None
50
+
51
+ def get_most_frequent_words(input_text, number_word=32):
52
+ """
53
+ Gets the top words from the input text, excluding stop words and punctuation.
54
+
55
+ Args:
56
+ input_text: The input text as a string.
57
+ number_word: The number of top words to return.
58
+
59
+ Returns:
60
+ A list of tuples, where each tuple contains a word and its frequency.
61
+ Returns an empty list if input is not a string or is empty.
62
+ """
63
+ if not isinstance(input_text, str) or not input_text:
64
+ return []
65
+
66
+ words = word_tokenize(input_text.lower()) # Tokenize and lowercase
67
+
68
+ stop_words = set(stopwords.words('english'))
69
+ punctuation = set(string.punctuation) # get all punctuation
70
+ filtered_words = [
71
+ word for word in words
72
+ if word.isalnum() and word not in stop_words and word not in punctuation
73
+ ]
74
+ word_frequencies = Counter(filtered_words)
75
+ top_words = word_frequencies.most_common(number_word)
76
+
77
+ for top_word in top_words:
78
+ words.append(top_word[0])
79
+
80
+ if len(words) > 32:
81
+ search_phrase = " ".join(words[:32])
82
+ else:
83
+ search_phrase = " ".join(words[:number_word])
84
+
85
+ return search_phrase
86
+
87
+ def get_chunk(input_text, chunk_length=32, num_chunk=3):
88
+ """
89
+ Splits the input text into chunks of a specified length.
90
+
91
+ Args:
92
+ input_text: The input text as a string.
93
+ num_chunk: The maximum number of chunks to create.
94
+ chunk_length: The desired length of each chunk (in words).
95
+
96
+ Returns:
97
+ A list of string chunks.
98
+ Returns an empty list if input is invalid.
99
+ """
100
+ if not isinstance(input_text, str):
101
+ return []
102
+
103
+ chunks = []
104
+ input_words = input_text.split() # Split by any whitespace
105
+
106
+ for i in range(num_chunk):
107
+ start_index = i * chunk_length
108
+ end_index = (i + 1) * chunk_length
109
+ chunk = " ".join(input_words[start_index:end_index])
110
+ if chunk: # Only append non-empty chunks
111
+ chunks.append(chunk)
112
+
113
+ return chunks
114
+
115
+ def get_keywords(text, num_keywords=5):
116
+ """Return top k keywords from a doc using TF-IDF method"""
117
+
118
+ # Create a TF-IDF Vectorizer
119
+ vectorizer = TfidfVectorizer(stop_words='english')
120
+
121
+ # Fit and transform the text
122
+ tfidf_matrix = vectorizer.fit_transform([text])
123
+
124
+ # Get feature names (words)
125
+ feature_names = vectorizer.get_feature_names_out()
126
+
127
+ # Get TF-IDF scores
128
+ tfidf_scores = tfidf_matrix.toarray()[0]
129
+
130
+ # Sort words by TF-IDF score
131
+ word_scores = list(zip(feature_names, tfidf_scores))
132
+ word_scores.sort(key=lambda x: x[1], reverse=True)
133
+
134
+ # Return top keywords
135
+ return [word for word, score in word_scores[:num_keywords]]
136
+
137
+
138
+ def generate_search_phrases(input_text):
139
+ """
140
+ Generates different types of phrases for search purposes.
141
+
142
+ Args:
143
+ input_text: The input text.
144
+
145
+ Returns:
146
+ A list containing:
147
+ - A list of most frequent words.
148
+ - The original input text.
149
+ - A list of text chunks.
150
+ """
151
+ if not isinstance(input_text, str):
152
+ return []
153
+
154
+ search_phrases = []
155
+
156
+ # Method 1: Get most frequent words
157
+ search_phrases.append(get_most_frequent_words(input_text))
158
+
159
+ # Method 2: Get the whole text
160
+ search_phrases.append(input_text)
161
+
162
+ # Method 3: Split text by chunks
163
+ search_phrases.extend(get_chunk(input_text))
164
+
165
+ # Method 4: Get most identities and key words
166
+ entities = extract_entities(input_text)
167
+ keywords = get_keywords(input_text, 16)
168
+ search_phrase = " ".join(entities) + " " + " ".join(keywords)
169
+ search_phrases.append(search_phrase)
170
+
171
+ return search_phrases
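
A hedged usage sketch of the search helpers above; it assumes GOOGLE_API_KEY and SEARCH_ENGINE_ID are present in .env and that the Custom Search daily quota is not exhausted:

from src.application.text.search import generate_search_phrases, search_by_google

text = "The BBC's consumer rights series Watchdog is to end as a standalone programme."
for phrase in generate_search_phrases(text):
    results = search_by_google(phrase, num_results=5)
    if results and results.get("items"):
        print([item["link"] for item in results["items"]])
        break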
src/application/text/search_detection.py ADDED
@@ -0,0 +1,284 @@
1
+ import warnings
2
+ warnings.simplefilter(action='ignore', category=FutureWarning)
3
+
4
+ from src.application.text.preprocessing import split_into_sentences
5
+ from src.application.text.search import generate_search_phrases, search_by_google
6
+ from src.application.url_reader import URLReader
7
+ import numpy as np
8
+ import nltk
9
+ import torch
10
+ from nltk.corpus import stopwords
11
+ from sentence_transformers import SentenceTransformer, util
12
+ import math
13
+
14
+ from difflib import SequenceMatcher
15
+
16
+ # Download necessary NLTK data files
17
+ nltk.download('punkt', quiet=True)
18
+ nltk.download('punkt_tab', quiet=True)
19
+ nltk.download('stopwords', quiet=True)
20
+
21
+ # load the model
22
+ DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
23
+ PARAPHASE_MODEL = SentenceTransformer('paraphrase-MiniLM-L6-v2')
24
+ PARAPHASE_MODEL.to(DEVICE)
25
+
26
+ BATCH_SIZE = 8
27
+
28
+ PARAPHRASE_THRESHOLD = 0.8
29
+ PARAPHRASE_THRESHOLD_FOR_OPPOSITE = 0.7
30
+ MIN_SAME_SENTENCE_LEN = 6
31
+ MIN_PHRASE_SENTENCE_LEN = 10
32
+ MIN_RATIO_PARAPHRASE_NUM = 0.7
33
+ MAX_CHAR_SIZE = 30000
34
+
35
+
36
+ def detect_by_relative_search(input_text, is_support_opposite = False):
37
+
38
+ checked_urls = set()
39
+ searched_phrases = generate_search_phrases(input_text)
40
+
41
+ for candidate in searched_phrases:
42
+ search_results = search_by_google(candidate)
43
+ urls = [item['link'] for item in search_results.get("items", [])]
44
+
45
+ for url in urls[:3]:
46
+ if url in checked_urls: # visited url
47
+ continue
48
+ checked_urls.add(url)
49
+ print(f"\tChecking URL: {url}")
50
+
51
+ content = URLReader(url)
52
+
53
+ if content.is_extracted is True:
54
+ page_text = content.title + "\n" + content.text
55
+ if page_text is None or len(page_text) > MAX_CHAR_SIZE:
56
+ print(f"\t\t↑↑↑ More than {MAX_CHAR_SIZE} characters")
57
+ continue
58
+ is_paraphrase, aligned_sentences = check_paraphrase(input_text, page_text)
59
+ if is_paraphrase:
60
+ return is_paraphrase, url, aligned_sentences
61
+ return False, None, []
62
+
63
+ def longest_common_subsequence(arr1, arr2):
64
+ """
65
+ Finds the length of the longest common subsequence (contiguous) between
66
+ two arrays.
67
+
68
+ Args:
69
+ arr1: The first array.
70
+ arr2: The second array.
71
+
72
+ Returns:
73
+ The length of the longest common subsequence.
74
+ Returns 0 if either input is invalid.
75
+ """
76
+
77
+ if not isinstance(arr1, list) or not isinstance(arr2, list):
78
+ return 0
79
+
80
+ n = len(arr1)
81
+ m = len(arr2)
82
+
83
+ if n == 0 or m == 0: #handle empty list
84
+ return 0
85
+
86
+ # Create table dp with size (n+1) x (m+1)
87
+ dp = [[0] * (m + 1) for _ in range(n + 1)]
88
+ max_length = 0
89
+
90
+ for i in range(1, n + 1):
91
+ for j in range(1, m + 1):
92
+ if arr1[i - 1] == arr2[j - 1]:
93
+ dp[i][j] = dp[i - 1][j - 1] + 1
94
+ max_length = max(max_length, dp[i][j])
95
+ else:
96
+ dp[i][j] = 0 # set 0 since the array must be consecutive
97
+
98
+ return max_length
99
+
100
+
101
+ def check_sentence(input_sentence, source_sentence, min_same_sentence_len,
102
+ min_phrase_sentence_len, verbose=False):
103
+ """
104
+ Checks if two sentences are similar based on exact match or
105
+ longest common subsequence.
106
+
107
+ Args:
108
+ input_sentence: The input sentence.
109
+ source_sentence: The source sentence.
110
+ min_same_sentence_len: Minimum length for exact sentence match.
111
+ min_phrase_sentence_len: Minimum length for common subsequence match.
112
+ verbose: If True, print debug information.
113
+
114
+ Returns:
115
+ True if the sentences are considered similar, False otherwise.
116
+ Returns False if input is not valid.
117
+ """
118
+
119
+ if not isinstance(input_sentence, str) or not isinstance(source_sentence, str):
120
+ return False
121
+
122
+ input_sentence = input_sentence.strip()
123
+ source_sentence = source_sentence.strip()
124
+
125
+ if not input_sentence or not source_sentence: # handle empty string
126
+ return False
127
+
128
+ input_words = input_sentence.split() # split without arguments
129
+ source_words = source_sentence.split() # split without arguments
130
+
131
+ if input_sentence == source_sentence and len(input_words) >= min_same_sentence_len:
132
+ if verbose:
133
+ print("Exact match found.")
134
+ return True
135
+
136
+ max_overlap_len = longest_common_subsequence(input_words, source_words)
137
+ if verbose:
138
+ print(f"Max overlap length: {max_overlap_len}") # print overlap length
139
+ if max_overlap_len >= min_phrase_sentence_len:
140
+ return True
141
+
142
+ return False
143
+
144
+
145
+ def check_paraphrase(input_text, page_text, verbose=False):
146
+ """
147
+ Checks if the input text is paraphrased in the content at the given URL.
148
+
149
+ Args:
150
+ input_text: The text to check for paraphrase.
151
+ url: The URL of the web page to compare with.
152
+ verbose: If True, print debug information.
153
+
154
+ Returns:
155
+ A tuple containing:
156
+ - is_paraphrase: True if the input text is considered a paraphrase, False otherwise.
157
+ - paraphrase_results: A list of dictionaries, each containing:
158
+ - input_sentence: The sentence from the input text.
159
+ - matched_sentence: The corresponding sentence from the web page (if found).
160
+ - similarity: The cosine similarity score between the sentences.
161
+ - is_paraphrase_sentence: True if the individual sentence pair meets the paraphrase criteria, False otherwise.
162
+ """
163
+ is_paraphrase_text = False
164
+
165
+ if not isinstance(input_text, str) or not isinstance(page_text, str):
166
+ return False, []
167
+
168
+ # Extract sentences from input text and web page
169
+ #input_text = remove_punctuation(input_text)
170
+ input_sentences = split_into_sentences(input_text)
171
+
172
+
173
+ if not page_text:
174
+ return is_paraphrase_text, []
175
+ #page_text = remove_punctuation(page_text)
176
+ page_sentences = split_into_sentences(page_text)
177
+
178
+ if not input_sentences or not page_sentences:
179
+ return is_paraphrase_text, []
180
+
181
+ additional_sentences = []
182
+ for sentence in page_sentences:
183
+ if ", external" in sentence:
184
+ additional_sentences.append(sentence.replace(", external", ""))
185
+ page_sentences.extend(additional_sentences)
186
+
187
+ min_matching_sentences = math.ceil(len(input_sentences) * MIN_RATIO_PARAPHRASE_NUM)
188
+
189
+ # Encode sentences into embeddings
190
+ embeddings1 = PARAPHASE_MODEL.encode(input_sentences, convert_to_tensor=True, device=DEVICE)
191
+ embeddings2 = PARAPHASE_MODEL.encode(page_sentences, convert_to_tensor=True, device=DEVICE)
192
+
193
+ # Compute cosine similarity matrix
194
+ similarity_matrix = util.cos_sim(embeddings1, embeddings2).cpu().numpy()
195
+
196
+ # Find sentence alignments
197
+ alignment = []
198
+ paraphrased_sentence_count = 0
199
+ for i, sentence1 in enumerate(input_sentences):
200
+ max_sim_index = np.argmax(similarity_matrix[i])
201
+ max_similarity = similarity_matrix[i][max_sim_index]
202
+
203
+ is_paraphrase_sentence = max_similarity > PARAPHRASE_THRESHOLD
204
+
205
+ if 0.80 < max_similarity < 0.99:
206
+ print(f"\t\tinput_sentence : {sentence1}")
207
+ print(f"\t\tmatched_sentence: {page_sentences[max_sim_index]}")
208
+ print(f"\t\t--> similarity: {max_similarity}\n")
209
+ item = {
210
+ "input_sentence": sentence1,
211
+ "matched_sentence": page_sentences[max_sim_index],
212
+ "similarity": max_similarity,
213
+ "is_paraphrase_sentence": is_paraphrase_sentence,
214
+ }
215
+
216
+ # Check for individual sentence paraphrase if overall paraphrase not yet found
217
+ if not is_paraphrase_text and check_sentence(
218
+ sentence1, page_sentences[max_sim_index], MIN_SAME_SENTENCE_LEN, MIN_PHRASE_SENTENCE_LEN
219
+ ):
220
+ is_paraphrase_text = True
221
+ if verbose:
222
+ print(f"Paraphrase found for individual sentence: {sentence1}")
223
+ print(f"Matched sentence: {page_sentences[max_sim_index]}")
224
+
225
+ alignment.append(item)
226
+ paraphrased_sentence_count += 1 if is_paraphrase_sentence else 0
227
+
228
+ # Check if enough sentences are paraphrases
229
+
230
+ is_paraphrase_text = paraphrased_sentence_count >= min_matching_sentences
231
+
232
+ if verbose:
233
+ print (f"\t\tparaphrased_sentence_count: {paraphrased_sentence_count}, min_matching_sentences: {min_matching_sentences}, total_sentence_count: {len(input_sentences)}")
234
+ print(f"Minimum matching sentences required: {min_matching_sentences}")
235
+ print(f"Total input sentences: {len(input_sentences)}")
236
+ print(f"Number of matching sentences: {paraphrased_sentence_count}")
237
+ print(f"Is paraphrase: {is_paraphrase_text}")
238
+ for item in alignment:
239
+ print(item)
240
+
241
+ return is_paraphrase_text, alignment
242
+
243
+ def similarity_ratio(a, b):
244
+ """
245
+ Calculates the similarity ratio between two strings using SequenceMatcher.
246
+
247
+ Args:
248
+ a: The first string.
249
+ b: The second string.
250
+
251
+ Returns:
252
+ A float representing the similarity ratio between 0.0 and 1.0.
253
+ Returns 0.0 if either input is None or not a string.
254
+ """
255
+ if not isinstance(a, str) or not isinstance(b, str) or a is None or b is None:
256
+ return 0.0 # Handle cases where inputs are not strings or None
257
+ return SequenceMatcher(None, a, b).ratio()
258
+
259
+ def check_human(data, min_ratio=MIN_RATIO_PARAPHRASE_NUM):
260
+ """
261
+ Checks if a sufficient number of input sentences are found within
262
+ source sentences.
263
+
264
+ Returns:
265
+ bool: True if the condition is met, False otherwise.
266
+ """
267
+ if not data: # Handle empty data case
268
+ return False
269
+ min_matching = math.ceil(len(data) * min_ratio)
270
+
271
+ count = 0
272
+
273
+ #for input_sentence, source_sentence, similiarity, is_paraprhase in data:
274
+ for sentence in data:
275
+ if sentence["similarity"] >= 0.99:
276
+ count += 1
277
+ print(f"\tmatching_sentence_count : {count}, min_matching: {min_matching}")
278
+ if count >= min_matching:
279
+ return True
280
+ return False
281
+
282
+
283
+ if __name__ == '__main__':
284
+ pass
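
A small worked example of the word-overlap check above; despite its name, longest_common_subsequence measures the longest contiguous run of words shared by both lists (note that importing the module also loads the paraphrase-MiniLM-L6-v2 model):

from src.application.text.search_detection import longest_common_subsequence

a = "the quick brown fox jumps over the lazy dog".split()
b = "a quick brown fox leaps over the lazy dog".split()
print(longest_common_subsequence(a, b))  # 4 -> "over the lazy dog"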
src/application/url_reader.py CHANGED
@@ -3,6 +3,8 @@ from bs4 import BeautifulSoup
3
  from newspaper import article, ArticleException, ArticleBinaryDataException
4
  import requests
5
 
 
 
6
  class URLReader():
7
  def __init__(self, url: string, newspaper: bool=True):
8
  self.url = url
@@ -10,6 +12,14 @@ class URLReader():
10
  self.title = None # string
11
  self.images = None # list of Image objects
12
  self.top_image = None # Image object
  self.newspaper = newspaper # True if using newspaper4k, False if using BS
14
  if self.newspaper is True:
15
  self.extract_content_newspaper()
@@ -73,4 +83,30 @@ class URLReader():
73
  paragraphs = soup.find_all('p')
74
  text = ' '.join([p.get_text() for p in paragraphs])
75
 
76
- self.text = text
 
3
  from newspaper import article, ArticleException, ArticleBinaryDataException
4
  import requests
5
 
6
+ # TODO: move this to a config file
7
+ MAX_URL_SIZE = 2000000 # ~2MB
8
  class URLReader():
9
  def __init__(self, url: string, newspaper: bool=True):
10
  self.url = url
 
12
  self.title = None # string
13
  self.images = None # list of Image objects
14
  self.top_image = None # Image object
15
+ self.is_extracted = False
16
+
17
+ url_size = self.get_size()
18
+ if url_size == None or url_size > MAX_URL_SIZE:
19
+ return
20
+ else:
21
+ self.is_extracted = True
22
+
23
  self.newspaper = newspaper # True if using newspaper4k, False if using BS
24
  if self.newspaper is True:
25
  self.extract_content_newspaper()
 
83
  paragraphs = soup.find_all('p')
84
  text = ' '.join([p.get_text() for p in paragraphs])
85
 
86
+ self.text = text
87
+
88
+ def get_size(self):
89
+ """
90
+ Retrieves the size of a URL's content using a HEAD request.
91
+
92
+ Args:
93
+ url: The URL to check.
94
+
95
+ Returns:
96
+ The size of the content in bytes, or None if the size cannot be determined
97
+ (e.g., due to network errors or missing Content-Length header).
98
+ """
99
+ try:
100
+ response = requests.head(self.url, allow_redirects=True, timeout=5) # Add timeout
101
+ response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx)
102
+
103
+ content_length = response.headers.get('Content-Length')
104
+ if content_length is not None:
105
+ return int(content_length)
106
+ else:
107
+ print(f"\t\t↑↑↑ Content-Length header not found")
108
+ return None
109
+
110
+ except requests.exceptions.RequestException as e:
111
+ print(f"\t\t↑↑↑ Error getting URL size: {e}")
112
+ return None
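
A minimal sketch of how search_detection.py consumes the reader; network access is required, and is_extracted stays False when the HEAD request fails or the reported size exceeds MAX_URL_SIZE:

from src.application.url_reader import URLReader

reader = URLReader("https://www.bbc.com/news", newspaper=True)
if reader.is_extracted:
    print(reader.title)
    print(len(reader.text or ""), "characters of body text")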
src/texts/Search_Text/evaluation.py CHANGED
@@ -40,15 +40,8 @@ def evaluation(texts):
40
  results = []
41
  index = 0
42
  for text in texts:
43
- <<<<<<< HEAD
44
- print("-" * 50)
45
- print(f"index = {index}\t {text[:100]}")
46
- bbc = [22, 32, 39, 43, 44, 64, 97]
47
- if index not in bbc:
48
- =======
49
  if index <= 82:
50
  print(f"index = {index}")
51
- >>>>>>> 59d2492d76034c795f0dbf2632f17d366fb31f14
52
  index += 1
53
  continue
54
 
 
40
  results = []
41
  index = 0
42
  for text in texts:
 
 
 
 
 
 
43
  if index <= 82:
44
  print(f"index = {index}")
 
45
  index += 1
46
  continue
47