Update app.py

app.py CHANGED
@@ -3,12 +3,16 @@ import torch
 from torch.nn.functional import softmax
 import shap
 import requests
+from bs4 import BeautifulSoup
+from sklearn.metrics.pairwise import cosine_similarity
-from transformers import RobertaTokenizer,RobertaForSequenceClassification, pipeline
+from transformers import RobertaTokenizer, RobertaForSequenceClassification, RobertaModel, pipeline
 from IPython.core.display import HTML
 model_dir = 'temp'
 tokenizer = RobertaTokenizer.from_pretrained(model_dir)
 model = RobertaForSequenceClassification.from_pretrained(model_dir)
 #pipe = pipeline("text-classification", model="thugCodeNinja/robertatemp")
+tokenizer1 = RobertaTokenizer.from_pretrained('roberta-base')
+model1 = RobertaModel.from_pretrained('roberta-base')
 pipe = pipeline("text-classification",model=model,tokenizer=tokenizer)
 def process_text(input_text):
     if input_text:
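After this hunk the app holds two RoBERTa variants: the fine-tuned RobertaForSequenceClassification loaded from the Space's local 'temp' checkpoint for the classification label, and a bare RobertaModel ('roberta-base') used only for embeddings. The distinction matters because only the bare encoder exposes last_hidden_state. A minimal sketch of the difference, using the public 'roberta-base' weights for both heads so it runs anywhere (the Space substitutes its own checkpoint for the classifier):

import torch
from transformers import RobertaTokenizer, RobertaForSequenceClassification, RobertaModel

tok = RobertaTokenizer.from_pretrained('roberta-base')
clf = RobertaForSequenceClassification.from_pretrained('roberta-base')  # adds a classification head
enc = RobertaModel.from_pretrained('roberta-base')                      # bare encoder

batch = tok("hello world", return_tensors="pt")
with torch.no_grad():
    print(clf(**batch).logits.shape)             # (1, num_labels): class scores
    print(enc(**batch).last_hidden_state.shape)  # (1, seq_len, 768): per-token states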
@@ -34,24 +38,47 @@ def process_text(input_text):
             return data
         except Exception as e:
             return {'error': str(e)}
+    def get_article_text(url):
+        try:
+            response = requests.get(url)
+            if response.status_code == 200:
+                soup = BeautifulSoup(response.content, 'html.parser')
+                # Extract text from the article content (you may need to adjust this based on the website's structure)
+                article_text = ' '.join([p.get_text() for p in soup.find_all('p')])
+                return article_text
+        except Exception as e:
+            print(f"An error occurred: {e}")
+        return ''
     def find_plagiarism(text):
         search_results = search(text)
         if 'items' not in search_results:
             return []
         similar_articles = []
         for item in search_results['items']:
-            title = item.get('title', '')
             link = item.get('link', '')
-
-
+            article_text = get_article_text(link)
+            if article_text:
+                # Tokenize and encode the input text and the article text
+                encoding1 = tokenizer1(text, max_length=512, truncation=True, padding=True, return_tensors="pt")
+                encoding2 = tokenizer1(article_text, max_length=512, truncation=True, padding=True, return_tensors="pt")
+
+                # Calculate embeddings using the model
+                with torch.no_grad():
+                    embedding1 = model1(**encoding1).last_hidden_state.mean(dim=1)
+                    embedding2 = model1(**encoding2).last_hidden_state.mean(dim=1)
+
+                # Calculate cosine similarity between the input text and the article text embeddings
+                similarity = cosine_similarity(embedding1, embedding2)[0][0]
+                similar_articles.append({'Link': link, 'Similarity': similarity})
+        similar_articles = sorted(similar_articles, key=lambda x: x['Similarity'], reverse=True)
+        threshold = 0.5  # Adjust the threshold as needed
+        similar_articles = [article for article in similar_articles if article['Similarity'] > threshold]
+        return similar_articles[:5]
 
     prediction = pipe([text])
     explainer = shap.Explainer(pipe)
     shap_values = explainer([text])
     shap_plot_html = HTML(shap.plots.text(shap_values, display=False)).data
-    # HTML(shap.plots.text(shap_values, display=False))
-    # with open('rendered.html', 'w') as file:
-    #     file.write(shap.plots.text(shap_values, display=False))
     similar_articles = find_plagiarism(text)
 
     return processed_result, prob, final_label, shap_plot_html,similar_articles
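The core of the new plagiarism check reduces each document to a mean-pooled roberta-base embedding and compares the two vectors with cosine similarity. A self-contained sketch of that scoring step (the names emb_tok/emb and the sample strings are illustrative; attention-mask-aware pooling would be a common refinement the commit does not apply):

import torch
from sklearn.metrics.pairwise import cosine_similarity
from transformers import RobertaTokenizer, RobertaModel

emb_tok = RobertaTokenizer.from_pretrained('roberta-base')
emb = RobertaModel.from_pretrained('roberta-base')

def embed(text: str) -> torch.Tensor:
    # Truncate to RoBERTa's 512-token window, then mean-pool the token states
    enc = emb_tok(text, max_length=512, truncation=True,
                  padding=True, return_tensors="pt")
    with torch.no_grad():
        return emb(**enc).last_hidden_state.mean(dim=1)  # shape (1, 768)

a = embed("The quick brown fox jumps over the lazy dog.")
b = embed("A fast brown fox leapt over a sleepy dog.")
print(cosine_similarity(a, b)[0][0])  # closer to 1.0 for near-paraphrases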
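Separately from the plagiarism check, process_text renders a SHAP explanation of the classifier's decision as embeddable HTML. The pattern in isolation, using a public sentiment checkpoint so the sketch runs standalone (the Space passes its own RoBERTa pipeline instead):

import shap
from transformers import pipeline

# Any text-classification pipeline works; SHAP wraps transformers pipelines directly
pipe = pipeline("text-classification",
                model="distilbert-base-uncased-finetuned-sst-2-english")

explainer = shap.Explainer(pipe)
shap_values = explainer(["An input sentence to explain"])

# display=False returns an HTML string suitable for embedding in a page
html = shap.plots.text(shap_values, display=False)
with open('rendered.html', 'w') as f:
    f.write(html)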
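find_plagiarism depends on a search(text) helper defined in an unchanged part of app.py, apparently returning a Google Custom Search-style response with an 'items' list of result links. A stubbed check of just the commit's ranking logic, with the search helper and scores faked for illustration:

# Hypothetical stand-in for the Space's search() helper, which this diff does not show
def search(text):
    return {'items': [{'link': 'https://example.com/a'},
                      {'link': 'https://example.com/b'}]}

def rank_candidates(scored, threshold=0.5, top_k=5):
    # Mirrors the commit's post-processing: sort by similarity,
    # drop weak matches, keep at most five
    scored = sorted(scored, key=lambda x: x['Similarity'], reverse=True)
    return [a for a in scored if a['Similarity'] > threshold][:top_k]

print(rank_candidates([{'Link': 'https://example.com/a', 'Similarity': 0.82},
                       {'Link': 'https://example.com/b', 'Similarity': 0.31}]))
# only the 0.82 match survives the 0.5 threshold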