Update app.py
app.py CHANGED
@@ -5,7 +5,7 @@ import shap
 import requests
 from bs4 import BeautifulSoup
 from sklearn.metrics.pairwise import cosine_similarity
-from transformers import RobertaTokenizer,RobertaForSequenceClassification, pipeline
+from transformers import RobertaTokenizer,RobertaForSequenceClassification, pipeline,RobertaModel
 from IPython.core.display import HTML
 model_dir = 'temp'
 tokenizer = RobertaTokenizer.from_pretrained(model_dir)
@@ -64,8 +64,8 @@ def process_text(input_text):
 
     # Calculate embeddings using the model
     with torch.no_grad():
-        embedding1 =
-        embedding2 =
+        embedding1 = model1(**encoding1).last_hidden_state.mean(dim=1)
+        embedding2 = model1(**encoding2).last_hidden_state.mean(dim=1)
 
     # Calculate cosine similarity between the input text and the article text embeddings
     similarity = cosine_similarity(embedding1, embedding2)[0][0]
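
For context on the second hunk: the added lines mean-pool RobertaModel's last hidden state into one vector per text, then compare the two vectors with scikit-learn's cosine_similarity. The sketch below shows how that step could fit together end to end. It is a minimal illustration, not the Space's actual code: it assumes 'temp' holds a RoBERTa checkpoint, and the encode() helper and the example strings are made up here; only the pooling and similarity calls mirror the diff.

# Minimal sketch of the embedding + similarity step added in this commit.
# Assumption: 'temp' contains a RoBERTa checkpoint compatible with RobertaModel.
import torch
from transformers import RobertaTokenizer, RobertaModel
from sklearn.metrics.pairwise import cosine_similarity

model_dir = 'temp'                                    # as in app.py
tokenizer = RobertaTokenizer.from_pretrained(model_dir)
model1 = RobertaModel.from_pretrained(model_dir)      # plain encoder, no classification head
model1.eval()

def encode(text):
    # Tokenize, run the encoder, and mean-pool the last hidden state
    # into a single (1, hidden_size) vector for the whole text.
    encoding = tokenizer(text, return_tensors='pt', truncation=True, max_length=512)
    with torch.no_grad():
        hidden = model1(**encoding).last_hidden_state  # (1, seq_len, hidden_size)
    return hidden.mean(dim=1).numpy()

embedding1 = encode("Text pasted by the user")         # illustrative inputs
embedding2 = encode("Text scraped from the article")

# Cosine similarity between the two pooled embeddings, as in process_text()
similarity = cosine_similarity(embedding1, embedding2)[0][0]
print(f"similarity = {similarity:.3f}")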