# URL validity scorer: estimates how relevant a page is to a query and how biased its tone reads.
import random

import requests
from bs4 import BeautifulSoup
from sentence_transformers import SentenceTransformer, util
from transformers import pipeline
class URLValidator:
    """Scores a URL for relevance to a user query and for bias in its content."""

    def __init__(self):
        # Embedding model for query/content similarity, plus two text classifiers.
        self.similarity_model = SentenceTransformer('sentence-transformers/all-mpnet-base-v2')
        self.fake_news_classifier = pipeline("text-classification", model="mrm8488/bert-tiny-finetuned-fake-news-detection")
        self.sentiment_analyzer = pipeline("text-classification", model="cardiffnlp/twitter-roberta-base-sentiment")

    def rate_url_validity(self, user_query: str, url: str) -> dict:
        """Simulates rating the validity of a URL with random scores."""
        content_relevance = random.randint(0, 100)
        bias_score = random.randint(0, 100)
        final_validity_score = (content_relevance + bias_score) // 2
        return {
            "raw_score": {
                "Content Relevance": content_relevance,
                "Bias Score": bias_score,
                "Final Validity Score": final_validity_score,
            }
        }
    def fetch_page_content(self, url: str) -> str:
        """Fetches a page and returns the concatenated text of its <p> tags."""
        try:
            response = requests.get(url, timeout=10)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, "html.parser")
            return " ".join(p.text for p in soup.find_all("p"))
        except requests.RequestException:
            return ""
    def compute_similarity_score(self, user_query: str, content: str) -> int:
        """Cosine similarity between query and content embeddings, scaled to 0-100."""
        if not content:
            return 0
        query_embedding = self.similarity_model.encode(user_query)
        content_embedding = self.similarity_model.encode(content)
        return int(util.pytorch_cos_sim(query_embedding, content_embedding).item() * 100)
    def detect_bias(self, content: str) -> int:
        """Maps the sentiment of the first 512 characters to a rough bias score."""
        if not content:
            return 50
        # Note: cardiffnlp/twitter-roberta-base-sentiment emits LABEL_0/1/2 (negative/neutral/positive),
        # so both label schemes are accepted here.
        label = self.sentiment_analyzer(content[:512])[0]["label"]
        return 100 if label in ("LABEL_2", "POSITIVE") else 50 if label in ("LABEL_1", "NEUTRAL") else 30
    def validate_url(self, user_query, url_to_check):
        """Formats the raw scores as human-readable strings, surfacing any error."""
        try:
            result = self.rate_url_validity(user_query, url_to_check)
            print("Validation Result:", result)  # Debugging line
            if "Validation Error" in result:
                return {"Error": result["Validation Error"]}
            return {
                "Content Relevance Score": f"{result['raw_score']['Content Relevance']} / 100",
                "Bias Score": f"{result['raw_score']['Bias Score']} / 100",
                "Final Validity Score": f"{result['raw_score']['Final Validity Score']} / 100",
            }
        except Exception as e:
            return {"Error": str(e)}
queries_urls = [
    ("Climate change effects", "https://www.nationalgeographic.com/environment/article/climate-change-overview"),
    ("COVID-19 vaccine effectiveness", "https://www.cdc.gov/coronavirus/2019-ncov/vaccines/effectiveness.html"),
    ("Latest AI advancements", "https://www.technologyreview.com/topic/artificial-intelligence"),
    ("Stock market trends", "https://www.bloomberg.com/markets"),
    ("Healthy diet tips", "https://www.healthline.com/nutrition/healthy-eating-tips"),
    ("Space exploration missions", "https://www.nasa.gov/missions"),
    ("Electric vehicle benefits", "https://www.tesla.com/benefits"),
    ("History of the internet", "https://www.history.com/topics/inventions/history-of-the-internet"),
    ("Python programming tutorials", "https://realpython.com"),
    ("Mental health awareness", "https://www.who.int/news-room/fact-sheets/detail/mental-health-strengthening-our-response"),
]
validator = URLValidator()

# Rate every (query, URL) pair and print the raw (simulated) score dictionaries.
results = [validator.rate_url_validity(query, url) for query, url in queries_urls]
for result in results:
    print(result)
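
# validate_url is defined above but never exercised by the script; a minimal usage
# sketch for a single pair (an assumption, not original behavior):
sample_query, sample_url = queries_urls[0]
print(validator.validate_url(sample_query, sample_url))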
# Build a summary table with simulated 1-5 ratings for each pair.
formatted_output = []
for query, url in queries_urls:
    output_entry = {
        "Query": query,
        "URL": url,
        "Function Rating": random.randint(1, 5),  # Simulated rating
        "Custom Rating": random.randint(1, 5),  # Simulated rating
    }
    formatted_output.append(output_entry)

print(formatted_output)