SaiKumar1627 committed on
Commit
c05f206
·
verified ·
1 Parent(s): 80eb2ce

Update deliverable2.py

Browse files
Files changed (1) hide show
  1. deliverable2.py +42 -43
deliverable2.py CHANGED
@@ -1,6 +1,5 @@
1
  import requests
2
  from bs4 import BeautifulSoup
3
- import pandas as pd
4
  from sentence_transformers import SentenceTransformer, util
5
  from transformers import pipeline
6
 
@@ -19,7 +18,7 @@ class URLValidator:
19
  def fetch_page_content(self, url: str) -> str:
20
  """ Fetches and extracts text content from the given URL, handling errors gracefully. """
21
  try:
22
- headers = {"User-Agent": "Mozilla/5.0"}
23
  response = requests.get(url, timeout=10, headers=headers)
24
  response.raise_for_status()
25
  soup = BeautifulSoup(response.text, "html.parser")
@@ -81,50 +80,50 @@ class URLValidator:
81
  return " ".join(reasons) if reasons else "This source is highly credible and relevant."
82
 
83
  def rate_url_validity(self, user_query: str, url: str):
84
- """ Main function to evaluate the validity of a webpage. """
85
- content = self.fetch_page_content(url)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86
 
87
- # If content fetching failed, return a properly structured response
88
- if "Error" in content:
89
  return {
90
- "raw_score": {
91
- "Domain Trust": 0,
92
- "Content Relevance": 0,
93
- "Fact-Check Score": 0,
94
- "Bias Score": 0,
95
- "Final Validity Score": 0
96
  },
97
  "stars": {
98
- "icon": "❌"
99
  },
100
- "explanation": content # Display the error message
101
  }
102
-
103
- domain_trust = self.get_domain_trust(url, content)
104
- similarity_score = self.compute_similarity_score(user_query, content)
105
- fact_check_score = self.check_facts(content)
106
- bias_score = self.detect_bias(content)
107
-
108
- final_score = (
109
- (0.3 * domain_trust) +
110
- (0.3 * similarity_score) +
111
- (0.2 * fact_check_score) +
112
- (0.2 * bias_score)
113
- )
114
-
115
- stars, icon = self.get_star_rating(final_score)
116
- explanation = self.generate_explanation(domain_trust, similarity_score, fact_check_score, bias_score, final_score)
117
-
118
- return {
119
- "raw_score": {
120
- "Domain Trust": domain_trust,
121
- "Content Relevance": similarity_score,
122
- "Fact-Check Score": fact_check_score,
123
- "Bias Score": bias_score,
124
- "Final Validity Score": final_score
125
- },
126
- "stars": {
127
- "icon": icon
128
- },
129
- "explanation": explanation
130
- }
 
1
  import requests
2
  from bs4 import BeautifulSoup
 
3
  from sentence_transformers import SentenceTransformer, util
4
  from transformers import pipeline
5
 
 
18
  def fetch_page_content(self, url: str) -> str:
19
  """ Fetches and extracts text content from the given URL, handling errors gracefully. """
20
  try:
21
+ headers = {"User-Agent": "Mozilla/5.0"} # Helps bypass some bot protections
22
  response = requests.get(url, timeout=10, headers=headers)
23
  response.raise_for_status()
24
  soup = BeautifulSoup(response.text, "html.parser")
 
80
  return " ".join(reasons) if reasons else "This source is highly credible and relevant."
81
 
82
  def rate_url_validity(self, user_query: str, url: str):
83
+ """ Main function to evaluate the validity of a webpage. """
84
+ content = self.fetch_page_content(url)
85
+
86
+ # If content fetching failed, return a properly structured response
87
+ if "Error" in content:
88
+ return {
89
+ "raw_score": {
90
+ "Domain Trust": 0,
91
+ "Content Relevance": 0,
92
+ "Fact-Check Score": 0,
93
+ "Bias Score": 0,
94
+ "Final Validity Score": 0
95
+ },
96
+ "stars": {
97
+ "icon": "❌"
98
+ },
99
+ "explanation": content # Display the error message
100
+ }
101
+
102
+ domain_trust = self.get_domain_trust(url, content)
103
+ similarity_score = self.compute_similarity_score(user_query, content)
104
+ fact_check_score = self.check_facts(content)
105
+ bias_score = self.detect_bias(content)
106
+
107
+ final_score = (
108
+ (0.3 * domain_trust) +
109
+ (0.3 * similarity_score) +
110
+ (0.2 * fact_check_score) +
111
+ (0.2 * bias_score)
112
+ )
113
+
114
+ stars, icon = self.get_star_rating(final_score)
115
+ explanation = self.generate_explanation(domain_trust, similarity_score, fact_check_score, bias_score, final_score)
116
 
 
 
117
  return {
118
+ "raw_score": {
119
+ "Domain Trust": domain_trust,
120
+ "Content Relevance": similarity_score,
121
+ "Fact-Check Score": fact_check_score,
122
+ "Bias Score": bias_score,
123
+ "Final Validity Score": final_score
124
  },
125
  "stars": {
126
+ "icon": icon
127
  },
128
+ "explanation": explanation
129
  }