Merlintxu committed · Commit c2b829f · verified · 1 Parent(s): f415fc9

Update seo_analyzer.py

Files changed (1)
  1. seo_analyzer.py +108 -65
seo_analyzer.py CHANGED
@@ -6,7 +6,7 @@ import PyPDF2
  import numpy as np
  import pandas as pd
  from io import BytesIO
- from typing import List, Dict, Tuple
  from urllib.parse import urlparse, urljoin
  from concurrent.futures import ThreadPoolExecutor, as_completed
  from bs4 import BeautifulSoup
@@ -22,15 +22,15 @@ import spacy
  import matplotlib.pyplot as plt
  from utils import sanitize_filename
 
- # Palabras no permitidas en SEO financiero/bancario
  PROHIBITED_TERMS = [
  "gratis", "garantizado", "rentabilidad asegurada", "sin compromiso",
  "resultados inmediatos", "cero riesgo", "sin letra pequeña"
  ]
 
- logging.basicConfig(level=logging.INFO)
- logger = logging.getLogger(__name__)
-
  class SEOSpaceAnalyzer:
  def __init__(self, max_urls: int = 20, max_workers: int = 4):
  self.max_urls = max_urls
@@ -64,20 +64,36 @@ class SEOSpaceAnalyzer:
  "zeroshot": pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
  }
 
- def analyze_sitemap(self, sitemap_url: str) -> Tuple:
  urls = self._parse_sitemap(sitemap_url)
  if not urls:
- return {"error": "No se pudieron extraer URLs"}, [], {}, {}, [], {}, {}, {}
-
  results = []
- with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
- futures = {executor.submit(self._process_url, url): url for url in urls[:self.max_urls]}
- for future in as_completed(futures):
- try:
- results.append(future.result())
- except Exception as e:
- results.append({"url": futures[future], "status": "error", "error": str(e)})
 
  summaries, entities = self._apply_nlp(results)
  similarities = self._compute_similarity(results)
  flags = self._flag_prohibited_terms(results)
@@ -100,10 +116,11 @@ class SEOSpaceAnalyzer:
  }
 
  a = self.current_analysis
  return (
  a["stats"], a["recommendations"], a["content_analysis"],
- a["links"], a["details"], a["summaries"],
- a["similarities"], a["seo_tags"]
  )
 
  def _process_url(self, url: str) -> Dict:
@@ -118,7 +135,7 @@ class SEOSpaceAnalyzer:
 
  def _process_html(self, url: str, html: str) -> Dict:
  soup = BeautifulSoup(html, "html.parser")
- text = re.sub(r"\\s+", " ", soup.get_text())
  return {
  "url": url,
  "type": "html",
@@ -144,7 +161,7 @@ class SEOSpaceAnalyzer:
  except Exception as e:
  return {"url": url, "status": "error", "error": str(e)}
 
- def _extract_metadata(self, soup) -> Dict:
  meta = {"title": "", "description": ""}
  if soup.title:
  meta["title"] = soup.title.string.strip()
@@ -153,7 +170,7 @@ class SEOSpaceAnalyzer:
  meta["description"] = tag.get("content", "")
  return meta
 
- def _extract_links(self, soup, base_url) -> List[Dict]:
  links = []
  base_domain = urlparse(base_url).netloc
  for tag in soup.find_all("a", href=True):
@@ -172,25 +189,80 @@ class SEOSpaceAnalyzer:
  r = self.session.get(sitemap_url)
  soup = BeautifulSoup(r.text, "lxml-xml")
  return [loc.text for loc in soup.find_all("loc")]
- except:
  return []
 
- def _apply_nlp(self, results) -> Tuple[Dict, Dict]:
  summaries, entities = {}, {}
  for r in results:
- if r.get("status") != "success" or not r.get("content"): continue
  text = r["content"][:1024]
  try:
  summaries[r["url"]] = self.models["summarizer"](text, max_length=100, min_length=30)[0]["summary_text"]
  ents = self.models["ner"](text)
  entities[r["url"]] = list({e["word"] for e in ents if e["score"] > 0.8})
- except:
  continue
  return summaries, entities
 
- def _compute_similarity(self, results) -> Dict[str, List[Dict]]:
  docs = [(r["url"], r["content"]) for r in results if r.get("status") == "success" and r.get("content")]
- if len(docs) < 2: return {}
  urls, texts = zip(*docs)
  emb = self.models["semantic"].encode(texts, convert_to_tensor=True)
  sim = util.pytorch_cos_sim(emb, emb)
@@ -200,7 +272,7 @@ class SEOSpaceAnalyzer:
  for i in range(len(urls))
  }
 
- def _flag_prohibited_terms(self, results) -> Dict[str, List[str]]:
  flags = {}
  for r in results:
  found = [term for term in PROHIBITED_TERMS if term in r.get("content", "").lower()]
@@ -208,32 +280,33 @@ class SEOSpaceAnalyzer:
  flags[r["url"]] = found
  return flags
 
- def _classify_topics(self, results) -> Dict[str, List[str]]:
  labels = [
  "hipotecas", "préstamos", "cuentas", "tarjetas",
  "seguros", "inversión", "educación financiera"
  ]
  topics = {}
  for r in results:
- if r.get("status") != "success": continue
  try:
  res = self.models["zeroshot"](r["content"][:1000], candidate_labels=labels, multi_label=True)
  topics[r["url"]] = [l for l, s in zip(res["labels"], res["scores"]) if s > 0.5]
- except:
  continue
  return topics
 
- def _generate_seo_tags(self, results, summaries, topics, flags) -> Dict[str, Dict]:
  seo_tags = {}
  for r in results:
  url = r["url"]
  base = summaries.get(url, r.get("content", "")[:300])
- topic = topics.get(url, ["contenido"])[0]
  try:
  prompt = f"Genera un título SEO formal y una meta descripción para contenido sobre {topic}: {base}"
  output = self.models["summarizer"](prompt, max_length=60, min_length=20)[0]["summary_text"]
  title, desc = output.split(".")[0], output
- except:
  title, desc = "", ""
  seo_tags[url] = {
  "title": title,
@@ -242,37 +315,7 @@ class SEOSpaceAnalyzer:
  }
  return seo_tags
 
- def _calculate_stats(self, results):
- success = [r for r in results if r.get("status") == "success"]
- return {
- "total": len(results),
- "success": len(success),
- "failed": len(results) - len(success),
- "avg_words": round(np.mean([r.get("word_count", 0) for r in success]), 1)
- }
-
- def _analyze_content(self, results):
- texts = [r["content"] for r in results if r.get("status") == "success" and r.get("content")]
- if not texts:
- return {}
- tfidf = TfidfVectorizer(max_features=20, stop_words=list(self.models["spacy"].Defaults.stop_words))
- tfidf.fit(texts)
- top = tfidf.get_feature_names_out().tolist()
- return {"top_keywords": top, "samples": texts[:3]}
-
- def _analyze_links(self, results):
- all_links = []
- for r in results:
- all_links.extend(r.get("links", []))
- if not all_links:
- return {}
- df = pd.DataFrame(all_links)
- return {
- "internal_links": df[df["type"] == "internal"]["url"].value_counts().head(10).to_dict(),
- "external_links": df[df["type"] == "external"]["url"].value_counts().head(10).to_dict()
- }
-
- def _generate_recommendations(self, results):
  recs = []
  if any(r.get("word_count", 0) < 300 for r in results):
  recs.append("✍️ Algunos contenidos son demasiado breves (<300 palabras)")
@@ -280,7 +323,7 @@ class SEOSpaceAnalyzer:
  recs.append("⚠️ Detectado uso de lenguaje no permitido")
  return recs or ["✅ Todo parece correcto"]
 
- def plot_internal_links(self, links: Dict):
  if not links or not links.get("internal_links"):
  fig, ax = plt.subplots()
  ax.text(0.5, 0.5, "No hay enlaces internos", ha="center")
 
  import numpy as np
  import pandas as pd
  from io import BytesIO
+ from typing import List, Dict, Tuple, Optional
  from urllib.parse import urlparse, urljoin
  from concurrent.futures import ThreadPoolExecutor, as_completed
  from bs4 import BeautifulSoup
 
  import matplotlib.pyplot as plt
  from utils import sanitize_filename
 
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ # Términos prohibidos (ejemplo)
  PROHIBITED_TERMS = [
  "gratis", "garantizado", "rentabilidad asegurada", "sin compromiso",
  "resultados inmediatos", "cero riesgo", "sin letra pequeña"
  ]
 
  class SEOSpaceAnalyzer:
  def __init__(self, max_urls: int = 20, max_workers: int = 4):
  self.max_urls = max_urls
 
  "zeroshot": pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
  }
 
+ def analyze_sitemap(
+ self,
+ sitemap_url: str,
+ progress_callback: Optional[callable] = None,
+ status_callback: Optional[callable] = None
+ ) -> Tuple:
  urls = self._parse_sitemap(sitemap_url)
  if not urls:
+ return {"error": "No se pudieron extraer URLs"}, [], {}, {}, {}, {}, {}
  results = []
+ batch_size = 5
+ num_urls = min(len(urls), self.max_urls)
+ total_batches = (num_urls + batch_size - 1) // batch_size
 
+ for batch_index in range(total_batches):
+ start = batch_index * batch_size
+ batch_urls = urls[start:start+batch_size]
+ if status_callback:
+ status_callback(f"Procesando batch {batch_index+1}/{total_batches}: {batch_urls}")
+ with ThreadPoolExecutor(max_workers=len(batch_urls)) as executor:
+ futures = {executor.submit(self._process_url, url): url for url in batch_urls}
+ for future in as_completed(futures):
+ try:
+ results.append(future.result())
+ except Exception as e:
+ results.append({"url": futures[future], "status": "error", "error": str(e)})
+ if progress_callback:
+ progress_callback(batch_index+1, total_batches)
+
+ # Aplicar procesos de NLP a los resultados
  summaries, entities = self._apply_nlp(results)
  similarities = self._compute_similarity(results)
  flags = self._flag_prohibited_terms(results)
 
  }
 
  a = self.current_analysis
+ # Retornamos 7 outputs (sin summaries, que no se muestran en la UI)
  return (
  a["stats"], a["recommendations"], a["content_analysis"],
+ a["links"], a["details"], a["similarities"],
+ a["seo_tags"]
  )
 
  def _process_url(self, url: str) -> Dict:
 
 
  def _process_html(self, url: str, html: str) -> Dict:
  soup = BeautifulSoup(html, "html.parser")
+ text = re.sub(r"\s+", " ", soup.get_text())
  return {
  "url": url,
  "type": "html",
 
  except Exception as e:
  return {"url": url, "status": "error", "error": str(e)}
 
+ def _extract_metadata(self, soup: BeautifulSoup) -> Dict:
  meta = {"title": "", "description": ""}
  if soup.title:
  meta["title"] = soup.title.string.strip()
 
  meta["description"] = tag.get("content", "")
  return meta
 
+ def _extract_links(self, soup: BeautifulSoup, base_url: str) -> List[Dict]:
  links = []
  base_domain = urlparse(base_url).netloc
  for tag in soup.find_all("a", href=True):
 
  r = self.session.get(sitemap_url)
  soup = BeautifulSoup(r.text, "lxml-xml")
  return [loc.text for loc in soup.find_all("loc")]
+ except Exception as e:
+ logger.error(f"Error al parsear sitemap {sitemap_url}: {e}")
  return []
 
+ def _save_content(self, url: str, content: bytes) -> None:
+ try:
+ parsed = urlparse(url)
+ domain_dir = self.base_dir / parsed.netloc
+ path = parsed.path.lstrip("/")
+ if not path or path.endswith("/"):
+ path = os.path.join(path, "index.html")
+ safe_path = sanitize_filename(path)
+ save_path = domain_dir / safe_path
+ save_path.parent.mkdir(parents=True, exist_ok=True)
+ new_hash = hash(content)
+ if save_path.exists():
+ with open(save_path, "rb") as f:
+ if hash(f.read()) == new_hash:
+ logger.debug(f"El contenido de {url} ya está guardado.")
+ return
+ with open(save_path, "wb") as f:
+ f.write(content)
+ logger.info(f"Guardado contenido en: {save_path}")
+ except Exception as e:
+ logger.error(f"Error guardando contenido para {url}: {e}")
+
+ def _calculate_stats(self, results: List[Dict]) -> Dict:
+ success = [r for r in results if r.get("status") == "success"]
+ return {
+ "total": len(results),
+ "success": len(success),
+ "failed": len(results) - len(success),
+ "avg_words": round(np.mean([r.get("word_count", 0) for r in success]) if success else 0, 1)
+ }
+
+ def _analyze_content(self, results: List[Dict]) -> Dict:
+ texts = [r["content"] for r in results if r.get("status") == "success" and r.get("content")]
+ if not texts:
+ return {}
+ tfidf = TfidfVectorizer(max_features=20, stop_words=list(self.models["spacy"].Defaults.stop_words))
+ tfidf.fit(texts)
+ top = tfidf.get_feature_names_out().tolist()
+ return {"top_keywords": top, "samples": texts[:3]}
+
+ def _analyze_links(self, results: List[Dict]) -> Dict:
+ all_links = []
+ for r in results:
+ all_links.extend(r.get("links", []))
+ if not all_links:
+ return {}
+ df = pd.DataFrame(all_links)
+ return {
+ "internal_links": df[df["type"] == "internal"]["url"].value_counts().head(10).to_dict(),
+ "external_links": df[df["type"] == "external"]["url"].value_counts().head(10).to_dict()
+ }
+
+ def _apply_nlp(self, results: List[Dict]) -> Tuple[Dict, Dict]:
  summaries, entities = {}, {}
  for r in results:
+ if r.get("status") != "success" or not r.get("content"):
+ continue
  text = r["content"][:1024]
  try:
  summaries[r["url"]] = self.models["summarizer"](text, max_length=100, min_length=30)[0]["summary_text"]
  ents = self.models["ner"](text)
  entities[r["url"]] = list({e["word"] for e in ents if e["score"] > 0.8})
+ except Exception as e:
  continue
  return summaries, entities
 
+ def _compute_similarity(self, results: List[Dict]) -> Dict[str, List[Dict]]:
  docs = [(r["url"], r["content"]) for r in results if r.get("status") == "success" and r.get("content")]
+ if len(docs) < 2:
+ return {}
  urls, texts = zip(*docs)
  emb = self.models["semantic"].encode(texts, convert_to_tensor=True)
  sim = util.pytorch_cos_sim(emb, emb)
 
  for i in range(len(urls))
  }
 
+ def _flag_prohibited_terms(self, results: List[Dict]) -> Dict[str, List[str]]:
  flags = {}
  for r in results:
  found = [term for term in PROHIBITED_TERMS if term in r.get("content", "").lower()]
 
  flags[r["url"]] = found
  return flags
 
+ def _classify_topics(self, results: List[Dict]) -> Dict[str, List[str]]:
  labels = [
  "hipotecas", "préstamos", "cuentas", "tarjetas",
  "seguros", "inversión", "educación financiera"
  ]
  topics = {}
  for r in results:
+ if r.get("status") != "success":
+ continue
  try:
  res = self.models["zeroshot"](r["content"][:1000], candidate_labels=labels, multi_label=True)
  topics[r["url"]] = [l for l, s in zip(res["labels"], res["scores"]) if s > 0.5]
+ except Exception as e:
  continue
  return topics
 
+ def _generate_seo_tags(self, results: List[Dict], summaries: Dict, topics: Dict, flags: Dict) -> Dict[str, Dict]:
  seo_tags = {}
  for r in results:
  url = r["url"]
  base = summaries.get(url, r.get("content", "")[:300])
+ topic = topics.get(url, ["contenido"])[0] if topics.get(url) else "contenido"
  try:
  prompt = f"Genera un título SEO formal y una meta descripción para contenido sobre {topic}: {base}"
  output = self.models["summarizer"](prompt, max_length=60, min_length=20)[0]["summary_text"]
  title, desc = output.split(".")[0], output
+ except Exception as e:
  title, desc = "", ""
  seo_tags[url] = {
  "title": title,
 
  }
  return seo_tags
 
+ def _generate_recommendations(self, results: List[Dict]) -> List[str]:
  recs = []
  if any(r.get("word_count", 0) < 300 for r in results):
  recs.append("✍️ Algunos contenidos son demasiado breves (<300 palabras)")
 
  recs.append("⚠️ Detectado uso de lenguaje no permitido")
  return recs or ["✅ Todo parece correcto"]
 
+ def plot_internal_links(self, links: Dict) -> any:
  if not links or not links.get("internal_links"):
  fig, ax = plt.subplots()
  ax.text(0.5, 0.5, "No hay enlaces internos", ha="center")
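
For context, a minimal usage sketch of the updated analyze_sitemap signature as it appears in this diff. This is not part of the commit: it assumes the class is importable from a module named seo_analyzer, uses a placeholder sitemap URL, and follows the callback shapes shown above (progress_callback(done, total) and status_callback(message)); the seven unpacked outputs mirror the new return tuple (stats, recommendations, content_analysis, links, details, similarities, seo_tags).

# Hypothetical caller sketch; names and URL below are placeholders, not from the commit.
from seo_analyzer import SEOSpaceAnalyzer

def on_progress(done: int, total: int) -> None:
    # Matches progress_callback(batch_index + 1, total_batches) in the diff.
    print(f"Progreso: {done}/{total} batches")

def on_status(message: str) -> None:
    # Matches status_callback(f"Procesando batch ...") in the diff.
    print(message)

analyzer = SEOSpaceAnalyzer(max_urls=20, max_workers=4)
stats, recommendations, content_analysis, links, details, similarities, seo_tags = (
    analyzer.analyze_sitemap(
        "https://example.com/sitemap.xml",  # placeholder sitemap URL
        progress_callback=on_progress,
        status_callback=on_status,
    )
)
print(stats)
print(recommendations)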