Update seo_analyzer.py

seo_analyzer.py CHANGED (+49 -9)
@@ -32,6 +32,7 @@ logging.basicConfig(
 )
 logger = logging.getLogger(__name__)
 
+
 class SEOSpaceAnalyzer:
     def __init__(self, max_urls: int = 20, max_workers: int = 4) -> None:
         """
@@ -82,7 +83,7 @@ class SEOSpaceAnalyzer:
 
     def analyze_sitemap(self, sitemap_url: str) -> Tuple[Dict, List[str], Dict, Dict, List[Dict]]:
         """
-        Processes the sitemap: extracts URLs, analyzes each page, and returns aggregated data.
+        Processes the sitemap: extracts URLs, analyzes each page individually, and returns aggregated data.
         """
         try:
             urls = self._parse_sitemap(sitemap_url)
@@ -141,7 +142,7 @@
             return {'url': url, 'status': 'error', 'error': str(e)}
 
     def _process_html(self, html: str, base_url: str) -> Dict:
-        """Extracts and cleans the HTML content, metadata, and links."""
+        """Extracts and cleans the HTML content, metadata, and links from the page."""
         soup = BeautifulSoup(html, 'html.parser')
         clean_text = self._clean_text(soup.get_text())
         return {
@@ -183,7 +184,7 @@
         return re.sub(r'[^\w\sáéíóúñÁÉÍÓÚÑ]', ' ', text).strip()
 
     def _extract_metadata(self, soup: BeautifulSoup) -> Dict:
-        """Extracts relevant metadata from the page."""
+        """Extracts relevant metadata (title, description, keywords, Open Graph) from the page."""
         metadata = {'title': '', 'description': '', 'keywords': [], 'og': {}}
         if soup.title and soup.title.string:
             metadata['title'] = soup.title.string.strip()[:200]
@@ -200,7 +201,7 @@
         return metadata
 
     def _extract_links(self, soup: BeautifulSoup, base_url: str) -> List[Dict]:
-        """Extracts
+        """Extracts the links from the page, distinguishing between internal and external."""
         links: List[Dict] = []
         base_netloc = urlparse(base_url).netloc
         for tag in soup.find_all('a', href=True):
@@ -222,12 +223,12 @@
         return links
 
     def _get_file_type(self, path: str) -> str:
-        """Determines the file type
+        """Determines the file type from its extension."""
         ext = Path(path).suffix.lower()
         return ext[1:] if ext else 'html'
 
     def _parse_sitemap(self, sitemap_url: str) -> List[str]:
-        """Parses an XML sitemap to extract URLs."""
+        """Parses an XML sitemap (and any sitemap indexes) to extract URLs."""
         try:
             response = self.session.get(sitemap_url, timeout=10)
             response.raise_for_status()
@@ -294,7 +295,7 @@
 
     def _analyze_content(self, results: List[Dict]) -> Dict:
         """
-
+        Generates an aggregated content analysis, using TF-IDF to extract the top keywords and content samples.
         """
         successful = [r for r in results if r.get('status') == 'success' and r.get('content')]
         texts = [r['content'] for r in successful if len(r['content'].split()) > 10]
@@ -314,7 +315,7 @@
         return {'top_keywords': top_keywords, 'content_samples': samples}
 
     def _analyze_links(self, results: List[Dict]) -> Dict:
-        """Generates an analysis of internal links and
+        """Generates an analysis of internal links, external domains, anchor texts, and file types."""
         all_links = []
         for result in results:
             if result.get('links'):
@@ -330,4 +331,43 @@
         }
 
     def _generate_seo_recommendations(self, results: List[Dict]) -> List[str]:
-        """Generates SEO recommendations
+        """Generates SEO recommendations based on the issues found during analysis."""
+        successful = [r for r in results if r.get('status') == 'success']
+        if not successful:
+            return ["No content could be analyzed successfully"]
+        recs = []
+        missing_titles = sum(1 for r in successful if not r.get('metadata', {}).get('title'))
+        if missing_titles:
+            recs.append(f"📌 Add titles to {missing_titles} pages")
+        short_descriptions = sum(1 for r in successful if not r.get('metadata', {}).get('description'))
+        if short_descriptions:
+            recs.append(f"📌 Add meta descriptions to {short_descriptions} pages")
+        short_content = sum(1 for r in successful if r.get('word_count', 0) < 300)
+        if short_content:
+            recs.append(f"📝 Expand content on {short_content} pages (fewer than 300 words)")
+        all_links = [link for r in results for link in r.get('links', [])]
+        if all_links:
+            df_links = pd.DataFrame(all_links)
+            internal_links = df_links[df_links['type'] == 'internal']
+            if len(internal_links) > 100:
+                recs.append(f"🔗 Optimize internal link structure ({len(internal_links)} links)")
+        return recs if recs else ["✅ No critical SEO issues detected"]
+
+    def plot_internal_links(self, links_data: Dict) -> Any:
+        """
+        Generates a horizontal bar chart of the top 20 internal links.
+        If no data is available, a message is shown on the chart instead.
+        """
+        internal_links = links_data.get('internal_links', {})
+        fig, ax = plt.subplots()
+        if not internal_links:
+            ax.text(0.5, 0.5, 'No internal links', horizontalalignment='center', verticalalignment='center', transform=ax.transAxes)
+            ax.axis('off')
+        else:
+            names = list(internal_links.keys())
+            counts = list(internal_links.values())
+            ax.barh(names, counts)
+            ax.set_xlabel("Number of links")
+            ax.set_title("Top 20 Internal Links")
+        plt.tight_layout()
+        return fig
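
For reference, below is a minimal usage sketch exercising the two methods this commit adds. It is a sketch under assumptions, not part of the commit: the seo_analyzer import path is inferred from the filename, the synthetic result dicts only mirror the keys that _generate_seo_recommendations reads in the diff ('status', 'metadata', 'word_count', 'links'), and the shape of links_data['internal_links'] (a name-to-count mapping) is inferred from how plot_internal_links consumes it.

# Usage sketch; module name inferred from the filename, data is illustrative.
from seo_analyzer import SEOSpaceAnalyzer

analyzer = SEOSpaceAnalyzer(max_urls=20, max_workers=4)

# Synthetic per-page results whose keys mirror what the new
# recommendation method reads; values here are made up.
results = [
    {
        'status': 'success',
        'metadata': {'title': '', 'description': 'Demo page'},
        'word_count': 120,
        'links': [{'type': 'internal', 'url': 'https://example.com/a'}],
    },
    {'status': 'error', 'error': 'timeout'},
]

# For the single successful page above this yields a missing-title
# recommendation and a thin-content recommendation.
for rec in analyzer._generate_seo_recommendations(results):
    print(rec)

# plot_internal_links expects a dict with an 'internal_links' mapping
# (assumed: link name -> count) and returns a matplotlib Figure.
links_data = {'internal_links': {'/home': 12, '/blog': 7, '/contact': 3}}
fig = analyzer.plot_internal_links(links_data)
fig.savefig('internal_links.png')

In normal use these inputs would presumably come from analyze_sitemap, whose five-element return value aggregates the per-page results and link statistics that the new helpers consume.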