"""Module to crawl the website 'https://www.stats.gov.cn' to fetch and process articles."""
import http.client
import random
import time
import urllib.error
import urllib.request
from datetime import datetime, timedelta

from lxml import etree
from prefect import task, get_run_logger

from controllers.utils import crawl_by_url, encode


@task(name="Data Collection - stats", log_prints=True)
def crawl(delta):
    """
    Crawls the website "https://www.stats.gov.cn/sj/sjjd/" and
    retrieves articles within a specified time range.

    Args:
        delta (int): The number of days to go back from the current date.

    Returns:
        None
    """
    logger = get_run_logger()
    logger.info("stats.gov.cn")
    i = 0
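    # Page 1 of the listing is the bare directory (index.html is implicit);
    # subsequent pages are index_1.html, index_2.html, ... The loop runs
    # until i is set to -1 below.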
    while i > -1:
        if i == 0:
            category_url = "https://www.stats.gov.cn/sj/sjjd/"
        else:
            category_url = f"https://www.stats.gov.cn/sj/sjjd/index_{i}.html"
        i += 1
        max_retries = 5
        backoff_factor = 2
        retries = max_retries
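        # With backoff_factor=2 and max_retries=5, failed attempts wait
        # roughly 2s, 4s, 8s, 16s (each plus up to 1s of jitter) before
        # the page is given up on.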
        while retries > 0:
            try:
                with urllib.request.urlopen(category_url, timeout=60) as req:
                    text = req.read()
                html_text = text.decode("utf-8")
                page = etree.HTML(html_text)
                articlelist = page.xpath("//div[contains(@class, 'list-content')]/ul/li")
                break  # Success, exit retry loop
            except (urllib.error.URLError, http.client.IncompleteRead, TimeoutError) as error:
                retries -= 1
                logger.info(f"Network error: {error}. Retries left: {retries}")
                if retries > 0:
                    sleep_time = backoff_factor ** (max_retries - retries) + random.uniform(0, 1)
                    time.sleep(sleep_time)
                else:
                    logger.error(f"Failed to fetch {category_url} after {max_retries} attempts.")
                    articlelist = []  # Prevents UnboundLocalError in the loop below
                    i = -1  # Stop paginating instead of retrying the next page forever
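        # Each <li> in the listing is expected to carry the publish date in a
        # <span> and the article link in an <a class="fl pc_1600"> element.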
        for article in articlelist:
            if isinstance(article, etree._Element):
                subelement = etree.tostring(article).decode()
                subpage = etree.HTML(subelement)
                date = encode(subpage.xpath("//span"))
                # The date string is already in "%Y-%m-%d" form; parse it directly.
                parsed_datetime = datetime.strptime(date, "%Y-%m-%d")
                if parsed_datetime < (datetime.today() - timedelta(days=delta)):
                    i = -1  # Article is older than the window; stop paginating
                else:
                    urls = subpage.xpath("//a[@class='fl pc_1600']/@href")
                    for url in urls:
                        try:
                            # Use a fresh dict; reusing the name `article`
                            # would shadow the outer loop variable.
                            article_data = {}
                            # Resolve the relative './' prefix against the
                            # category URL instead of replacing every './'.
                            if url.startswith("./"):
                                url = "https://www.stats.gov.cn/sj/sjjd/" + url[2:]
                            article_data['category'] = "Data Interpretation"
                            crawl_by_url(url, article_data)
                        except (urllib.error.URLError, etree.XMLSyntaxError) as error:
                            logger.info(f"Failed to crawl {url}: {error}")
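

if __name__ == "__main__":
    # Minimal local-run sketch (an assumption for debugging, not part of the
    # deployment): `crawl` is a Prefect task, so it needs a flow run context
    # for get_run_logger(). Wrapping it in a throwaway flow lets you
    # smoke-test the crawler with, e.g., a 7-day window.
    from prefect import flow

    @flow(name="stats-debug")
    def _debug_flow(delta: int = 7):
        crawl(delta)

    _debug_flow()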