"""Module to crawl the website 'https://www.safe.gov.cn' to fetch and process articles."""

import random
import time
import urllib.error
import urllib.request
from datetime import datetime, timedelta

from lxml import etree
from prefect import task, get_run_logger

from controllers.utils import crawl_by_url


@task(name="Data Collection - safe", log_prints=True)
def crawl(delta):
    """
    Crawls the website "https://www.safe.gov.cn" to retrieve policy interpretation and data interpretation articles.

    Args:
        delta (int): The number of days in the past to consider for crawling.

    Returns:
        None
    """
    logger = get_run_logger()
    logger.info("safe.gov.cn")
    # Policy interpretation articles (zcfgjd). The first listing page has no
    # numeric suffix; subsequent pages are index_<n>.html.
    i = 1
    while i > -1:
        if i == 1:
            category_url = "https://www.safe.gov.cn/safe/zcfgjd/index.html"
        else:
            category_url = f"https://www.safe.gov.cn/safe/zcfgjd/index_{i}.html"
        i = i + 1
        max_retries = 5
        backoff_factor = 2
        retries = max_retries
        while retries > 0:
            try:
                req = urllib.request.urlopen(category_url, timeout=120)
                text = req.read()
                html_text = text.decode("utf-8")
                page = etree.HTML(html_text)
                articlelist = page.xpath("//div[contains(@class, 'list_conr')]/ul/li")
                break
            except (urllib.error.URLError, TimeoutError) as error:
                logger.error(f"Network error: {error}. Retries left: {retries - 1}")
                retries -= 1
                if retries > 0:
                    # Exponential backoff with jitter before the next attempt.
                    sleep_time = backoff_factor ** (max_retries - retries) + random.uniform(0, 1)
                    time.sleep(sleep_time)
                else:
                    logger.error(f"Failed to fetch {category_url} after {max_retries} attempts.")
                    articlelist = []
        for article in articlelist:
            if isinstance(article, etree._Element):
                subelement = etree.tostring(article).decode()
                subpage = etree.HTML(subelement)
                date = subpage.xpath("//dd/text()")[0]
                parsed_datetime = datetime.strptime(date, "%Y-%m-%d")
                if parsed_datetime < (datetime.today() - timedelta(days=delta)):
                    # Listings are newest first, so an article older than the
                    # crawl window means pagination can stop.
                    i = -1
                else:
                    urls = subpage.xpath("//a/@href")
                    for url in urls:
                        try:
                            article_data = {}
                            url = "https://www.safe.gov.cn" + url
                            article_data['category'] = "Policy Interpretation"
                            crawl_by_url(url, article_data)
                        except (urllib.error.URLError, etree.XMLSyntaxError) as error:
                            logger.error(error)

    # Data interpretation articles (sjjd), paginated the same way.
    i = 1
    while i > -1:
        if i == 1:
            category_url = "https://www.safe.gov.cn/safe/sjjd/index.html"
        else:
            category_url = f"https://www.safe.gov.cn/safe/sjjd/index_{i}.html"
        i = i + 1
        req = urllib.request.urlopen(category_url, timeout=60)
        text = req.read()
        html_text = text.decode("utf-8")
        page = etree.HTML(html_text)
        articlelist = page.xpath("//div[contains(@class, 'list_conr')]/ul/li")
        for article in articlelist:
            if isinstance(article, etree._Element):
                subelement = etree.tostring(article).decode()
                subpage = etree.HTML(subelement)
                date = subpage.xpath("//dd/text()")[0]
                parsed_datetime = datetime.strptime(date, "%Y-%m-%d")
                if parsed_datetime < (datetime.today() - timedelta(days=delta)):
                    i = -1
                else:
                    urls = subpage.xpath("//a/@href")
                    for url in urls:
                        try:
                            article_data = {}
                            url = "https://www.safe.gov.cn" + url
                            article_data['category'] = "Data Interpretation"
                            crawl_by_url(url, article_data)
                        except (urllib.error.URLError, etree.XMLSyntaxError) as error:
                            logger.error(error)
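# Illustrative local entry point: a minimal sketch, not part of the original
# pipeline. It assumes a Prefect 2-style flow wrapper so that get_run_logger()
# has a run context when the task executes; the flow name and the default
# delta of 30 days are hypothetical.
if __name__ == "__main__":
    from prefect import flow

    @flow(name="data-collection-safe-local-test")
    def _local_test(delta: int = 30):
        # Calling the task inside a flow gives it a proper task run context.
        crawl(delta)

    _local_test()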