File size: 2,943 Bytes
74475ac
 
 
 
 
 
4259f95
74475ac
 
 
5d719e2
74475ac
 
 
 
 
 
 
 
 
 
4259f95
 
74475ac
 
 
 
 
 
 
 
 
 
cc76656
74475ac
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d705151
4259f95
d705151
74475ac
4259f95
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
"""Module to crawl the website 'http://www.mofcom.gov.cn' to fetch and process articles."""
import time
import urllib.error
import urllib.request
from datetime import datetime, timedelta

from lxml import etree
from prefect import task, get_run_logger

from controllers.utils import crawl_by_url

@task(name = "Data Collection - mofcom", log_prints = True)
def crawl(delta):
    """
    Crawls the website http://www.mofcom.gov.cn to retrieve articles based on the specified delta.

    Pages through the 'zcjd' listing of each category until an article older
    than the cutoff is seen (listings are newest-first), handing each article
    URL to crawl_by_url with its category label.

    Parameters:
    - delta (int): The number of days in the past from today to retrieve articles.

    Returns:
    None
    """
    logger = get_run_logger()
    logger.info("mofcom.gov.cn")
    categories = ['jdzhsw', 'jdgnmy', 'jddwmy', 'jdtzhz']
    # Articles published before this moment are out of scope; hoisted out of
    # the loops since it is invariant for the whole run.
    cutoff = datetime.today() - timedelta(days=delta)
    for category in categories:
        page_no = 1
        while page_no > -1:
            # Page 1 has no query string; subsequent pages are addressed ?2, ?3, ...
            if page_no == 1:
                list_url = f"http://www.mofcom.gov.cn/article/zcjd/{category}/"
            else:
                list_url = f"http://www.mofcom.gov.cn/article/zcjd/{category}/?{page_no}"
            page_no = page_no + 1
            try:
                # Context manager closes the HTTP response deterministically
                # (the original leaked the connection on every request).
                with urllib.request.urlopen(list_url, timeout=60) as response:
                    html_text = response.read().decode("utf-8")
                page = etree.HTML(html_text)
                articlelist = page.xpath(
                        "//section[contains(@class, 'listCon iListCon f-mt30')]/ul/li")
                for item in articlelist:
                    # Public API check instead of the private etree._Element class.
                    if not etree.iselement(item):
                        continue
                    # Query the <li> element directly with a relative XPath
                    # instead of re-serializing and re-parsing it.
                    date_texts = item.xpath(".//span/text()")
                    if not date_texts:
                        # Malformed listing entry: skip it rather than crash the
                        # whole task with an IndexError.
                        continue
                    try:
                        published = datetime.strptime(date_texts[0], "%Y-%m-%d %H:%M:%S")
                    except ValueError as error:
                        logger.error(error)
                        continue
                    # Compare on the calendar date (midnight), which is what the
                    # original strftime/strptime round-trip computed.
                    published_day = published.replace(hour=0, minute=0,
                                                      second=0, microsecond=0)
                    if published_day < cutoff:
                        # Listings are newest-first: everything further is older,
                        # so stop paging this category.
                        page_no = -1
                    else:
                        for href in item.xpath(".//a/@href"):
                            try:
                                article = {}
                                if '/article/zcjd' in href:
                                    href = "http://www.mofcom.gov.cn" + href
                                    article['category'] = "Policy Interpretation"
                                else:
                                    article['category'] = "Policy Release"
                                crawl_by_url(href, article)
                            except (urllib.error.URLError, etree.XMLSyntaxError) as error:
                                logger.error(error)
            except (urllib.error.URLError, etree.XMLSyntaxError) as error:
                page_no = -1
                logger.error(error)