File size: 1,465 Bytes
57c4050
 
 
 
 
42ba1cc
57c4050
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b2a3d45
57c4050
42ba1cc
57c4050
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
import uuid
import time
import urllib.request
from datetime import datetime, timedelta
from lxml import etree
from utils import encode, crawl

# Crawl the NBS "Data Interpretation" (sjjd) category listing pages and hand
# each article URL to the shared `crawl` helper.  Pagination walks
# index, index_1.html, index_2.html, ... and stops at the first article
# older than ~6 months (183 days); listings are newest-first, so everything
# after the first stale article is stale too.
BASE_URL = "https://www.stats.gov.cn/sj/sjjd/"
# Hoisted: the cutoff is invariant for the whole run.
CUTOFF = datetime.today() - timedelta(days=183)

page_index = 0
done = False
while not done:
    # Page 0 is the bare category index; later pages are index_<n>.html.
    if page_index == 0:
        category_url = BASE_URL
    else:
        category_url = f"{BASE_URL}index_{page_index}.html"
    page_index += 1

    # Context manager closes the HTTP response instead of leaking it.
    with urllib.request.urlopen(category_url) as resp:
        html_text = resp.read().decode("utf-8")
    page = etree.HTML(html_text)

    articlelist = page.xpath("//div[contains(@class, 'list-content')]/ul/li")
    for item in articlelist:
        if not isinstance(item, etree._Element):
            continue
        # Re-parse the <li> in isolation so the XPaths below are rooted at it.
        subpage = etree.HTML(etree.tostring(item).decode())
        # NOTE(review): `encode` is a project helper — assumed to return a
        # "YYYY-MM-DD" string extracted from the <span>; verify in utils.
        date = encode(subpage.xpath("//span"))
        # Parse the date string directly; the old
        # strptime -> strftime -> strptime round-trip was a no-op.
        parsed_datetime = datetime.strptime(date, "%Y-%m-%d")
        if parsed_datetime < CUTOFF:
            # Newest-first listing: this and everything after it is too old,
            # so stop both the page scan and the pagination loop.
            done = True
            break
        urls = subpage.xpath("//a[@class='fl pc_1600']/@href")
        for url in urls:
            try:
                # Fresh metadata dict per article (no shadowing of the
                # element loop variable).
                article = {}
                article['category'] = "Data Interpretation"
                # Relative './' links are rooted at the category page.
                url = url.replace('./', BASE_URL)
                crawl(url, article)
            except Exception as error:
                # Best-effort: log and continue with the next article.
                print(error)