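"""Crawl recent policy articles from the Chinese State Council site (gov.cn).

Walks the "Policy Interpretation" (zhengce/jiedu) and "Policy Release"
(zhengce/zuixin) listing pages, stops once entries are older than roughly
six months, and hands each matching article URL plus its metadata dict
to utils.crawl().
"""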
from datetime import datetime, timedelta
import urllib.request

from lxml import etree

from utils import crawl
# Crawl the "Policy Interpretation" (zhengce/jiedu) listing pages, newest first.
i = 0
while i > -1:
    if i == 0:
        CATEGORY_URL = "https://www.gov.cn/zhengce/jiedu/home.htm"
    else:
        CATEGORY_URL = f"https://www.gov.cn/zhengce/jiedu/home_{i}.htm"
    i += 1

    req = urllib.request.urlopen(CATEGORY_URL)
    html_text = req.read().decode("utf-8")
    page = etree.HTML(html_text)

    articlelist = page.xpath("//div[contains(@class, 'news_box')]//h4")
    for item in articlelist:
        if isinstance(item, etree._Element):
            # Re-parse the serialized <h4> entry so the XPaths below are
            # scoped to this listing item only.
            subpage = etree.HTML(etree.tostring(item).decode())
            date = subpage.xpath("//span/text()")[0]
            parsed_datetime = datetime.strptime(date, "%Y-%m-%d")

            # Stop paging once entries fall outside the last ~6 months.
            if parsed_datetime < (datetime.today() - timedelta(days=183)):
                i = -1
            else:
                urls = subpage.xpath("//a[contains(@target, '_blank')]/@href")
                for url in urls:
                    try:
                        article = {}
                        url = url.replace('../', 'https://www.gov.cn/zhengce/')
                        if "https://www.gov.cn" in url:
                            article['category'] = "Policy Interpretation"
                            crawl(url, article)
                    except Exception as error:
                        print(error)

# Crawl the "Policy Release" (zhengce/zuixin) listing pages the same way,
# additionally tagging the source-site metadata.
i = 0
while i > -1:
    if i == 0:
        CATEGORY_URL = "https://www.gov.cn/zhengce/zuixin/home.htm"
    else:
        CATEGORY_URL = f"https://www.gov.cn/zhengce/zuixin/home_{i}.htm"
    i += 1

    req = urllib.request.urlopen(CATEGORY_URL)
    html_text = req.read().decode("utf-8")
    page = etree.HTML(html_text)

    articlelist = page.xpath("//div[contains(@class, 'news_box')]//h4")
    for item in articlelist:
        if isinstance(item, etree._Element):
            # Re-parse the serialized <h4> entry so the XPaths below are
            # scoped to this listing item only.
            subpage = etree.HTML(etree.tostring(item).decode())
            date = subpage.xpath("//span/text()")[0]
            parsed_datetime = datetime.strptime(date, "%Y-%m-%d")

            # Stop paging once entries fall outside the last ~6 months.
            if parsed_datetime < (datetime.today() - timedelta(days=183)):
                i = -1
            else:
                urls = subpage.xpath("//a[contains(@target, '_blank')]/@href")
                for url in urls:
                    try:
                        article = {}
                        url = url.replace('../', 'https://www.gov.cn/zhengce/')
                        if "https://www.gov.cn" in url:
                            article['category'] = "Policy Release"
                            article['originSite'] = "国务院"
                            article['site'] = "State Council of China"
                            crawl(url, article)
                    except Exception as error:
                        print(error)