Commit 362b085c authored by 薛凌堃

内蒙古人民政府

Parent ca21124d
import os
@@ -6,10 +6,17 @@ import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
from base import BaseCore
import BaseCore
baseCore = BaseCore.BaseCore()
log = baseCore.getLogger()
from reits import Policy
policy = Policy()
topic = 'policy'
webname = '内蒙古自治区人民政府'
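# Common request headers used for the HTTP calls below.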
headers = {
'Accept': 'application/json, text/plain, */*',
'Accept-Encoding': 'gzip, deflate, br',
@@ -64,9 +71,9 @@ def getSoup(url):
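# Query the search API for the keyword "REITs" and derive the number of result pages from the reported total (10 records per page).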
def getPageSize():
ip = baseCore.get_proxy()
url = 'https://www.nmg.gov.cn/nmsearch/trssearch/searchAll.do?siteId=32&searchTag=zc&allKeywords=REITs&fullKeywords=&orKeywords=&notKeywords=&sort=&position=0&organization=&pageNum=1&pageSize=10&zcYear=&zcMonth=&docno=&cdesc=&publisher=&cityName=&isAlways=1&isSearchRmzfAndBgt=&isAccurate=1'
req = requests.get(url, headers=headers, proxies=ip)
req = requests.get(url, headers=headers)
req.encoding = req.apparent_encoding
total = int(req.json()['data']['total'])
if total % 10 == 0:
@@ -77,23 +84,22 @@ def getPageSize():
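# Fetch one page of search results and return the list of records.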
def getJson(page):
ip = baseCore.get_proxy()
# ip = baseCore.get_proxy()
url = f'https://www.nmg.gov.cn/nmsearch/trssearch/searchAll.do?siteId=32&searchTag=zc&allKeywords=REITs&fullKeywords=&orKeywords=&notKeywords=&sort=&position=0&organization=&pageNum={page}&pageSize=10&zcYear=&zcMonth=&docno=&cdesc=&publisher=&cityName=&isAlways=1&isSearchRmzfAndBgt=&isAccurate=1'
req = requests.get(url, headers=headers, proxies=ip)
req = requests.get(url, headers=headers)
req.encoding = req.apparent_encoding
return req.json()['data']['data']
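# Download the raw bytes of an attachment.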
def getFjContent(url):
ip = baseCore.get_proxy()
req = requests.get(url, headers=headers_, proxies=ip)
# ip = baseCore.get_proxy()
req = requests.get(url, headers=headers_)
req.encoding = req.apparent_encoding
return req.content
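# Process a single search record: skip URLs already collected, fetch the detail page, extract the body, publication metadata and attachments, then push the result to Kafka.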
def getContent(num, data):
fjhref_list = ''
fjtitle_list = ''
id_list = []
title = data['title']
pub_hao = data['docno']
origin = data['sitedesc']
@@ -102,12 +108,16 @@ def getContent(num, data):
try:
writtenDate = data['scrq']
except:
writtenDate = ''
writtenDate = None
summary = BeautifulSoup(data['zc_doccontent'], 'html.parser').text.lstrip().strip()
url = data['docpuburl']
# Deduplicate by URL: skip records whose link is already in the Redis set
is_member = baseCore.r.sismember('REITs::' + webname, url)
if is_member:
return
soup = getSoup(url)
if soup == '':
return ''
return
url_ = url.split('/')[-1]
soup = paserUrl(soup, url.replace(url_, ''))
contentWithTag = soup.find('div', attrs={'id': 'pare'})
@@ -119,7 +129,9 @@ def getContent(num, data):
contentWithTag = soup.find('div', attrs={'class': 'zoomCon'})
if not contentWithTag:
contentWithTag = soup.find('div', attrs={'id': 'pagecontent'})
if writtenDate == '':
if not contentWithTag:
contentWithTag = soup.find('div', id="docContent")
if not writtenDate:
try:
tr_list = soup.find('table', class_='m-detailtb').find_all('tr')
for tr in tr_list:
@@ -154,46 +166,69 @@ def getContent(num, data):
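# Iterate over every <a> tag in the article body and handle each linked attachment.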
a_list = contentWithTag.find_all('a')
for a in a_list:
href = a.get('href')
fjhref_list += href + '\n'
category = os.path.splitext(href)[1]
fj_title = f'{num}-{publishDate}-{a.text.lstrip().strip()}'
if '<' in fj_title or '>' in fj_title:
fj_title = fj_title.replace('<', '').replace('>', '')
if category not in fj_title:
fj_title = fj_title + category
fjtitle_list += fj_title + '\n'
fjcontent = getFjContent(href)
file = f'./相关政策/内蒙古自治区人民政府/政策文件/{fj_title}'
with open(file, 'wb') as f:
f.write(fjcontent)
log.info(f'{fj_title}===附件下载成功')
# Attachment: register the file and repoint the link to its stored path
att_id, full_path = policy.attuributefile(fj_title, href, num, publishDate)
if att_id:
id_list.append(att_id)
a['href'] = full_path
except Exception as e:
log.error(f'{title}====={e}')
content = contentWithTag.text.lstrip().strip()
data_ = [num, title, publishDate, origin, url, writtenDate, organ, pub_hao, summary, content, fjtitle_list,
fjhref_list]
return data_
try:
content = contentWithTag.text.lstrip().strip()
except:
log.info(url)
return
contentWithTag_str = str(contentWithTag)
time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
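# Assemble the record to be sent to Kafka.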
dic_info = {
'attachmentIds': id_list,
'author': '',
'content': content,
'contentWithTag': contentWithTag_str,
'deleteFlag': 0,
'id': '',
'title': title,
'publishDate': publishDate,
'origin': origin,
'sourceAddress': url,
'writtenDate': writtenDate,
'organ': organ,
'topicClassification': '',
'issuedNumber': pub_hao,
'summary': summary,
'createDate': time_now,
'sid': '1729041959772860417',
}
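# Send the record to Kafka and remember the URL in Redis; if sending fails, remove any attachments that were already registered.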
try:
baseCore.sendkafka(dic_info, topic)
baseCore.r.sadd('REITs::' + webname, url)
log.info(f'采集成功--{title}--{url}')
except Exception as e:
for att_id in id_list:
baseCore.deliteATT(att_id)
return
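# Main driver: make sure the download directory exists, then walk every result page and collect records from the "政策文件" (policy documents) channel.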
def doJob():
if not os.path.exists('./相关政策/内蒙古自治区人民政府/政策文件'):
os.makedirs('./相关政策/内蒙古自治区人民政府/政策文件')
data_list = []
pageSize = getPageSize()
num = 1
for page in range(1, pageSize + 1):
data_json = getJson(page)
for data_ in data_json:
if data_['chnldesc'] == '政策文件':
data = getContent(num, data_)
if data:
data_list.append(data)
num += 1
log.info(f'{data[1]}===采集成功')
df = pd.DataFrame(np.array(data_list))
df.columns = ['序号', '标题', '发布时间', '来源', '原文链接', '发文时间', '发文机构', '发文字号', '摘要', '正文', '附件名称', '附件连接']
df.to_excel('./内蒙古自治区人民政府政策文件.xlsx', index=False)
getContent(num, data_)
num += 1
if __name__ == '__main__':
doJob()