Commit 78a94cdb by 薛凌堃

上海市人民政府

Parent 69ba202a
 import json
@@ -8,10 +8,17 @@ import requests
 from bs4 import BeautifulSoup
 from selenium.webdriver.common.by import By
-from base import BaseCore
+import BaseCore
 baseCore = BaseCore.BaseCore()
 log = baseCore.getLogger()
+from reits import Policy
+policy = Policy()
+topic = 'policy'
+webname = '上海市人民政府'
 headers = {
     'Accept': '*/*',
     'Accept-Encoding': 'gzip, deflate, br',
@@ -54,17 +61,21 @@ def getFjContent(url):
 def getData(data_, driver, num):
-    fjhref_list = ''
-    fjtitle_list = ''
+    id_list = []
     title = data_['title']['raw']
     publishDate = data_['date']['raw']
     origin = '上海市人民政府'
     href = data_['url']['raw']
+    # Deduplicate by URL against the Redis set
+    is_member = baseCore.r.sismember('REITs::' + webname, href)
+    if is_member:
+        return
     organ = data_['fwjg']['raw']
     pub_hao = data_['wh']['raw']
     summary = data_['content']['snippet']
     driver.get(href)
     time.sleep(1)
+    # Keep the element handle so the tagged HTML can be captured later
+    contentWithTag = driver.find_element(By.CLASS_NAME, 'Article_content')
     content = driver.find_element(By.CLASS_NAME, 'Article_content').text.lstrip().strip()
     timeTag = driver.find_element(By.CLASS_NAME, 'PBtime').text
     try:
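Note on the dedupe step: getData() now checks Redis membership before fetching a detail page, and the success path (next hunk) adds the URL only after the record is accepted downstream, so a failed page is naturally retried on the next run. A minimal standalone sketch of that pattern, assuming baseCore.r is a redis-py client; the host, port, and key-set name are illustrative:

    import redis

    r = redis.Redis(host='localhost', port=6379, db=0)
    webname = '上海市人民政府'

    def already_collected(url: str) -> bool:
        # SISMEMBER is O(1); True means the URL was crawled before
        return r.sismember('REITs::' + webname, url)

    def mark_collected(url: str) -> None:
        # Called only after a successful send, so failures stay retryable
        r.sadd('REITs::' + webname, url)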
@@ -76,47 +87,71 @@ def getData(data_, driver, num):
         except:
             writtenDate = timeTag.split('印发日期:')[1].lstrip().strip()
     except:
-        writtenDate = ''
+        writtenDate = None
     try:
         a_list = driver.find_element(By.CLASS_NAME, 'gaoj-list').find_elements(By.TAG_NAME, 'a')
         for a in a_list:
             fj_href = a.get_attribute('href')
-            fjhref_list += fj_href + '\n'
-            category = os.path.splitext(href)[1]
-            fj_title = f'{num}-{publishDate}-{a.text.lstrip().strip()}'
+            category = os.path.splitext(fj_href)[1]
+            fj_title = a.text.lstrip().strip()
             if '<' in fj_title or '>' in fj_title:
                 fj_title = fj_title.replace('<', '').replace('>', '')
             if category not in fj_title:
                 fj_title = fj_title + category
-            fjtitle_list += fj_title + '\n'
-            # fjcontent = getFjContent(href)
-            # file = f'./相关政策/内蒙古自治区人民政府/政策文件/{fj_title}'
-            # with open(file, 'wb') as f:
-            #     f.write(fjcontent)
-            # log.info(f'{fj_title}===附件下载成功')
+            att_id, full_path = policy.attuributefile(fj_title, fj_href, num, publishDate)
+            if att_id:
+                id_list.append(att_id)
+                # A WebElement does not support item assignment; rewrite the
+                # link to the stored copy via JS instead
+                driver.execute_script(
+                    "arguments[0].setAttribute('href', arguments[1]);", a, full_path)
+            else:
+                pass
     except:
         pass
-    data = [num, title, publishDate, origin, href, writtenDate, organ, pub_hao, summary, content, fjtitle_list,
-            fjhref_list]
-    return data
+    # data = [num, title, publishDate, origin, href, writtenDate, organ, pub_hao, summary, content, fjtitle_list,
+    #         fjhref_list]
+    # str(WebElement) yields only its repr; serialize the element's HTML,
+    # which now reflects the rewritten attachment links
+    contentWithTag_str = contentWithTag.get_attribute('outerHTML')
+    time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+    dic_info = {
+        'attachmentIds': id_list,
+        'author': '',
+        'content': content,
+        'contentWithTag': contentWithTag_str,
+        'deleteFlag': 0,
+        'id': '',
+        'title': title,
+        'publishDate': publishDate,
+        'origin': origin,
+        'sourceAddress': href,
+        'writtenDate': writtenDate,
+        'organ': organ,
+        'topicClassification': '',
+        'issuedNumber': pub_hao,
+        'summary': summary,
+        'createDate': time_now,
+        'sid': '1729042751554506754',
+    }
+    try:
+        baseCore.sendkafka(dic_info, topic)
+        baseCore.r.sadd('REITs::' + webname, href)
+        log.info(f'采集成功--{title}--{href}')
+    except Exception as e:
+        # If the Kafka send fails, delete the already-uploaded attachments
+        for att_id in id_list:
+            baseCore.deliteATT(att_id)
+    return
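Note on the send-then-commit flow above: the record goes to Kafka first, the URL is added to the Redis set second, and if the send raises, every attachment uploaded in this iteration is deleted so nothing is orphaned. A sketch of the producer side, assuming kafka-python with JSON-serialized values; the broker address and timeout are illustrative, and sendkafka's real implementation lives in BaseCore:

    import json
    from kafka import KafkaProducer

    producer = KafkaProducer(
        bootstrap_servers='localhost:9092',
        value_serializer=lambda v: json.dumps(v, ensure_ascii=False).encode('utf-8'),
    )

    def send_record(dic_info: dict, topic: str) -> None:
        # .get() blocks for the broker ack, so a failure raises here
        # and the caller can roll back its attachments
        producer.send(topic, dic_info).get(timeout=10)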
 def doJob():
-    if not os.path.exists('./相关政策/上海市人民政府/政策文件'):
-        os.makedirs('./相关政策/上海市人民政府/政策文件')
-    driver = baseCore.buildDriver()
-    data_list = []
+    # driver = baseCore.buildDriver()
+    driver = policy.createDriver()
     num = 1
     data_json = getDataJson()
     for data_ in data_json:
         data = getData(data_, driver, num)
-        log.info(f'{data[1]}===采集成功')
-        data_list.append(data)
+        # log.info(f'{data[1]}===采集成功')
+        # data_list.append(data)
         num += 1
-    df = pd.DataFrame(np.array(data_list))
-    df.columns = ['序号', '标题', '发布时间', '来源', '原文链接', '发文时间', '发文机构', '发文字号', '摘要', '正文', '附件名称', '附件连接']
-    df.to_excel('./相关政策/上海市人民政府/上海市人民政府政策文件.xlsx', index=False)

 if __name__ == '__main__':
     doJob()
...
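Note on the attachment-link rewrite: a Selenium WebElement is read-only, so the stored-copy path has to be written into the live DOM with JavaScript, and the container is then serialized through outerHTML. A self-contained sketch of the technique, assuming Selenium 4; the page URL and replacement link are hypothetical, the class name comes from the diff above:

    from selenium import webdriver
    from selenium.webdriver.common.by import By

    driver = webdriver.Chrome()
    driver.get('https://example.org/policy-detail')  # hypothetical detail page

    container = driver.find_element(By.CLASS_NAME, 'Article_content')
    for a in container.find_elements(By.TAG_NAME, 'a'):
        # item assignment on a WebElement raises TypeError; go through JS
        driver.execute_script(
            "arguments[0].setAttribute('href', arguments[1]);",
            a, 'https://files.example.org/stored-copy.pdf')  # hypothetical stored copy
    # outerHTML now reflects the rewritten links
    html = container.get_attribute('outerHTML')
    driver.quit()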