Commit 9443f3af  Author: 薛凌堃

黑龙江省人民政府

Parent 95cfdf3b
#coding=utf-8
import os
import time
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
import BaseCore

baseCore = BaseCore.BaseCore()
log = baseCore.getLogger()

from reits import Policy

policy = Policy()

topic = 'policy'
webname = '黑龙江省人民政府'

headers = {
    'Content-Type': 'application/x-www-form-urlencoded',
    'Token': '9a9ff46e-f534-43b8-bad1-063d80af7e51',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0',
}
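# Note: `topic` is the Kafka topic that baseCore.sendkafka() publishes to, and `webname`
# doubles as the suffix of the Redis dedup set ('REITs::' + webname). The Token header is
# presumably the credential the site's policy API expects and may need refreshing.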
@@ -34,10 +39,11 @@ def getDataJson():
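# getSoup: the detail endpoint responds with JSON whose content.html field holds the
# article markup; this helper fetches it and returns a parsed BeautifulSoup tree.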
def getSoup(url):
    # ip = baseCore.get_proxy()
    req = requests.get(url, headers=headers)
    req.encoding = req.apparent_encoding
    print(req.json())
    soup = BeautifulSoup(req.json()['content']['html'], 'lxml')
    return soup
@@ -49,10 +55,9 @@ def getFjContent(url):
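# getContent: for one listing record, build the public reading URL, skip it if the URL
# is already in the Redis set (baseCore.r is assumed to be a Redis client), fetch the
# detail page, re-point attachment links at stored copies, then assemble the record and
# push it to Kafka. URLs are only marked as collected after a successful send.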
def getContent(num, title, publishDate, summary, id, pub_hao, organ,type):
    id_list = []
    url = f'https://www.hlj.gov.cn/znwd/policy/#/readDetails?id={id}'
    writtenDate = None
    if type == '政策解读':
        origin = organ
        organ = ''
@@ -60,7 +65,10 @@ def getContent(num, title, publishDate, summary, id, pub_hao, organ,type):
    else:
        origin = '黑龙江省人民政府'
    href_ = f'https://www.hlj.gov.cn/znwd/policy/policy/policy/ctrl/public/chatPolicyFile/findById/{id}'
    # Deduplicate by URL: skip records already recorded in the Redis set
    is_member = baseCore.r.sismember('REITs::' + webname, url)
    if is_member:
        return
    soup = getSoup(href_)
    try:
        a_list = soup.find_all('a')
@@ -68,19 +76,17 @@ def getContent(num, title, publishDate, summary, id, pub_hao, organ,type):
            href = a.get('href')
            if '.html' in href or '.shtml' in href or '.htm' in href:
                continue
            category = os.path.splitext(href)[1]
            fj_title = a.text.lstrip().strip()
            if '<' in fj_title or '>' in fj_title:
                fj_title = fj_title.replace('<', '').replace('>', '')
            if category not in fj_title:
                fj_title = fj_title + category
            # Store the attachment and re-point the link at the stored copy
            att_id, full_path = policy.attuributefile(fj_title, href, num, publishDate)
            if att_id:
                id_list.append(att_id)
                a['href'] = full_path
    except Exception as e:
        log.error(f'{title}====={e}')
    try:
@@ -96,15 +102,39 @@ def getContent(num, title, publishDate, summary, id, pub_hao, organ,type):
    except:
        pass
    content = soup.text.lstrip().strip()
    contentWithTag_str = str(soup)
    time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    dic_info = {
        'attachmentIds': id_list,
        'author': '',
        'content': content,
        'contentWithTag': contentWithTag_str,
        'deleteFlag': 0,
        'id': '',
        'title': title,
        'publishDate': publishDate,
        'origin': origin,
        'sourceAddress': url,
        'writtenDate': writtenDate,
        'organ': organ,
        'topicClassification': '',
        'issuedNumber': pub_hao,
        'summary': summary,
        'createDate': time_now,
        'sid': '1729042585839841281',
    }
    try:
        baseCore.sendkafka(dic_info, topic)
        # Record the URL only after a successful send, so failed items are retried next run
        baseCore.r.sadd('REITs::' + webname, url)
        log.info(f'采集成功--{title}--{url}')
    except Exception as e:
        # Sending failed: clean up attachments that were already stored
        for att_id in id_list:
            baseCore.deliteATT(att_id)
    return
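# doJob: iterate the listing returned by getDataJson() and hand each record to
# getContent(); time.sleep(3) spaces out requests between records.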
def doJob():
    if not os.path.exists('./相关政策/黑龙江省人民政府/政策文件'):
        os.makedirs('./相关政策/黑龙江省人民政府/政策文件')
    data_list = []
    num = 1
    data_json = getDataJson()
    for data_ in data_json:
@@ -122,12 +152,10 @@ def doJob():
        except:
            organ = ''
        data = getContent(num, title, publishDate, summary, id, pub_hao, organ,type)
        # data_list.append(data)
        num += 1
        time.sleep(3)
if __name__ == "__main__":
    doJob()