Commit ff6db068 authored by 薛凌堃

山东省人民政府

Parent 5bba0870
@@ -2,13 +2,17 @@ import time
 import time
 import requests
 from bs4 import BeautifulSoup
-from base import BaseCore
 import os
-import pandas as pd
-import numpy as np
+import BaseCore
 baseCore = BaseCore.BaseCore()
 log = baseCore.getLogger()
+from reits import Policy
+policy = Policy()
+topic = 'policy'
+webname = '山东省人民政府'
 headers = {
     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0',
     'X-Requested-With': 'XMLHttpRequest',
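Review note: the commit swaps `from base import BaseCore` for a top-level `import BaseCore`, drops the pandas/numpy Excel dependencies, and wires in the shared `reits.Policy` helper plus a `topic`/`webname` pair used below for Kafka and Redis keys. For readers without the rest of the repo, the sketch below shows roughly the surface BaseCore must expose, inferred purely from the call sites in this diff; every name and connection detail here is an assumption, not the real module.

# Hypothetical stand-in for the project's BaseCore module, inferred only
# from call sites in this diff (baseCore.r, getLogger, sendkafka, deliteATT).
import json
import logging

import redis
from kafka import KafkaProducer  # assumes kafka-python; unverified


class BaseCore:
    def __init__(self):
        # .r is used as a Redis client below: r.sismember(...) / r.sadd(...)
        self.r = redis.Redis(host='127.0.0.1', port=6379, db=0)
        self._producer = KafkaProducer(
            bootstrap_servers='127.0.0.1:9092',
            value_serializer=lambda v: json.dumps(v, ensure_ascii=False).encode('utf-8'),
        )

    def getLogger(self):
        logging.basicConfig(level=logging.INFO)
        return logging.getLogger('reits')

    def sendkafka(self, dic_info, topic):
        # Blocks until the broker acknowledges, so a failure raises and
        # lets the caller roll back stored attachments.
        self._producer.send(topic, dic_info).get(timeout=30)

    def deliteATT(self, att_id):
        # Deletes a previously stored attachment by id (stub here).
        raise NotImplementedError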
@@ -55,8 +59,7 @@ def getDataJson(page):


 def getContent(url, publishDate, num):
-    fjhref_list = ''
-    fjtitle_list = ''
+    id_list = []
     soup = getSoup(url)
     contentWithTag = soup.find('div', class_='wip_art_con')
     a_list = contentWithTag.find_all('a')
@@ -65,7 +68,7 @@ def getContent(url, publishDate, num):
         fj_href = a.get('href')
         if 'http' not in fj_href:
             fj_href = 'http://www.shandong.gov.cn' + fj_href
-        fjhref_list += fj_href + '\n'
         fj_title = a.text.lstrip().strip().replace(' ', '')
         if fj_title == '':
             fj_title = str(num_)
@@ -73,13 +76,10 @@ def getContent(url, publishDate, num):
         category = os.path.splitext(fj_href)[1]
         if category not in fj_title:
             fj_title = fj_title + category
-        fj_title = f'{num}-{publishDate}-{fj_title}'
-        fjtitle_list += fj_title + '\n'
-        fjcontent = getFjContent(fj_href)
-        file = f'./相关政策/山东省人民政府/政策文件/{fj_title}'
-        with open(file, 'wb') as f:
-            f.write(fjcontent)
-        log.info(f'{fj_title}===附件下载成功')
+        att_id, full_path = policy.attuributefile(fj_title, fj_href, num, publishDate)
+        if att_id:
+            id_list.append(att_id)
+            a['href'] = full_path
     try:
         scripts = contentWithTag.find_all('script')
         for script in scripts:
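Review note: `policy.attuributefile(...)` replaces the old write-to-disk loop. From this call site it appears to fetch the attachment, persist it, and return an attachment id plus the stored path, with a falsy id signalling failure. A hedged sketch of that contract; the save directory, id scheme, and error handling are assumptions, since the real implementation lives in `reits.py`.

import os

import requests


class Policy:
    """Hypothetical sketch of the contract implied by the call site above."""

    def attuributefile(self, fj_title, fj_href, num, publishDate):
        try:
            resp = requests.get(fj_href, timeout=30)
            resp.raise_for_status()
        except requests.RequestException:
            return None, None  # caller checks `if att_id:` and skips the <a>
        save_dir = './attachments'  # assumed location, not from the diff
        os.makedirs(save_dir, exist_ok=True)
        full_path = os.path.join(save_dir, f'{num}-{publishDate}-{fj_title}')
        with open(full_path, 'wb') as f:
            f.write(resp.content)
        att_id = abs(hash(full_path))  # stand-in for a real database id
        return att_id, full_path

Rewriting `a['href'] = full_path` afterwards means the links inside the serialized `contentWithTag` point at the stored copies rather than the live government site.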
@@ -93,13 +93,13 @@ def getContent(url, publishDate, num):
     except:
         pass
     content = contentWithTag.text.lstrip().strip()
-    return content, fjtitle_list, fjhref_list
+    return content, contentWithTag, id_list


 def getData(soup, num):
     origin = '山东省人民政府'
     organ = ''
-    writtenDate = ''
+    writtenDate = None
     pub_hao = ''
     try:
         type = soup.find('span', class_='szf_lmmc').text
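Review note: besides returning `contentWithTag` and `id_list` instead of the newline-joined filename/URL strings, the default `writtenDate` changes from `''` to `None`. That matters once the record is JSON-serialized for Kafka: `None` becomes `null`, which downstream consumers can distinguish from an empty string. A two-line illustration:

import json

print(json.dumps({'writtenDate': None}))  # {"writtenDate": null}
print(json.dumps({'writtenDate': ''}))    # {"writtenDate": ""}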
@@ -111,6 +111,11 @@ def getData(soup, num):
                 -1].text.lstrip().strip()
             pub_hao = soup.find('table', class_='szf_xxgk').find_all('tr')[1].find_all('td')[-1].text.lstrip().strip()
             href = soup.find('a', class_='szf_url').text.lstrip().strip()
+            # Deduplicate by link
+            is_member = baseCore.r.sismember('REITs::' + webname, href)
+            if is_member:
+                return
             publishDate = soup.find('span', class_='szf_rq').text.lstrip().strip()
         else:
             summary = soup.find('div', class_='szf_ms').text.lstrip().strip()
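Review note: the `sismember` check makes reruns idempotent: a URL already in the `REITs::山东省人民政府` set is skipped before any download happens, and the matching `sadd` (later in this diff) runs only after the Kafka send succeeds, so failed items get retried on the next run. A minimal sketch of the pattern, assuming a local Redis; connection details are not in this diff.

import redis

r = redis.Redis(host='127.0.0.1', port=6379, db=0)  # assumed connection
key = 'REITs::山东省人民政府'
href = 'http://www.shandong.gov.cn/art/example.html'  # hypothetical URL

if not r.sismember(key, href):
    # ... collect the page and publish it downstream ...
    r.sadd(key, href)   # mark as collected only after publishing succeeds
print(r.scard(key))     # how many URLs have been collected so far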
@@ -121,16 +126,40 @@ def getData(soup, num):
             summary = soup.find('div', class_='jcse-news-abs-content').text.lstrip().strip()
             href = soup.find('div', class_='jcse-news-url').text.lstrip().strip()
             publishDate = soup.find('span', class_='jcse-news-date').text.lstrip().strip()
-    content, fjtitle_list, fjhref_list = getContent(href, publishDate, num)
-    data = [num, title, publishDate, origin, href, writtenDate, organ, pub_hao, summary, content, fjtitle_list,
-            fjhref_list]
-    return data
+    content, contentWithTag, id_list = getContent(href, publishDate, num)
+    contentWithTag_str = str(contentWithTag)
+    time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+    dic_info = {
+        'attachmentIds': id_list,
+        'author': '',
+        'content': content,
+        'contentWithTag': contentWithTag_str,
+        'deleteFlag': 0,
+        'id': '',
+        'title': title,
+        'publishDate': publishDate,
+        'origin': origin,
+        'sourceAddress': href,
+        'writtenDate': writtenDate,
+        'organ': organ,
+        'topicClassification': '',
+        'issuedNumber': pub_hao,
+        'summary': summary,
+        'createDate': time_now,
+        'sid': '1729043593615560705',
+    }
+    try:
+        baseCore.sendkafka(dic_info, topic)
+        baseCore.r.sadd('REITs::' + webname, href)
+        log.info(f'采集成功--{title}--{href}')
+    except Exception as e:
+        for att_id in id_list:
+            baseCore.deliteATT(att_id)
+    return


 def doJob():
-    if not os.path.exists('./相关政策/山东省人民政府/政策文件'):
-        os.makedirs('./相关政策/山东省人民政府/政策文件')
-    data_list = []
     num = 1
     for page in range(1, 3):
         data_json = getDataJson(page)
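Review note: `getData` now assembles `dic_info`, sends it, and only then marks the URL as collected; on a failed send it deletes the attachments stored earlier via `deliteATT`, so a half-processed article leaves no orphans. One nit: the `except Exception as e:` branch never uses `e`, so failures are silent. A compact restatement of the compensate-on-failure pattern with the error logged; the `send`/`delete_attachment` parameters are hypothetical stand-ins for `baseCore.sendkafka`/`baseCore.deliteATT`.

def publish_with_rollback(dic_info, id_list, topic, send, delete_attachment, log):
    try:
        send(dic_info, topic)
    except Exception as e:
        for att_id in id_list:        # undo attachments stored for this article
            delete_attachment(att_id)
        log.error(f'send failed, rolled back {len(id_list)} attachments: {e}')
        return False
    return True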
@@ -138,13 +167,7 @@ def doJob():
             data_ = data_.replace('\\', '')
             soup = BeautifulSoup(data_, 'lxml')
             data = getData(soup, num)
-            data_list.append(data)
-            log.info(f'{data[1]}===采集成功')
             num += 1
             time.sleep(3)
-    df = pd.DataFrame(np.array(data_list))
-    df.columns = ['序号', '标题', '发布时间', '来源', '原文链接', '发文时间', '发文机构', '发文字号', '摘要', '正文', '附件名称', '附件连接']
-    df.to_excel('./相关政策/山东省人民政府/山东省人民政府政策文件.xlsx', index=False)


 if __name__ == '__main__':
     ......
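Review note: with the pandas/numpy Excel export removed from `doJob`, output is now observable only on the `policy` Kafka topic. A quick way to eyeball it, assuming kafka-python, a local broker, and JSON-encoded messages; the actual wire format is whatever `BaseCore.sendkafka` produces.

import json

from kafka import KafkaConsumer  # assumes kafka-python and a local broker

consumer = KafkaConsumer(
    'policy',
    bootstrap_servers='127.0.0.1:9092',  # assumed broker address
    auto_offset_reset='earliest',
    value_deserializer=lambda b: json.loads(b.decode('utf-8')),
)
for msg in consumer:
    doc = msg.value
    print(doc['title'], doc['sourceAddress'], len(doc['attachmentIds']))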