Commit 95cfdf3b  Author: 薛凌堃

云南省人民政府

Parent f7c275d7
 import os
@@ -10,10 +10,17 @@ from bs4 import BeautifulSoup
 from retry import retry
 from selenium.webdriver.common.by import By
-from base import BaseCore
+import BaseCore
 baseCore = BaseCore.BaseCore()
 log = baseCore.getLogger()
+from reits import Policy
+policy = Policy()
+topic = 'policy'
+webname = '云南省人民政府'
 headers = {
     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0',
 }
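Note: the rewritten imports drop the local-file workflow and pull in a shared `reits.Policy` helper that the rest of the diff leans on for URL normalization, attachment registration, and driver construction. Its source is not part of this commit; below is a minimal sketch of the interface implied by the call sites in this file. Method names come from the diff; the bodies are illustrative stand-ins, not the real implementation.

```python
# Hypothetical sketch of the reits.Policy interface assumed by this file.
from urllib.parse import urljoin

class Policy:
    def paserUrl(self, soup, base_url):
        # Rewrite relative hrefs/srcs in the soup to absolute URLs in place.
        for tag in soup.find_all(['a', 'img']):
            for attr in ('href', 'src'):
                if tag.get(attr):
                    tag[attr] = urljoin(base_url, tag[attr])

    def attuributefile(self, fj_title, fj_href, num, publish_date):
        # Download and register one attachment, returning
        # (attachment_id, stored_path); (None, None) on failure. (Assumed.)
        raise NotImplementedError

    def createDriver(self):
        # Build and return a configured selenium WebDriver. (Assumed.)
        raise NotImplementedError
```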
@@ -36,9 +43,9 @@ def getFjContent(url):
 def getContent(url, publishDate, num):
-    fjhref_list = ''
-    fjtitle_list = ''
+    id_list = []
     soup = getSoup(url)
+    policy.paserUrl(soup, url)
     contentWithTag = soup.find('div', class_='content')
     if not contentWithTag:
         contentWithTag = soup.find('div', class_='TRS_UEDITOR')
@@ -62,24 +69,18 @@ def getContent(url, publishDate, num):
             fj_href = a.get('href')
             if 'http' not in fj_href:
                 fj_href = 'https://www.yn.gov.cn' + fj_href
-            fjhref_list += fj_href + '\n'
             if fj_title == '':
                 fj_title = str(num_)
                 num_ += 1
             category = os.path.splitext(fj_href)[1]
             if category not in fj_title:
                 fj_title = fj_title + category
-            fj_title = f'{num}-{publishDate}-{fj_title}'
-            fjtitle_list += fj_title + '\n'
-            fjcontent = getFjContent(fj_href)
-            file = f'./相关政策/云南省人民政府/政策文件/{fj_title}'
-            if os.path.exists(file):
-                fj_title = fj_title.replace(category, f'-{num_}{category}')
-                num_ += 1
-                file = f'./相关政策/云南省人民政府/政策文件/{fj_title}'
-            with open(file, 'wb') as f:
-                f.write(fjcontent)
-            log.info(f'{fj_title}===附件下载成功')
+            att_id, full_path = policy.attuributefile(fj_title, fj_href, num, publishDate)
+            if att_id:
+                id_list.append(att_id)
+                a['href'] = full_path
     try:
         a_list = soup.find('ul', class_='apfile').find_all('a')
         for a in a_list:
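Note: the attachment loop above normalizes titles before registering the file: anchors with no visible text fall back to a running counter, and the extension taken from the href is appended when the title lacks it. A self-contained rendering of just that rule, kept behaviorally identical to the diff:

```python
import os

def normalize_attachment_title(fj_title: str, fj_href: str, counter: int) -> str:
    """Mirror of the title-normalization logic in getContent (illustrative)."""
    if fj_title == '':
        fj_title = str(counter)              # unnamed attachments get a number
    category = os.path.splitext(fj_href)[1]  # e.g. '.pdf', '.docx'
    if category not in fj_title:
        fj_title = fj_title + category       # ensure the extension is present
    return fj_title

assert normalize_attachment_title('', 'https://x/a.pdf', 3) == '3.pdf'
assert normalize_attachment_title('通知.pdf', 'https://x/a.pdf', 3) == '通知.pdf'
```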
@@ -87,39 +88,38 @@ def getContent(url, publishDate, num):
             fj_href = a.get('href')
             if 'http' not in fj_href:
                 fj_href = 'https://www.yn.gov.cn' + fj_href
-            fjhref_list += fj_href + '\n'
             if fj_title == '':
                 fj_title = str(num_)
                 num_ += 1
             category = os.path.splitext(fj_href)[1]
             if category not in fj_title:
                 fj_title = fj_title + category
-            fj_title = f'{num}-{publishDate}-{fj_title}'
-            fjtitle_list += fj_title + '\n'
-            fjcontent = getFjContent(fj_href)
-            file = f'./相关政策/云南省人民政府/政策文件/{fj_title}'
-            if os.path.exists(file):
-                fj_title = fj_title.replace(category, f'-{num_}{category}')
-                num_ += 1
-                file = f'./相关政策/云南省人民政府/政策文件/{fj_title}'
-            with open(file, 'wb') as f:
-                f.write(fjcontent)
-            log.info(f'{fj_title}===附件下载成功')
+            att_id, full_path = policy.attuributefile(fj_title, fj_href, num, publishDate)
+            if att_id:
+                id_list.append(att_id)
+                a['href'] = full_path
     except:
         pass
-    return content, fjtitle_list, fjhref_list
+    return content, contentWithTag, id_list
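Note: both attachment loops replace the old pattern (save to `./相关政策/...` on disk, accumulate newline-joined title/href strings) with a call to `policy.attuributefile`, which returns an attachment id plus the stored path so the caller can rewrite `a['href']` to point at the new location. The helper's body is not in this diff; a sketch of what it presumably does, with the persistence layer left abstract:

```python
# Hypothetical sketch of policy.attuributefile as used above. The `store`
# callable stands in for whatever persistence the real helper uses (object
# storage plus an attachment table, for example) -- an assumption here.
import requests

def attuributefile_sketch(fj_title, fj_href, num, publish_date, store):
    resp = requests.get(fj_href, timeout=30)
    if resp.status_code != 200 or not resp.content:
        return None, None               # caller's `if att_id:` skips failures
    att_id, full_path = store(fj_title, resp.content, num, publish_date)
    return att_id, full_path
```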
 def getData(div, num):
     pattern = r"\d{4}-\d{2}-\d{2}"
     title = div.find_element(By.CLASS_NAME, 'title').find_element(By.CLASS_NAME, 'fontlan').get_attribute(
         'title').lstrip().strip()
     href = div.find_element(By.CLASS_NAME, 'fontlan').get_attribute('href')
+    # 根据链接判重 (dedupe on the article URL)
+    is_member = baseCore.r.sismember('REITs::' + webname, href)
+    if is_member:
+        return
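Note: the added guard short-circuits on URLs that were already collected. The set `REITs::<webname>` is only populated after a successful Kafka send (see below), so items that failed mid-run are retried on the next pass. The same pattern with a plain redis-py client (connection details are assumptions):

```python
# Standalone rendering of the dedup pattern, assuming a redis-py client.
import redis

r = redis.Redis(host='127.0.0.1', port=6379, db=0)  # connection assumed

def already_collected(webname: str, href: str) -> bool:
    return bool(r.sismember('REITs::' + webname, href))

def mark_collected(webname: str, href: str) -> None:
    r.sadd('REITs::' + webname, href)   # call only after a successful send
```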
     origin = '云南省人民政府'
     try:
         publishDate = re.findall(pattern, div.find_element(By.CLASS_NAME, 'content').text)[0]
     except:
-        publishDate = ''
+        publishDate = None
     try:
         organ = \
             div.find_element(By.CLASS_NAME, 'rowtab').find_elements(By.TAG_NAME, 'div')[0].find_elements(By.TAG_NAME,
@@ -135,30 +135,52 @@ def getData(div, num):
         organ = ''
         pub_hao = ''
     summary = ''
-    writtenDate = ''
+    writtenDate = None
     if '.pdf' in href or '.PDF' in href:
+        id_list = []
         content = ''
-        fjhref_list = href
+        contentWithTag_str = ''
+        fj_href = href
         fj_title = title + '.pdf'
-        fjcontent = getFjContent(fjhref_list)
-        file = f'./相关政策/云南省人民政府/政策文件/{fj_title}'
-        with open(file, 'wb') as f:
-            f.write(fjcontent)
-        log.info(f'{fj_title}===附件下载成功')
-        fjtitle_list = fj_title
+        att_id, full_path = policy.attuributefile(fj_title, fj_href, num, publishDate)
+        if att_id:
+            id_list.append(att_id)
     else:
-        content, fjtitle_list, fjhref_list = getContent(href, publishDate, num)
-    data = [num, title, publishDate, origin, href, writtenDate, organ, pub_hao, summary, content, fjtitle_list,
-            fjhref_list]
-    return data
+        content, contentWithTag, id_list = getContent(href, publishDate, num)
+        contentWithTag_str = str(contentWithTag)
+    time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+    dic_info = {
+        'attachmentIds': id_list,
+        'author': '',
+        'content': content,
+        'contentWithTag': contentWithTag_str,
+        'deleteFlag': 0,
+        'id': '',
+        'title': title,
+        'publishDate': publishDate,
+        'origin': origin,
+        'sourceAddress': href,
+        'writtenDate': writtenDate,
+        'organ': organ,
+        'topicClassification': '',
+        'issuedNumber': pub_hao,
+        'summary': summary,
+        'createDate': time_now,
+        'sid': '1729046848292892673',
+    }
+    try:
+        baseCore.sendkafka(dic_info, topic)
+        baseCore.r.sadd('REITs::' + webname, href)
+        log.info(f'采集成功--{title}--{href}')
+    except Exception as e:
+        for att_id in id_list:
+            baseCore.deliteATT(att_id)
+    return
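Note: the Excel-row output is replaced by a send-then-mark-then-rollback flow: push the record to Kafka, mark the URL as collected in Redis only on success, and on failure delete the attachments registered for this item so the next run can re-collect it cleanly. `baseCore.sendkafka` and `baseCore.deliteATT` are project helpers not shown in this diff; the same shape sketched with kafka-python directly (broker address and serializer are assumptions):

```python
# Sketch of the send/rollback flow above, using kafka-python directly.
import json
from kafka import KafkaProducer

producer = KafkaProducer(
    bootstrap_servers=['127.0.0.1:9092'],   # broker address assumed
    value_serializer=lambda v: json.dumps(v, ensure_ascii=False).encode('utf-8'),
)

def send_or_rollback(dic_info, topic, id_list, mark_collected, delete_attachment):
    try:
        # .get() blocks until the broker acks, so failures raise here.
        producer.send(topic, dic_info).get(timeout=10)
        mark_collected(dic_info['sourceAddress'])
    except Exception:
        # Roll back: drop this item's attachments so the next run
        # re-collects it cleanly (what deliteATT presumably does).
        for att_id in id_list:
            delete_attachment(att_id)
```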
 def doJob():
-    if not os.path.exists('./相关政策/云南省人民政府/政策文件'):
-        os.makedirs('./相关政策/云南省人民政府/政策文件')
-    data_list = []
     url = 'https://sheng.so-gov.cn/s?siteCode=5300000033&qt=REITs'
-    driver = baseCore.buildDriver()
+    driver = policy.createDriver()
     driver.get(url)
     time.sleep(2)
     num = 1
@@ -176,17 +198,13 @@ def doJob():
         time.sleep(2)
         div_list = driver.find_elements(By.XPATH, '//*[@id="results"]/div')
         for div in div_list:
-            data = getData(div, num)
-            data_list.append(data)
-            log.info(f'{data[1]}===采集成功')
+            getData(div, num)
             num += 1
         try:
             driver.find_element(By.CLASS_NAME, 'pagination').find_element(By.CLASS_NAME, 'next').click()
         except:
             pass
-    df = pd.DataFrame(np.array(data_list))
-    df.columns = ['序号', '标题', '发布时间', '来源', '原文链接', '发文时间', '发文机构', '发文字号', '摘要', '正文', '附件名称', '附件连接']
-    df.to_excel('./相关政策/云南省人民政府/云南省人民政府政策文件.xlsx', index=False)

 if __name__ == '__main__':
...
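Note: `policy.createDriver` replaces `baseCore.buildDriver`; neither body appears in this diff. A typical construction it might wrap, using the standard selenium API (the headless and GPU options are assumptions, not the project's actual configuration):

```python
# Plausible stand-in for policy.createDriver (real body not in this diff).
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

def create_driver_sketch():
    opts = Options()
    opts.add_argument('--headless=new')   # assumed: the crawler runs headless
    opts.add_argument('--disable-gpu')
    return webdriver.Chrome(options=opts)
```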