zzsn_spider — Commit db64a87a

Commit db64a87a, authored Dec 05, 2023 by 薛凌堃

Commit message: 江苏省人民政府

Parent: 78a94cdb

Showing 1 changed file with 105 additions and 47 deletions.

REITs专题数据/policy-jiangsu.py  (+105 −47)
 import os
...
@@ -8,11 +8,18 @@ import requests
 from bs4 import BeautifulSoup
 from selenium.webdriver.common.by import By
-from base import BaseCore
+import BaseCore
 baseCore = BaseCore.BaseCore()
 log = baseCore.getLogger()
+from reits import Policy
+policy = Policy()
+topic = 'policy'
+webname = '江苏省人民政府'
 headers = {
     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0',
     'Content-Type': 'application/x-www-form-urlencoded',
...
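Note: the hunk above swaps the BaseCore import style and wires in the shared logger plus a reits.Policy helper at module level. The functions below call getSoup(url), whose body falls outside the visible hunks; a minimal sketch of what such a helper presumably looks like, assuming it pairs requests with BeautifulSoup and the module-level headers (the name and behavior are inferred from the call sites, not shown in this diff):

import requests
from bs4 import BeautifulSoup

def getSoup(url):
    # Hypothetical reconstruction of the helper the diff calls but does not show.
    req = requests.get(url, headers=headers, timeout=30)
    req.encoding = req.apparent_encoding  # provincial pages mix GBK and UTF-8
    return BeautifulSoup(req.text, 'html.parser')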
@@ -35,9 +42,8 @@ def getFjContent(url):
     return req.content

-def getContentA(url, num, publishDate, title):
-    fjhref_list = ''
-    fjtitle_list = ''
+def getContentA(url, num, publishDate, title, origin, summary):
+    id_list = []
     soup = getSoup(url)
     organ = soup.find('div', class_='sp_time').text.split('来源:')[1].split('字体')[0].lstrip().strip()
     contentWithTag = soup.find('div', attrs={'id': 'zoom'})
...
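getContentA now receives origin and summary from the caller and collects attachment ids in id_list instead of accumulating newline-joined title/href strings. The organ field is pulled out of the page header with chained str.split calls; a worked example on a made-up header string of the same shape:

# Made-up sp_time text, shaped like soup.find('div', class_='sp_time').text
sp_time_text = '2023-11-20 来源:江苏省财政厅 字体:[大 中 小]'
organ = sp_time_text.split('来源:')[1].split('字体')[0].lstrip().strip()
print(organ)  # 江苏省财政厅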
@@ -60,31 +66,56 @@ def getContentA(url, num, publishDate, title):
             fj_href = img.get('src')
             try:
                 fj_href = 'http://www.jiangsu.gov.cn' + fj_href
-                fjhref_list += fj_href + '\n'
                 fj_title = img.get('title').lstrip().strip()
-                fjtitle_list += fj_title + '\n'
+                fj_title = f'{num}-{publishDate}-{fj_title}'
             except:
                 if 'img/png' in fj_href:
-                    fj_title = f'{title}-{num_}.png'
+                    fj_title = f'{num}-{publishDate}-{title}-{num_}.png'
                 elif 'img/jpg' in fj_href:
-                    fj_title = f'{title}-{num_}.jpg'
+                    fj_title = f'{num}-{publishDate}-{title}-{num_}.jpg'
                 num_ += 1
             fjcontent = getFjContent(fj_href)
             file = f'./相关政策/江苏省人民政府/政策文件/{fj_title}'
             with open(file, 'wb') as f:
                 f.write(fjcontent)
             log.info(f'{fj_title}===附件下载成功')
+            att_id, full_path = policy.attuributefile(fj_title, fj_href, num, publishDate)
+            if att_id:
+                id_list.append(att_id)
+                img['href'] = full_path
+            else:
+                pass
         except:
             pass
     content = contentWithTag.text
-    return organ, content, fjtitle_list, fjhref_list
+    contentWithTag_str = str(contentWithTag)
+    time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+    dic_info = {
+        'attachmentIds': id_list,
+        'author': '',
+        'content': content,
+        'contentWithTag': contentWithTag_str,
+        'deleteFlag': 0,
+        'id': '',
+        'title': title,
+        'publishDate': publishDate,
+        'origin': origin,
+        'sourceAddress': url,
+        'writtenDate': None,
+        'organ': organ,
+        'topicClassification': '',
+        'issuedNumber': '',
+        'summary': summary,
+        'createDate': time_now,
+        'sid': '1729042894974537730',
+    }
+    try:
+        baseCore.sendkafka(dic_info, topic)
+        baseCore.r.sadd('REITs::' + webname, url)
+        log.info(f'采集成功--{title}--{url}')
+    except Exception as e:
+        for att_id in id_list:
+            baseCore.deliteATT(att_id)
+    return

-def getContentB(url, num, publishDate, title):
-    fjhref_list = ''
-    fjtitle_list = ''
+def getContentB(url, num, publishDate, title, origin, summary):
+    id_list = []
     soup = getSoup(url)
     info = soup.find('table', class_='xxgk_table').text.replace(' ', '')
     organ = info.split('发布机构:')[1].split('发文日期')[0].lstrip().strip()
...
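The rewritten tail of getContentA drops the old return-everything-to-the-caller flow: each article is assembled into a dic_info record, published to Kafka, and its URL is recorded in a Redis set; if the send fails, the attachments that were already registered are deleted so nothing is left half-committed. A condensed sketch of that send-or-roll-back pattern as standalone code (baseCore.sendkafka, baseCore.r, and baseCore.deliteATT are the helpers the diff itself calls; the wrapper function is illustrative):

def push_record(dic_info, attachment_ids, url):
    # Sketch of the commit's send-or-roll-back pattern, assuming the
    # BaseCore helpers behave as the call sites in the diff suggest.
    try:
        baseCore.sendkafka(dic_info, topic)        # publish the record
        baseCore.r.sadd('REITs::' + webname, url)  # mark the URL as collected
        log.info(f'采集成功--{dic_info["title"]}--{url}')
    except Exception:
        # Kafka failed: drop the already-registered attachments so a
        # retry can re-process the page from scratch.
        for att_id in attachment_ids:
            baseCore.deliteATT(att_id)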
@@ -110,61 +141,88 @@ def getContentB(url, num, publishDate, title):
             fj_href = img.get('src')
             try:
                 fj_title = img.get('title').lstrip().strip()
-                fjtitle_list += fj_title + '\n'
+                fj_title = f'{num}-{publishDate}-{fj_title}'
                 fj_href = 'http://www.jiangsu.gov.cn' + fj_href
-                fjhref_list += fj_href + '\n'
                 fjcontent = getFjContent(fj_href)
                 file = f'./相关政策/江苏省人民政府/政策文件/{fj_title}'
                 with open(file, 'wb') as f:
                     f.write(fjcontent)
                 log.info(f'{fj_title}===附件下载成功')
             except:
                 if 'image/png' in fj_href:
-                    fj_title = f'{title}-{num_}.png'
+                    fj_title = f'{num}-{publishDate}-{title}-{num_}.png'
                 elif 'image/jpg' in fj_href:
-                    fj_title = f'{title}-{num_}.jpg'
+                    fj_title = f'{num}-{publishDate}-{title}-{num_}.jpg'
                 num_ += 1
-                fjtitle_list += fj_title + '\n'
+            try:
+                att_id, full_path = policy.attuributefile(fj_title, fj_href, num, publishDate)
+            except:
+                att_id = ''
+            if att_id:
+                id_list.append(att_id)
+                img['href'] = full_path
+            else:
+                pass
     content = contentWithTag.text.lstrip().strip()
-    return organ, writtenDate, pub_hao, content, fjtitle_list, fjhref_list
+    contentWithTag_str = str(contentWithTag)
+    time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+    dic_info = {
+        'attachmentIds': id_list,
+        'author': '',
+        'content': content,
+        'contentWithTag': contentWithTag_str,
+        'deleteFlag': 0,
+        'id': '',
+        'title': title,
+        'publishDate': publishDate,
+        'origin': origin,
+        'sourceAddress': url,
+        'writtenDate': writtenDate,
+        'organ': organ,
+        'topicClassification': '',
+        'issuedNumber': pub_hao,
+        'summary': summary,
+        'createDate': time_now,
+        'sid': '1729042894974537730',
+    }
+    try:
+        baseCore.sendkafka(dic_info, topic)
+        baseCore.r.sadd('REITs::' + webname, url)
+        log.info(f'采集成功--{title}--{url}')
+    except Exception as e:
+        for att_id in id_list:
+            baseCore.deliteATT(att_id)
+    return
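Both content functions now share the same attachment flow: download the binary with getFjContent, write it under ./相关政策/江苏省人民政府/政策文件/, then register it through policy.attuributefile to obtain an att_id plus the full_path the in-page link is rewritten to. A condensed sketch of that flow (helper names are taken from the diff's call sites; the wrapper itself is illustrative):

def register_attachment(img, fj_title, fj_href, num, publishDate):
    # Download the attachment body and persist it locally.
    fjcontent = getFjContent(fj_href)
    with open(f'./相关政策/江苏省人民政府/政策文件/{fj_title}', 'wb') as f:
        f.write(fjcontent)
    # Register it; per the diff, attuributefile returns an id plus the
    # path that the <img>/<a> tag should be rewritten to point at.
    try:
        att_id, full_path = policy.attuributefile(fj_title, fj_href, num, publishDate)
    except Exception:
        return None
    if att_id:
        img['href'] = full_path  # point the tag at the stored copy
        return att_id
    return None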
 def doJob():
     if not os.path.exists('./相关政策/江苏省人民政府/政策文件'):
         os.makedirs('./相关政策/江苏省人民政府/政策文件')
     pattern = r"\d{4}-\d{2}-\d{2}"
     url = 'http://www.jiangsu.gov.cn/jsearchfront/search.do?websiteid=320000000100000&searchid=12&pg=&p=1&tpl=38&serviceType=&cateid=27&q=REITs&pq=&oq=&eq=&pos=&sortType=0&begin=&end='
-    driver = baseCore.buildDriver()
+    # driver = baseCore.buildDriver()
+    driver = policy.createDriver()
     driver.get(url)
     time.sleep(5)
     div_list = driver.find_elements(By.CLASS_NAME, 'news-result')
     num = 1
-    data_list = []
     for div in div_list:
+        id_list = []
         title = div.find_element(By.CLASS_NAME, 'jcse-news-title').find_element(By.TAG_NAME, 'a').get_attribute('title').lstrip().strip()
         href = div.find_element(By.CLASS_NAME, 'jcse-news-title').find_element(By.TAG_NAME, 'a').get_attribute('href')
+        # 根据链接判重 (deduplicate by URL)
+        is_member = baseCore.r.sismember('REITs::' + webname, href)
+        if is_member:
+            continue
         type = div.find_element(By.CLASS_NAME, 'biaoqian').text.lstrip().strip()
         summary = div.find_element(By.CLASS_NAME, 'jcse-news-abs-content').text.lstrip().strip()
         dateInfo = div.find_element(By.CLASS_NAME, 'jcse-news-date').text
         publishDate = re.findall(pattern, dateInfo)[0]
         origin = dateInfo.replace(publishDate, '').lstrip().strip()
         if type == '政务公开':
-            organ, content, fjtitle_list, fjhref_list = getContentA(href, num, publishDate, title)
-            writtenDate = ''
-            pub_hao = ''
+            getContentA(href, num, publishDate, title, origin, summary)
         else:
-            organ, writtenDate, pub_hao, content, fjtitle_list, fjhref_list = getContentB(href, num, publishDate, title)
-        data = [num, title, publishDate, origin, href, writtenDate, organ, pub_hao, summary, content, fjtitle_list, fjhref_list]
-        data_list.append(data)
-        log.info(f'{title}===采集成功')
+            getContentB(href, num, publishDate, title, origin, summary)
         num += 1
         time.sleep(5)
     driver.close()
-    df = pd.DataFrame(np.array(data_list))
-    df.columns = ['序号', '标题', '发布时间', '来源', '原文链接', '发文时间', '发文机构', '发文字号', '摘要', '正文', '附件名称', '附件连接']
-    df.to_excel('./江苏省人民政府政策文件.xlsx', index=False)

 if __name__ == '__main__':
...
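doJob now checks each search result's URL against the Redis set before fetching it, and the old pandas/Excel export is removed entirely: records flow to Kafka instead of a spreadsheet. A minimal standalone sketch of that set-based dedup, assuming baseCore.r is a redis-py client as the sismember/sadd call sites suggest:

import redis

r = redis.Redis()  # assumption: baseCore.r is a redis-py client like this
DEDUP_KEY = 'REITs::江苏省人民政府'

def already_collected(url):
    # sismember is an O(1) set lookup; the set doubles as a crawl ledger.
    return r.sismember(DEDUP_KEY, url)

def mark_collected(url):
    # Only called after the record is successfully pushed to Kafka,
    # so a failed page will be retried on the next run.
    r.sadd(DEDUP_KEY, url)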