丁双波 / zzsn_spider · Commits · 687679ca

Commit 687679ca, authored Dec 05, 2023 by 薛凌堃

    广东省人民政府

Parent: d8ac5582

Showing 1 changed file with 76 additions and 76 deletions.

REITs专题数据/policy-guangdong.py  +76  −76
```diff
@@ -6,13 +6,21 @@ import requests
 import datetime
 from bs4 import BeautifulSoup
 from retry import retry
+from base import BaseCore
 import os
 import pandas as pd
 import numpy as np
-import BaseCore
 baseCore = BaseCore.BaseCore()
 log = baseCore.getLogger()
+from reits import Policy
+policy = Policy()
+topic = 'policy'
+webname = '广东省人民政府'
 headers = {
     'Content-Type': 'application/json',
     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
```
```diff
@@ -65,10 +73,10 @@ def getDataJson(url, data_post):
 def getContent(url, publishDate, num):
-    fjhref_list = ''
-    fjtitle_list = ''
+    id_list = []
     soup = getSoup(url)
     time.sleep(2)
+    policy.paserUrl(soup, url)
     try:
         try:
             contentWithTag = soup.select('body > div.con > div.viewList > div.zw')[0]
```

The added `policy.paserUrl(soup, url)` call presumably rewrites the page's relative links to absolute URLs before attachments are extracted (the `Policy` class itself is not part of this diff). A minimal sketch of that idea, assuming nothing about the real implementation:

```python
from urllib.parse import urljoin

from bs4 import BeautifulSoup


def absolutize_links(soup: BeautifulSoup, base_url: str) -> None:
    # Rewrite relative href/src attributes in place so that later
    # attachment extraction sees fetchable absolute URLs.
    for tag in soup.find_all(['a', 'img']):
        attr = 'href' if tag.name == 'a' else 'src'
        value = tag.get(attr)
        if value:
            tag[attr] = urljoin(base_url, value)
```

Doing this once up front would mean every later `a.get('href')` or `img.get('src')` already yields an absolute URL, which is consistent with the old protocol-fixing branch (`'http:' + fj_href`) being dropped below.
```diff
@@ -78,54 +86,21 @@ def getContent(url, publishDate, num):
             contentWithTag = soup.find('div', class_='article-content').find('center')
             if not contentWithTag:
                 contentWithTag = soup.find('div', class_='article-content')
-        img_list = contentWithTag.find_all('img')
-        num_ = 1
-        for img in img_list:
-            fj_href = img.get('src')
-            if "http" not in fj_href and '//www' in fj_href:
-                fj_href = 'http:' + fj_href
-            fjhref_list += fj_href + '\n'
-            fj_title = img.get('alt')
-            if fj_title == '':
-                fj_title = str(num_)
-                num_ += 1
-            category = os.path.splitext(fj_href)[1]
-            if category not in fj_title:
-                fj_title = fj_title + category
-            fj_title = f'{num}-{publishDate}-{fj_title}'
-            fjcontent = getFjContent(fj_href)
-            file = f'./相关政策/广东省人民政府/政策文件/{fj_title}'
-            if os.path.exists(file):
-                file = file.replace(category, f'-{num_}{category}')
-                num_ += 1
-            if os.path.exists(file):
-                fj_title = fj_title.replace(category, f'-{num_}{category}')
-                file = f'./相关政策/广东省人民政府/政策文件/{fj_title}'
-            fjtitle_list += fj_title + '\n'
-            with open(file, 'wb') as f:
-                f.write(fjcontent)
-            log.info(f'{fj_title}===附件下载成功')
         a_list = contentWithTag.find_all('a')
         for a in a_list:
             fj_href = a.get('href')
-            fjhref_list += fj_href + '\n'
             fj_title = a.text.lstrip().strip()
             if fj_title == '':
-                fj_title = str(num_)
-                num_ += 1
+                fj_title = str(num)
+                num += 1
             category = os.path.splitext(fj_href)[1]
             if category not in fj_title:
                 fj_title = fj_title + category
             fj_title = f'{num}-{publishDate}-{fj_title}'
-            fjcontent = getFjContent(fj_href)
-            file = f'./相关政策/广东省人民政府/政策文件/{fj_title}'
-            if os.path.exists(file):
-                file = file.replace(category, f'-{num_}{category}')
-                num_ += 1
-            fjtitle_list += fj_title + '\n'
-            with open(file, 'wb') as f:
-                f.write(fjcontent)
-            log.info(f'{fj_title}===附件下载成功')
+            att_id, full_path = policy.attuributefile(fj_title, fj_href, num, publishDate)
+            if att_id:
+                id_list.append(att_id)
         try:
             scripts = contentWithTag.find_all('script')
             for script in scripts:
```
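The removed download path guarded against overwrites with two back-to-back `os.path.exists` checks, which still collides once more than two attachments share a name within the same `{num}-{publishDate}-{title}` scheme. A sketch of a more systematic uniquifier (the helper name is hypothetical, not part of the repo):

```python
import os


def unique_path(directory: str, filename: str) -> str:
    # Append -1, -2, ... before the extension until the name is free,
    # instead of probing a fixed number of candidate names.
    base, ext = os.path.splitext(filename)
    candidate = os.path.join(directory, filename)
    counter = 1
    while os.path.exists(candidate):
        candidate = os.path.join(directory, f'{base}-{counter}{ext}')
        counter += 1
    return candidate
```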
```diff
@@ -139,9 +114,8 @@ def getContent(url, publishDate, num):
         except:
             pass
     content = contentWithTag.text.lstrip().strip()
-    fjtitle_list = fjtitle_list.lstrip().strip()
-    fjhref_list = fjhref_list.lstrip().strip()
-    return content, fjtitle_list, fjhref_list
+    return content, contentWithTag, id_list
 
 def ST(txt):
```
```diff
@@ -149,12 +123,17 @@ def ST(txt):
     return txt
 
-def getData(data_, num):
+def getData(data_, num, sid):
     title = ST(data_['title'])
     log.info(f'{title}===开始采集')
     publishDate = data_['pub_time']
     origin = data_['publisher_src']
     href = data_['url']
+    # 根据链接判重
+    is_member = baseCore.r.sismember('REITs::' + webname, href)
+    if is_member:
+        return
+    log.info(href)
     writtenDate = data_['date']
     if writtenDate:
```
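`baseCore.r` appears to be a Redis client, with one set per site (`'REITs::' + webname`) acting as the seen-URL index: membership is checked before crawling, and the URL is only added after a successful send (see the next hunk). A minimal sketch of the same dedup pattern with plain redis-py; the connection details are assumptions:

```python
import redis

r = redis.Redis(host='localhost', port=6379, db=0)  # assumed connection
SEEN_KEY = 'REITs::广东省人民政府'


def already_collected(href: str) -> bool:
    # SISMEMBER is O(1), so this check stays cheap as the set grows.
    return bool(r.sismember(SEEN_KEY, href))


def mark_collected(href: str) -> None:
    # Only called after the article is durably delivered downstream.
    r.sadd(SEEN_KEY, href)
```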
```diff
@@ -162,17 +141,44 @@ def getData(data_, num):
     organ = data_['source']
     pub_hao = data_['document_number']
     summary = ST(data_['content'])
-    content, fjtitle_list, fjhref_list = getContent(href, publishDate, num)
-    data = [num, title, publishDate, origin, href, writtenDate, organ, pub_hao, summary, content, fjtitle_list, fjhref_list]
-    return data
+    content, contentWithTag, id_list = getContent(href, publishDate, num)
+    contentWithTag_str = str(contentWithTag)
+    time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+    dic_info = {
+        'attachmentIds': id_list,
+        'author': '',
+        'content': content,
+        'contentWithTag': contentWithTag_str,
+        'deleteFlag': 0,
+        'id': '',
+        'title': title,
+        'publishDate': publishDate,
+        'origin': origin,
+        'sourceAddress': href,
+        'writtenDate': writtenDate,
+        'organ': organ,
+        'topicClassification': '',
+        'issuedNumber': pub_hao,
+        'summary': summary,
+        'createDate': time_now,
+        'sid': sid,
+    }
+    try:
+        baseCore.sendkafka(dic_info, topic)
+        baseCore.r.sadd('REITs::' + webname, href)
+        log.info(f'采集成功--{title}--{href}')
+    except Exception as e:
+        for att_id in id_list:
+            baseCore.deliteATT(att_id)
+    return
 
 # 政策文件
-def doJob_1():
-    if not os.path.exists('./相关政策/广东省人民政府/政策文件'):
-        os.makedirs('./相关政策/广东省人民政府/政策文件')
+def doJob_1(sid1):
+    # if not os.path.exists('./相关政策/广东省人民政府/政策文件'):
+    #     os.makedirs('./相关政策/广东省人民政府/政策文件')
     pageSize = getPageSize()
     data_list = []
     num = 1
     url = 'https://search.gd.gov.cn/api/search/file'
     for page in range(1, pageSize + 1):
```
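`baseCore.sendkafka` and `baseCore.deliteATT` are project helpers, so the exact transport is not visible here, but the pattern is: build `dic_info`, send it to the `policy` topic, and only then mark the URL as seen; if the send fails, delete the attachments already uploaded for this article. A sketch of that flow with kafka-python, assuming a JSON-serialized topic and a local broker:

```python
import json

from kafka import KafkaProducer  # pip install kafka-python

producer = KafkaProducer(
    bootstrap_servers='localhost:9092',  # assumed broker address
    value_serializer=lambda v: json.dumps(v, ensure_ascii=False).encode('utf-8'),
)


def publish(dic_info: dict, id_list: list, delete_attachment) -> bool:
    """Send one article; on failure, roll back its uploaded attachments."""
    try:
        # .get() blocks until the broker acknowledges, so failures surface here.
        producer.send('policy', dic_info).get(timeout=30)
        return True
    except Exception:
        for att_id in id_list:
            delete_attachment(att_id)  # stand-in for baseCore.deliteATT
        return False
```

Only when `publish` returns True would the URL be added to the Redis seen-set, so a failed send leaves the article eligible for retry on the next run.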
```diff
@@ -182,17 +188,15 @@ def doJob_1():
         data_post = json.dumps(data_post)
         data_json = getDataJson(url, data_post)
         for data_ in data_json:
-            data = getData(data_, num)
-            data_list.append(data)
-            log.info(f'{data[1]}===采集成功')
+            getData(data_, num, sid1)
             num += 1
-    return data_list, num
+    return
 
-def doJob_2(num):
+def doJob_2(sid2):
     url = 'https://search.gd.gov.cn/api/search/all'
     types = ['政策解读', '计划规划']
     data_list = []
     num = 1
     for type in types:
         data_post = {"label": f"{type}", "position": "all", "keywords": "REITs", "sort": "smart",
                      "site_id": "2", "range": "site", "page": 1, "tag_name": f"{type}",
                      "recommand": 1, "gdbsDivision": "440000",
```
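`getDataJson` itself falls outside the captured hunks, but given `from retry import retry` in the imports and the `json.dumps(data_post)` call sites, it is presumably a retried POST against the search API. A hedged sketch; the function name, timeout, and response shape are all assumptions:

```python
import requests
from retry import retry

HEADERS = {
    'Content-Type': 'application/json',
    'User-Agent': 'Mozilla/5.0',  # trimmed; the module uses a full browser UA
}


@retry(tries=3, delay=2)
def get_data_json(url: str, data_post: str) -> list:
    # data_post is already a JSON string (json.dumps at the call site),
    # so it is passed as the raw request body.
    resp = requests.post(url, headers=HEADERS, data=data_post, timeout=30)
    resp.raise_for_status()
    # Assumed shape: the hit list lives under data -> list.
    return resp.json().get('data', {}).get('list', [])
```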
```diff
@@ -200,23 +204,19 @@ def doJob_2(num):
         data_post = json.dumps(data_post)
         data_json = getDataJson(url, data_post)
         for data_ in data_json:
-            data = getData(data_, num)
+            getData(data_, num, sid2)
             time.sleep(1)
-            data_list.append(data)
-            log.info(f'{data[1]}===采集成功')
             num += 1
-    return data_list
+    return
 
 def doJob():
-    data_list = []
-    data_list_, num = doJob_1()
-    data_list += data_list_
-    data_list_ = doJob_2(num)
-    data_list += data_list_
-    df = pd.DataFrame(np.array(data_list))
-    df.columns = ['序号', '标题', '发布时间', '来源', '原文链接', '发文时间', '发文机构', '发文字号', '摘要', '正文', '附件名称', '附件连接']
-    df.to_excel('./相关政策/广东省人民政府/广东省人民政府政策文件.xlsx', index=False)
+    sid1 = '1729044231736971266'
+    sid2 = '1729044396395048961'
+    doJob_1(sid1)
+    doJob_2(sid2)
 
 if __name__ == '__main__':
```