王景浩 / zzsn_spider · Commits · 639117a7

Commit 639117a7, authored Nov 30, 2023 by 薛凌堃
Commit message: 11.30

Parent: 3fdea62f
Showing 2 changed files with 90 additions and 48 deletions.
REITs专题数据/BaseCore.py         +19  -17
REITs专题数据/policy-tianjin.py   +71  -31
REITs专题数据/BaseCore.py

# REITs专题核心工具包
...
@@ -522,7 +522,7 @@ class BaseCore:
                 response = requests.get(file_href, headers=headers, verify=False, timeout=20)
                 file_size = int(response.headers.get('Content-Length'))
                 break
-            except:
+            except Exception as e:
                 time.sleep(3)
                 continue
         for i in range(0, 3):
...
@@ -538,7 +538,10 @@ class BaseCore:
                 retData['state'] = True
                 retData['path'] = result['body']['objectUrl'].split('.com')[1]
                 retData['full_path'] = unquote(result['body']['objectUrl'])
-                retData['file_size'] = self.convert_size(file_size)
+                try:
+                    retData['file_size'] = self.convert_size(file_size)
+                except:
+                    retData['file_size'] = ''
                 retData['create_time'] = time_now
             except Exception as e:
                 print(f'error:{e}')
...
@@ -546,20 +549,19 @@ class BaseCore:
         return retData

     def sendkafka(self, post_data, topic):
-        try:
-            producer = KafkaProducer(bootstrap_servers=['114.115.159.144:9092'], api_version=(2, 0, 2))
-            kafka_result = producer.send(topic, json.dumps(post_data, ensure_ascii=False).encode('utf8'))
-            print(kafka_result.get(timeout=10))
-            dic_result = {
-                'success': 'ture',
-                'message': '操作成功',
-                'code': '200',
-            }
-            self.getLogger().info(dic_result)
-            return True
-        except:
-            return False
+        producer = KafkaProducer(bootstrap_servers=['114.115.159.144:9092'], api_version=(2, 0, 2))
+        kafka_result = producer.send(topic, json.dumps(post_data, ensure_ascii=False).encode('utf8'))
+        print(kafka_result.get(timeout=10))
+        dic_result = {
+            'success': 'ture',
+            'message': '操作成功',
+            'code': '200',
+        }
+        self.getLogger().info(dic_result)
+        return True
...
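Note on the sendkafka() change above: with the try/except wrapper removed, exceptions from KafkaProducer now propagate to the caller instead of being swallowed and reported as a bare False. A minimal caller-side sketch, assuming the updated BaseCore module from this repository is importable and a broker is reachable; the payload and topic below are only examples:

    import BaseCore

    baseCore = BaseCore.BaseCore()
    post_data = {'title': 'demo', 'content': '...'}  # hypothetical payload
    try:
        baseCore.sendkafka(post_data, 'policy')      # may now raise on connection/send errors
    except Exception as e:
        print(f'kafka send failed: {e}')

The updated policy-tianjin.py below takes the same approach: it wraps baseCore.sendkafka(dic_info, topic) in a try block and, on failure, removes the already-stored attachments via baseCore.deliteATT.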
REITs专题数据/policy-tianjin.py
import os
import time
from datetime import datetime
from urllib.parse import urljoin
...
@@ -6,10 +7,21 @@ import numpy as np
 import pandas as pd
 import requests
 from bs4 import BeautifulSoup
-from base import BaseCore
+import BaseCore

 baseCore = BaseCore.BaseCore()
 log = baseCore.getLogger()
+from reits import Policy
+
+policy = Policy()
+topic = 'policy'
+webname = '天津市人民政府'
+
+import urllib3
+
+urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

 headers = {
     'Accept': 'application/json, text/plain, */*',
     'Accept-Encoding': 'gzip, deflate, br',
...
@@ -75,9 +87,8 @@ def getFjContent(url):
     return req.content

-def getContent(num, title, pub_time, origin, organ, url, pub_hao, summary, ):
-    fjhref_list = ''
-    fjtitle_list = ''
+def getContent(num, title, pub_time, origin, organ, url, pub_hao, summary):
+    id_list = []
     soup = getSoup(url)
     url_ = url.split('/')[-1]
     soup = paserUrl(soup, url.replace(url_, ''))
...
@@ -102,55 +113,84 @@ def getContent(num, title, pub_time, origin, organ, url, pub_hao, summary, ):
         a_list = contentWithTag.find('div', class_='qt-attachments').find_all('a')
         for a in a_list:
             href = a.get('href')
-            fjhref_list += href + '\n'
             category = os.path.splitext(href)[1]
-            fj_title = f'{num}-{pub_time}-{a.text.lstrip().strip()}'
+            fj_title = a.text.lstrip().strip()
             if '<' in fj_title or '>' in fj_title:
                 fj_title = fj_title.replace('<', '').replace('>', '')
             if category not in fj_title:
                 fj_title = fj_title + category
-            fjtitle_list += fj_title + '\n'
             fjcontent = getFjContent(href)
             file = f'./相关政策/天津市人民政府/政策文件/{fj_title}'
             with open(file, 'wb') as f:
                 f.write(fjcontent)
             log.info(f'{title}===附件下载成功')
     except:
         pass
     try:
-        contentWithTag.find('div', class_='qt-attachments').decompose()
+        att_id, full_path = policy.attuributefile(fj_title, href, num, pub_time)
+        if att_id:
+            id_list.append(att_id)
+        a['href'] = full_path
     except:
         pass
+    # try:
+    #     contentWithTag.find('div', class_='qt-attachments').decompose()
+    # except:
+    #     pass
     content = contentWithTag.text.lstrip().strip()
-    fjtitle_list = fjtitle_list.lstrip().strip()
-    fjhref_list = fjhref_list.lstrip().strip()
-    data = [num, title, pub_time, origin, url, pub_time, organ, pub_hao, summary, content, fjtitle_list, fjhref_list]
-    return data
+    contentWithTag_str = str(contentWithTag)
+    time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+    dic_info = {
+        'attachmentIds': id_list,
+        'author': '',
+        'content': content,
+        'contentWithTag': contentWithTag_str,
+        'deleteFlag': 0,
+        'id': '',
+        'title': title,
+        'publishDate': pub_time,
+        'origin': origin,
+        'sourceAddress': url,
+        'writtenDate': pub_time,
+        'organ': organ,
+        'topicClassification': '',
+        'issuedNumber': pub_hao,
+        'summary': summary,
+        'createDate': time_now,
+        'sid': '1729041400674045953',
+    }
+    try:
+        baseCore.sendkafka(dic_info, topic)
+        baseCore.r.sadd('REITs::' + webname, url)
+        log.info(f'采集成功--{title}--{url}')
+    except Exception as e:
+        for att_id in id_list:
+            baseCore.deliteATT(att_id)

 def doJob():
-    if not os.path.exists('./相关政策/天津市人民政府/政策文件'):
-        os.makedirs('./相关政策/天津市人民政府/政策文件')
-    data_list = []
+    # if not os.path.exists('./相关政策/天津市人民政府/政策文件'):
+    #     os.makedirs('./相关政策/天津市人民政府/政策文件')
+    # data_list = []
     total = getTotal()
     num = 1
     for page in range(1, total + 1):
         data_json = getJson(page)
         for i in range(len(data_json)):
             title = data_json[i]['title']
-            pub_time = datetime.strptime(data_json[i]['trs_time'], "%Y-%m-%dT%H:%M:%S.%f%z").date()
+            parsed_date = datetime.strptime(data_json[i]['trs_time'], '%Y-%m-%dT%H:%M:%S.%f%z')
+            # 将解析后的datetime对象格式化为目标字符串格式
+            pub_time = parsed_date.strftime('%Y-%m-%d %H:%M:%S')
             origin = data_json[i]['trs_site']
             organ = data_json[i]['department']
             href = data_json[i]['url']
+            # 根据链接判重
+            is_member = baseCore.r.sismember('REITs::' + webname, href)
+            if is_member:
+                continue
             pub_hao = data_json[i]['wh']
             summary = ''
-            data = getContent(num, title, pub_time, origin, organ, href, pub_hao, summary)
-            data_list.append(data)
             log.info(f'{title}===采集成功')
+            getContent(num, title, pub_time, origin, organ, href, pub_hao, summary)
             num += 1
-    df = pd.DataFrame(np.array(data_list))
-    df.columns = ['序号', '标题', '发布时间', '来源', '原文链接', '发文时间', '发文机构', '发文字号', '摘要', '正文', '附件名称', '附件连接']
-    df.to_excel('./天津市人民政府政策文件.xlsx', index=False)
+    # df = pd.DataFrame(np.array(data_list))
+    # df.columns = ['序号', '标题', '发布时间', '来源', '原文链接', '发文时间', '发文机构', '发文字号', '摘要', '正文', '附件名称', '附件连接']
+    # df.to_excel('./天津市人民政府政策文件.xlsx', index=False)

 if __name__ == '__main__':
...
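For reference, the reworked trs_time handling in doJob() boils down to a parse/format round trip; unlike the old .date() call, it keeps the time of day. A standalone sketch with a made-up timestamp (real values come from the 天津市人民政府 JSON feed):

    from datetime import datetime

    raw = '2023-11-30T09:30:00.000000+08:00'  # hypothetical trs_time string
    parsed_date = datetime.strptime(raw, '%Y-%m-%dT%H:%M:%S.%f%z')
    pub_time = parsed_date.strftime('%Y-%m-%d %H:%M:%S')
    print(pub_time)  # -> 2023-11-30 09:30:00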
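The new de-duplication is the other notable addition: getContent() records each processed URL in a Redis set ('REITs::' + webname) and doJob() skips URLs already in that set. A minimal sketch of the same pattern, assuming baseCore.r is a redis-py client; the host, key and URL below are placeholders:

    import redis

    r = redis.Redis(host='127.0.0.1', port=6379, db=0)  # placeholder connection
    key = 'REITs::天津市人民政府'
    href = 'https://example.com/policy/1.html'          # hypothetical article URL

    if r.sismember(key, href):
        pass  # already collected, skip it
    else:
        # ... collect the article, then mark it as done
        r.sadd(key, href)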