王景浩 / zzsn_spider · Commits
Commit 060ce7c4, authored Aug 12, 2023 by 薛凌堃
Parent: aedff657
Commit message: 2023/8/12

Showing 2 changed files with 190 additions and 181 deletions:
    fbs_annualreport.py    comData/annualReport_ZJH/fbs_annualreport.py    +127 -120
    fbs_notice.py          comData/noticeReport_ZJH/fbs_notice.py          +63 -61
comData/annualReport_ZJH/fbs_annualreport.py

@@ -92,135 +92,142 @@ def SpiderByZJH(url, payload, dic_info, num, start_time):

This hunk hardens the page-count step of the annual-report SpiderByZJH. The bare "return" on an empty soup becomes "return False"; page is initialized to 0 and the pager lookup is wrapped in a try/except that, when the pager is missing, records f"该企业没有{dic_parms['Catagory2']}数据" ("this company has no {Catagory2} data") via baseCore.recordLog and returns False (note that this except branch references social_code and dic_parms, unlike the item_id-based code around it); the crawl loop is then re-indented under a new "if page != 0:" guard, its body otherwise unchanged. After the change the section reads:

    short_name = dic_info[4]
    soup = RequestUrl(url, payload, item_id, start_time)
    if soup == '':
        return False
    # 先获取页数 (first, get the page count)
    page = 0
    try:
        page = soup.find('div', class_='pages').find('ul', class_='g-ul').text
    except:
        e = f"该企业没有{dic_parms['Catagory2']}数据"
        state = 0
        takeTime = baseCore.getTimeCost(start_time, time.time())
        baseCore.recordLog(social_code, taskType, state, takeTime, dic_parms['url'], f'{e}')
        return False
    if page != 0:
        total = re.findall(r'\d+', page)[0]
        r_page = int(total) % 15
        if r_page == 0:
            Maxpage = int(total) // 15
        else:
            Maxpage = int(total) // 15 + 1
        # 首页和其他页不同,遍历 如果是首页 修改一下链接 (the first page's URL differs from the rest)
        for i in range(1, Maxpage + 1):
            if i == 1:
                href = url
            else:
                # http://eid.csrc.gov.cn/101811/index_3_f.html
                href = url.split('index')[0] + f'index_{i}_f.html'
            soup = RequestUrl(href, payload, item_id, start_time)
            if soup == '':
                continue
            tr_list = soup.find('div', id='txt').find_all('tr')
            for tr in tr_list[1:]:
                td_list = tr.find_all('td')
                pdf_url_info = td_list[2]
                # print(pdf_url)
                pdf_url = pdf_url_info['onclick'].strip('downloadPdf1(').split(',')[0].strip('\'')
                name_pdf = pdf_url_info['onclick'].strip('downloadPdf1(').split(',')[1].strip('\'')
                # pub_time = pdf_url_info['onclick'].strip('downloadPdf1(').split(',')[2].strip('\'')
                # print(name)
                report_type = td_list[4].text.strip()
                # print(report_type)
                if report_type == '年报':
                    if '摘要' in name_pdf:
                        continue
                    # 年份还从pdf名称里抽取 (the year is still pulled from the PDF's name)
                    try:
                        year = re.findall('\d{4}\s*年', name_pdf)[0].replace('年', '')
                    except Exception as e:
                        pub_time = pdf_url_info['onclick'].strip('downloadPdf1(').split(',')[2].strip('\'')[:4]
                        year = int(pub_time) - 1
                        year = str(year)
                    page_size = 0
                    sel_sql = '''select item_id,year from clb_sys_attachment where item_id = %s and year = %s'''
                    cursor_.execute(sel_sql, (item_id, year))
                    selects = cursor_.fetchone()
                    if selects:
                        print(f'com_name:{short_name}、{year}已存在')
                        continue
                    else:
                        # 类型为年报的话就解析该年报pdf,并入库 (for annual reports, parse the PDF and store it)
                        for i in range(0, 3):
                            try:
                                resp_content = requests.request("GET", pdf_url).content
                                # 获取pdf页数 (get the PDF's page count)
                                with fitz.open(stream=resp_content, filetype='pdf') as doc:
                                    page_size = doc.page_count
                                break
                            except Exception as e:
                                print(e)
                                time.sleep(3)
                                continue
                        if page_size < 1:
                            # pdf解析失败 (PDF parsing failed)
                            print(f'==={short_name}、{year}===pdf解析失败')
                            state = 0
                            takeTime = baseCore.getTimeCost(start_time, time.time())
                            baseCore.recordLog(item_id, taskType, state, takeTime, pdf_url, 'pdf解析失败')
                            continue
                        result = ''
                        for i in range(0, 3):
                            try:
                                result = client.upload_by_buffer(resp_content, file_ext_name='pdf')
                                break
                            except Exception as e:
                                print(e)
                                time.sleep(3)
                                continue
                        if result == '':
                            e = '上传服务器失败'
                            state = 0
                            takeTime = baseCore.getTimeCost(start_time, time.time())
                            baseCore.recordLog(item_id, taskType, state, takeTime, pdf_url, e)
                            continue
                        if 'Remote file_id' in str(result) and 'Uploaded size' in str(result):
                            time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                            type_id = '1'
                            item_id = item_id
                            group_name = 'group1'
                            path = bytes.decode(result['Remote file_id']).replace('group1', '')
                            full_path = bytes.decode(result['Remote file_id'])
                            category = 'pdf'
                            file_size = result['Uploaded size']
                            order_by = num
                            status = 1
                            create_by = 'XueLingKun'
                            create_time = time_now
                            page_size = page_size
                            try:
                                tableUpdate(year, name_pdf, type_id, item_id, group_name, path, full_path,
                                            category, file_size, order_by, status, create_by, create_time, page_size)
                                state = 1
                                takeTime = baseCore.getTimeCost(start_time, time.time())
                                baseCore.recordLog(item_id, taskType, state, takeTime, pdf_url, '')
                            except:
                                e = '数据库传输失败'
                                state = 0
                                takeTime = baseCore.getTimeCost(start_time, time.time())
                                baseCore.recordLog(item_id, taskType, state, takeTime, pdf_url, e)
                            num = num + 1
                            time.sleep(2)
                        else:
                            e = '采集失败'
                            state = 0
                            takeTime = baseCore.getTimeCost(start_time, time.time())
                            baseCore.recordLog(item_id, taskType, state, takeTime, pdf_url, e)
                            continue
                else:
                    continue
    return True

def getUrl(code, url_parms, Catagory2_parms):
    # 深市 (Shenzhen market)
    ...
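A note on the onclick parsing used by both files: each table row carries an onclick value of the form downloadPdf1('<url>','<title>','<date>'), and the code extracts the three fields with chained strip/split calls. Python's str.strip removes a set of characters from both ends rather than a literal prefix, and split(',') would also break on titles containing commas, so the pattern is fragile even where it happens to work. A minimal regex-based sketch of the same extraction (the helper name and sample value here are illustrative, not part of the commit):

    import re

    def parse_download_onclick(onclick):
        # Matches downloadPdf1('<url>','<title>','<date>') and returns the three fields.
        m = re.match(r"downloadPdf1\('([^']*)',\s*'([^']*)',\s*'([^']*)'\)", onclick)
        return m.groups() if m else None

    print(parse_download_onclick(
        "downloadPdf1('http://example.com/a.pdf','2022年年度报告','2023-04-28')"))
    # ('http://example.com/a.pdf', '2022年年度报告', '2023-04-28')

Anchoring on the literal downloadPdf1( prefix also makes a malformed row return None instead of silently yielding a mangled URL.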
comData/noticeReport_ZJH/fbs_notice.py

@@ -261,76 +261,78 @@ def SpiderByZJH(url, payload, dic_info, start_time):  # dic_info 数据库中获

The same guard is applied to the notice crawler's SpiderByZJH: page is initialized to 0 before the try block, the except branch now records the actual message f'{e}' instead of the unrelated 'Kafka操作失败' ("Kafka operation failed"), and the paging loop is re-indented under "if page != 0:", its body otherwise unchanged. After the change the section reads:

    if soup == '':
        return False
    # 先获取页数 (first, get the page count)
    page = 0
    try:
        page = soup.find('div', class_='pages').find('ul', class_='g-ul').text
    except:
        e = f"该企业没有{dic_parms['Catagory2']}数据"
        state = 0
        takeTime = baseCore.getTimeCost(start_time, time.time())
        baseCore.recordLog(social_code, taskType, state, takeTime, dic_parms['url'], f'{e}')
        return False
    if page != 0:
        total = re.findall(r'\d+', page)[0]
        r_page = int(total) % 15
        if r_page == 0:
            Maxpage = int(total) // 15
        else:
            Maxpage = int(total) // 15 + 1
        log.info(f'{short_name}====={code}===========一共{total}条,{Maxpage}页')
        # 首页和其他页不同,遍历 如果是首页 修改一下链接 (the first page's URL differs from the rest)
        for i in range(1, Maxpage + 1):
            log.info(f'==========正在采集第{i}页=========')
            if i == 1:
                href = url
            else:
                # http://eid.csrc.gov.cn/101811/index_3_f.html
                href = url.split('index')[0] + f'index_{i}_f.html'
            soup = RequestUrl(href, payload, social_code, start_time)
            if soup == '':
                continue
            tr_list = soup.find('div', id='txt').find_all('tr')
            pageIndex = 0
            for tr in tr_list[1:]:
                pageIndex += 1
                td_list = tr.find_all('td')
                pdf_url_info = td_list[2]
                # print(pdf_url)
                pdf_url = pdf_url_info['onclick'].strip('downloadPdf1(').split(',')[0].strip('\'')
                name_pdf = pdf_url_info['onclick'].strip('downloadPdf1(').split(',')[1].strip('\'')
                pub_time = pdf_url_info['onclick'].strip('downloadPdf1(').split(',')[2].strip('\'')
                year = pub_time[:4]
                report_type = td_list[4].text.strip()
                # 信息插入数据库 (insert the record into the database)
                insert = InsterInto(short_name, social_code, name_pdf, pub_time, pdf_url, report_type)
                log.info(f'======={short_name}========{code}===插入公告库成功')
                if insert:
                    # # 公告信息列表
                    # okCount = okCount + 1
                    # 解析PDF内容,先获取PDF链接 下载 解析成功,解析失败 ,传输成功,传输失败 (fetch the PDF, then track parse/transfer success or failure)
                    result = GetContent(pdf_url, name_pdf, social_code, year, pub_time, start_time)
                    if result:
                        # 公告信息列表 (notice list)
                        okCount = okCount + 1
                        log.info(f'{short_name}==============解析传输操作成功')
                        state = 1
                        takeTime = baseCore.getTimeCost(start_time, time.time())
                        baseCore.recordLog(social_code, taskType, state, takeTime, pdf_url, '')
                        pass
                    else:
                        errorCount += 1
                        # time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                        log.error(f'{short_name}=============解析或传输操作失败')
                        # try:
                        # insert_err_sql = f"insert into dt_err(xydm,`from`,url,title,pub_date,zhaiyao,create_date,state,pageNo,pageIndex,type) values('{social_code}','证监会','{pdf_url}','{name_pdf}','{pub_time}',' ',now(),1,{i},{pageIndex},'1')"
                        # cursor_.execute(insert_err_sql)
                        # cnx_.commit()
                        # except:
                        # pass
                        continue
    return True

#state2
...
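Both hunks share the same paging arithmetic: the CSRC listing shows 15 rows per page, so Maxpage is total // 15, plus one when total % 15 is non-zero, which is just ceil(total / 15); page 1 keeps the base URL and later pages rewrite it to index_{i}_f.html. A minimal sketch of that logic in isolation (the function names are illustrative):

    import math

    def max_page(total, per_page=15):
        # Equivalent to the // 15 and % 15 branching in both files.
        return math.ceil(total / per_page)

    def page_href(url, i):
        # 首页和其他页不同: the first page keeps the base URL, later pages rewrite it.
        return url if i == 1 else url.split('index')[0] + f'index_{i}_f.html'

    assert max_page(30) == 2 and max_page(31) == 3
    print(page_href('http://eid.csrc.gov.cn/101811/index_f.html', 3))
    # http://eid.csrc.gov.cn/101811/index_3_f.html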