zzsn_spider · Commits

Commit 257631f1
Authored Sep 08, 2023 by 薛凌堃

Commit message: 9/8

Parent: 7dcd1a4c
Showing 2 changed files with 19 additions and 75 deletions (+19 / -75):
annualreportUS.py (comData/annualReport_XQW/annualreportUS.py): +17 / -73
雪球网-年报.py (comData/annualReport_ZJH/雪球网-年报.py): +2 / -2
comData/annualReport_XQW/annualreportUS.py
```diff
@@ -19,6 +19,9 @@ urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
 from bs4 import BeautifulSoup
 from kafka import KafkaProducer
 # from selenium import webdriver
+from base.BaseCore import BaseCore
+baseCore = BaseCore()
+log = baseCore.getLogger()
 def paserUrl(html, listurl):
     # soup = BeautifulSoup(html, 'html.parser')
```
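The three added lines swap ad-hoc print calls for a shared project logger. For readers without the rest of the repo, a minimal stand-in for base.BaseCore might look like the sketch below; this is an assumption, since the real class in base/BaseCore.py is not part of this diff and may configure logging differently.

```python
# Hypothetical minimal stand-in for base.BaseCore; the real helper is not shown here.
import logging

class BaseCore:
    def getLogger(self):
        logger = logging.getLogger('zzsn_spider')
        if not logger.handlers:  # avoid stacking handlers on repeated calls
            handler = logging.StreamHandler()
            handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
            logger.addHandler(handler)
            logger.setLevel(logging.INFO)
        return logger

baseCore = BaseCore()
log = baseCore.getLogger()
log.info('logger ready')
```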
```diff
@@ -108,13 +111,13 @@ def spider(com_name,cik):
     for form in form_type_list:
         i += 1
         if form == '10-K' or form == '20-F':
-            print(form, i)
+            log.info(form, i)
             accessionNumber = accessionNumber_list[i]
             # publication date
             filingDate = filingDate_list[i]
             year = filingDate[:4]
-            # u_1 = cik
-            u_1 = '1395064'
+            u_1 = cik
+            # u_1 = '1395064'
             u_2 = accessionNumber.replace('-', '')
             u_3 = primaryDocument_list[i]
             news_url = 'https://www.sec.gov/Archives/edgar/data/' + u_1 + '/' + u_2 + '/' + u_3
```
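One caveat on the print-to-logger swap above: if getLogger() returns a standard logging.Logger, then log.info(form, i) treats i as a %-format argument for form, which fails at render time unless form contains a format specifier; an f-string avoids that. The sketch below shows the safer call plus the EDGAR archive URL assembled at the end of the hunk, with hypothetical values standing in for the pieces the code reads from lists.

```python
# Sketch only; accessionNumber and primaryDocument are hypothetical examples.
# The URL pattern itself comes from the hunk above.
form, i = '10-K', 3
# log.info(form, i)        # %-formats `form` with `i` on a stdlib Logger
# log.info(f'{form} {i}')  # unambiguous alternative

cik = '1395064'
accessionNumber = '0001234567-23-000123'   # hypothetical
primaryDocument = 'form10-k.htm'           # hypothetical
u_2 = accessionNumber.replace('-', '')     # EDGAR archive paths drop the dashes
news_url = f'https://www.sec.gov/Archives/edgar/data/{cik}/{u_2}/{primaryDocument}'
print(news_url)
```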
```diff
@@ -126,57 +129,13 @@ def spider(com_name,cik):
             # convert relative paths to absolute paths
             soup = paserUrl(soup, news_url)
             content = soup.text.strip()
-            # url = f'https://www.sec.gov/edgar/browse/?CIK={cik}&owner=exclude'
-            # browser.get(url)
-            # time.sleep(3)
-            # page_source = browser.page_source
-            # soup = BeautifulSoup(page_source, 'html.parser')
-            # # print(soup)
-            # select_ann = soup.find_all('tr', class_='odd')
-            #
-            # for tr in select_ann:
-            #     form_type = tr.find('td').text
-            #     if form_type == '20-F':
-            #         # print(tr)
-            #         # grab the link to the original filing
-            #         href = tr.find('a', class_='document-link')['href']
-            #         print(href)
-            #         if 'ix?doc' in href:
-            #             href = 'https://www.sec.gov/' + href.split('/ix?doc=/')[1]
-            #         else:
-            #             href = 'https://www.sec.gov' + href
-            #         print(href)
-            #         # grab the publication date
-            #         a_list = tr.find_all('a')
-            #         # print(a_list)
-            #         for a in a_list:
-            #             text = a.text
-            #             match = re.search(pattern, text)
-            #             if match:
-            #                 pub_date = match.group(0)
-            #                 # print(pub_date)
-            #                 year = pub_date[:4]
-            #                 break
-            #         else:
-            #             pub_date = ''
-            #             year = ''
-            #         # fetch the report body from its link; no file-server upload, send to Kafka directly
-            #         browser.get(href)
-            #         time.sleep(3)
-            #         i_page_source = browser.page_source
-            #         i_soup = BeautifulSoup(i_page_source, 'html.parser')
-            #         # print(i_page_source)
-            #         content = i_soup.text
-            # the body text is scraped and sent straight to Kafka
             time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
             title = f'{com_name}:{year}年年度报告'
             dic_news = {
                 'attachmentIds': '',
                 'author': '',
                 'content': content,
-                'contentWithTag': soup,
+                'contentWithTag': str(soup),
                 'createDate': time_now,
                 'deleteFlag': '0',
                 'id': '',
```
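The contentWithTag change is the substantive fix in this hunk: a BeautifulSoup object is not JSON-serializable, so the json.dumps(dic_news, ...) call further down would raise a TypeError before anything reached Kafka, while str(soup) stores the rendered HTML instead. A minimal reproduction:

```python
import json
from bs4 import BeautifulSoup

soup = BeautifulSoup('<p>annual report</p>', 'html.parser')

try:
    json.dumps({'contentWithTag': soup})
except TypeError as e:
    print(e)  # Object of type BeautifulSoup is not JSON serializable

payload = json.dumps({'contentWithTag': str(soup)}, ensure_ascii=False)
print(payload)  # {"contentWithTag": "<p>annual report</p>"}
```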
```diff
@@ -192,21 +151,21 @@ def spider(com_name,cik):
                 'socialCreditCode': '',
                 'year': year
             }
-            print(dic_news)
+            # print(dic_news)
             # send the relevant fields through Kafka for storage
             try:
                 producer = KafkaProducer(bootstrap_servers=['114.115.159.144:9092'])
                 kafka_result = producer.send("researchReportTopic",
                                              json.dumps(dic_news, ensure_ascii=False).encode('utf8'))
-                print(kafka_result.get(timeout=10))
+                log.info(kafka_result.get(timeout=10))
                 dic_result = {
                     'success': 'ture',
                     'message': '操作成功',
                     'code': '200',
                 }
-                print(dic_result)
+                log.info(dic_result)
             except Exception as e:
                 dic_result = {
```
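The send itself is unchanged: kafka-python's producer.send() returns a future, and .get(timeout=10) blocks until the broker acknowledges, which makes the call effectively synchronous. An equivalent sketch that moves the JSON encoding into kafka-python's value_serializer hook is below; the broker address and topic come from the hunk, while the dict is a trimmed stand-in for the real one.

```python
import json
from kafka import KafkaProducer

producer = KafkaProducer(
    bootstrap_servers=['114.115.159.144:9092'],
    # serialize once here instead of at every send() call site
    value_serializer=lambda v: json.dumps(v, ensure_ascii=False).encode('utf8'),
)

dic_news = {'title': 'example', 'year': '2023'}  # trimmed stand-in for the real dict
future = producer.send('researchReportTopic', dic_news)
meta = future.get(timeout=10)  # blocks until the broker acks
print(meta.topic, meta.partition, meta.offset)
```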
```diff
@@ -215,6 +174,7 @@ def spider(com_name,cik):
                     'code': '204',
                     'e': e
                 }
+                log.info(f'{dic_result}-----{e}')
 
 def getrequest(social_code, url, headers, data):
```
```diff
@@ -261,8 +221,8 @@ if __name__ == '__main__':
     while True:
         start_time = time.time()
         # fetch the company record
-        social_code = baseCore.redicPullData('AnnualEnterprise:usqy_socialCode')
-        # social_code = ''
+        # social_code = baseCore.redicPullData('AnnualEnterprise:usqy_socialCode')
+        social_code = 'ZZSN22080900000025'
         if not social_code:
             time.sleep(20)
             continue
```
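redicPullData appears to pop the next company identifier off a Redis-backed queue, with the hardcoded 'ZZSN22080900000025' standing in for debugging in this commit. Below is a guess at the underlying call, assuming the key names a Redis list; the real helper lives in base/BaseCore.py and is not shown in this diff.

```python
import time
import redis

r = redis.Redis(host='127.0.0.1', port=6379, db=0)  # assumed connection settings

def redicPullData(key):
    """Pop the next task off a Redis list; returns None when the queue is empty."""
    item = r.lpop(key)
    return item.decode('utf8') if item else None

while True:
    social_code = redicPullData('AnnualEnterprise:usqy_socialCode')
    if not social_code:
        time.sleep(20)   # same back-off as the spider's main loop
        continue
    print('processing', social_code)
    break  # demo only: stop after one item
```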
```diff
@@ -273,7 +233,7 @@ if __name__ == '__main__':
             time.sleep(20)
             continue
         dic_info = baseCore.getInfomation(social_code)
-        count = dic_info[15]
+        count = dic_info[16]
         code = dic_info[3]
         com_name = dic_info[4]
         cik = dic_info[13]
```
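Both files bump the count column from dic_info[15] to dic_info[16], which suggests the company record returned by getInfomation() gained a field. Positional indices like these break silently whenever the row layout shifts; one way to contain that, sketched below, is to name the positions once. The field names are guesses, only positions 3, 4, 13 and 16 come from the diff.

```python
from enum import IntEnum

class InfoCol(IntEnum):
    # Names are guesses; only the positions appear in the diff.
    CODE = 3
    COM_NAME = 4
    CIK = 13
    COUNT = 16

dic_info = [''] * 20            # stand-in for baseCore.getInfomation(social_code)
dic_info[InfoCol.CODE] = 'BP'
dic_info[InfoCol.COUNT] = 0

code = dic_info[InfoCol.CODE]   # IntEnum members index lists like plain ints
count = dic_info[InfoCol.COUNT]
print(code, count)
```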
```diff
@@ -289,26 +249,10 @@ if __name__ == '__main__':
             takeTime = baseCore.getTimeCost(start_time, time.time())
             baseCore.recordLog(social_code, taskType, state, takeTime, '', exeception)
             continue
-        # code = 'BP'
-        # com_name = '英国石油公司'
-        # cik = ''
-        # "MNSO" POST request to fetch the company CIK (production)
-        # payload = {"keysTyped":f"{code}","narrow":True}
-        # # test
-        # # payload = {"keysTyped": "BP", "narrow":True}
-        # data = json.dumps(payload)
-        # result = getrequest(social_code,url,headers,data)
-        # # print(result)
-        # # work out which API hit is this company, matched by ticker code
-        # tickers = result['hits']['hits']
-        # for ticker in tickers:
-        #     i_t = ticker['_source']['tickers']
-        #     if i_t == code:
-        #         cik = ticker['_id']
-        #         print(cik)
-        #         break
-        #     break
         spider(com_name, cik)
+        count += 1
+        runType = 'AnnualReportCount'
+        baseCore.updateRun(social_code, runType, count)
         # break
```
comData/annualReport_ZJH/雪球网-年报.py
```diff
@@ -251,7 +251,7 @@ if __name__ == '__main__':
             time.sleep(20)
             continue
         dic_info = baseCore.getInfomation(social_code)
-        count = dic_info[15]
+        count = dic_info[16]
         code = dic_info[3]
         com_name = dic_info[4]
         if code is None:
```