王景浩 / zzsn_spider · Commits · caa3a936

Commit caa3a936, authored Aug 15, 2023 by LiuLiYuan

    Merge remote-tracking branch 'origin/master'

    # Conflicts:
    #	base/BaseCore.py

Parents: 21214964, 0b2f4ac1
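For reference, a two-parent merge commit with a recorded conflict like this one typically comes from a sequence along these lines (a sketch; only the conflicted file and the parent hashes are taken from this page):

    git fetch origin
    git merge origin/master
    # CONFLICT (content): Merge conflict in base/BaseCore.py
    # resolve by editing base/BaseCore.py, then:
    git add base/BaseCore.py
    git commit    # records a merge commit with parents 21214964 and 0b2f4ac1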
Showing 3 changed files with 324 additions and 342 deletions.
base/BaseCore.py                   +34   −34
comData/weixin_solo/oneWeixin.py   +255  −282
comData/weixin_solo/test.py        +35   −26
base/BaseCore.py

@@ -15,15 +15,15 @@ from selenium.webdriver.chrome.service import Service
 from openpyxl import Workbook
 import langid

 # Note: call BaseCore.close() before the program exits to release resources
 class BaseCore:
     # sequence number
     __seq = 0
     # proxy pool: database connection
     __cnx_proxy = None
     __cursor_proxy = None
     # user-agent pool
     __USER_AGENT_LIST = [

@@ -218,9 +218,8 @@ class BaseCore:
         'Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5'
     ]
     # Android user-agent pool
     __USER_PHONE_AGENT_LIST = ['Mozilla/5.0 (Linux; Android 7.1.1; OPPO R9sk) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.111 Mobile Safari/537.36']

     def __init__(self):
         self.__cnx_proxy = pymysql.connect(host='114.115.159.144', user='root', password='zzsn9988', db='clb_project',

@@ -239,11 +238,12 @@ class BaseCore:
             self.__cnx_proxy.close()
+            self.cursor.close()
+            self.cnx.close()
         except:
             pass

     # compute the elapsed time between start and end
     def getTimeCost(self, start, end):
         seconds = int(end - start)
         m, s = divmod(seconds, 60)
         h, m = divmod(m, 60)

@@ -256,7 +256,6 @@ class BaseCore:
         else:
             ms = int((end - start) * 1000)
             return "%d毫秒" % (ms)

     # format the current time
     # 1 : 2001-01-01 12:00:00   %Y-%m-%d %H:%M:%S
     # 2 : 010101120000          %y%m%d%H%M%S

@@ -286,7 +285,7 @@ class BaseCore:
         return "ZZSN" + self.getNowTime(2) + str(self.__seq).zfill(3)

     # log format
     def logFormate(self, record, handler):
         formate = "[{date}] [{level}] [{filename}] [{func_name}] [{lineno}] {msg}".format(
             date=record.time,           # log time
             level=record.level_name,    # log level

@@ -296,9 +295,8 @@ class BaseCore:
             msg=record.message          # log message
         )
         return formate

     # build the logger
     def getLogger(self, fileLogFlag=True, stdOutFlag=True):
         dirname, filename = os.path.split(os.path.abspath(sys.argv[0]))
         dirname = os.path.join(dirname, "logs")
         filename = filename.replace(".py", "") + ".log"

@@ -347,34 +345,34 @@ class BaseCore:
             proxy_list.append(proxy)
         return proxy_list[random.randint(0, 3)]

     # substring extraction between beginStr and endStr
     def getSubStr(self, str, beginStr, endStr):
         if beginStr == '':
             pass
         else:
             begin = str.find(beginStr)
             if begin == -1:
                 begin = 0
             str = str[begin:]
         if endStr == '':
             pass
         else:
             end = str.rfind(endStr)
             if end == -1:
                 pass
             else:
                 str = str[0:end + 1]
         return str

     # convert Traditional Chinese characters to Simplified
     def hant_2_hans(self, hant_str: str):
         '''
         Function: convert hant_str from Traditional to Simplified Chinese
         '''
         return zhconv.convert(hant_str, 'zh-hans')

     # check whether the string contains a digit
     def str_have_num(self, str_num):
         panduan = False
         for str_1 in str_num:

@@ -394,7 +392,7 @@ class BaseCore:
     # return gw_item.decode() if gw_item else None

     # fetch and remove one element from a Redis list
     def redicPullData(self, key):
         item = self.r.lpop(key)
         return item.decode() if item else None

@@ -417,7 +415,7 @@ class BaseCore:
         chrome_options = webdriver.ChromeOptions()
         if headless:
             chrome_options.add_argument('--headless')
         chrome_options.add_argument('--disable-gpu')
         chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
         chrome_options.add_experimental_option('useAutomationExtension', False)

@@ -460,16 +458,16 @@ class BaseCore:
                 print(e)
             self.cnx.commit()

     # fetch the QCC (企查查) token
     def GetToken(self):
         query = "select token from QCC_token "
         # token = '67ec7402166df1da84ae83c4b95cefc0'  # needs to be re-captured roughly every two hours
         self.cursor.execute(query)
         token = self.cursor.fetchone()[0]
         return token

     # detect the language
     def detect_language(self, text):
         # use langid.py to classify the text's language
         result = langid.classify(text)

@@ -479,11 +477,11 @@ class BaseCore:
             return 'cn'
         return result[0]

     # append rows into an Excel file
     def writerToExcel(self, detailList, filename):
         # filename='baidu搜索.xlsx'
         # read the existing xlsx file
         existing_data = pd.read_excel(filename, engine='openpyxl')
         # build the new data
         new_data = pd.DataFrame(data=detailList)
         # append the new data to the end of the existing data

@@ -492,6 +490,8 @@ class BaseCore:
         combined_data.to_excel(filename, index=False)
         # return combined_data

     # put failed or interrupted enterprises back into Redis
     def rePutIntoR(self, item):
         self.r.rpush('NewsEnterprise:gwqy_socialCode', item)
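A minimal usage sketch of the BaseCore helpers touched by this diff (hypothetical; it assumes the MySQL/Redis endpoints configured in __init__ are reachable and that zhconv is installed):

    import time
    from base.BaseCore import BaseCore

    baseCore = BaseCore()
    start = time.time()
    time.sleep(1.2)
    print(baseCore.getTimeCost(start, time.time()))        # formatted via the divmod chain above
    print(baseCore.getSubStr('abc[target]xyz', '[', ']'))  # '[target]' -- keeps beginStr..endStr inclusive
    print(baseCore.hant_2_hans('繁體中文'))                 # '繁体中文' via zhconv
    baseCore.close()  # per the header comment: close resources before the program exits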
comData/weixin_solo/oneWeixin.py

 # -*- coding: utf-8 -*-
 '''
 Track how many official accounts can be collected in a day
+, and build a database table to update each account's status
 '''
 import requests, time, random, json, pymysql, redis

@@ -17,13 +18,17 @@ from base.BaseCore import BaseCore
 import os

 baseCore = BaseCore()
 log = baseCore.getLogger()
+cnx_ = baseCore.cnx
+cursor_ = baseCore.cursor
 cnx = pymysql.connect(host="114.116.44.11", user="root", password="f7s0&7qqtK", db="clb_project", charset="utf8mb4")
 cursor = cnx.cursor()
 r = baseCore.r
 urllib3.disable_warnings()

 def check_url(sid, article_url):
     r = redis.Redis(host="114.115.236.206", port=6379, password='clbzzsn')
+    # note: the URLs are stored as a Redis set
     res = r.sismember(f'wx_url_{sid}', article_url)
     if res == 1:
         return True
     else:
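The check_url change keys deduplication off the account id: each sid owns a Redis set wx_url_{sid}. A sketch of the round trip (only sismember appears in this hunk; recording with sadd is an assumption about where URLs get added):

    import redis

    r = redis.Redis(host="114.115.236.206", port=6379, password='clbzzsn')
    sid = 42                                        # hypothetical account id
    link = 'https://mp.weixin.qq.com/s/example'     # hypothetical article link
    if r.sismember(f'wx_url_{sid}', link):
        print('already crawled -- skip')            # what check_url's True branch triggers
    else:
        r.sadd(f'wx_url_{sid}', link)               # presumably recorded after a successful crawl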
@@ -63,7 +68,7 @@ def get_proxy():
     return proxy_list

-def get_info(json_search):
+def get_info(sid, json_search):
     num_caiji = 0
     kaishi_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
     obsClient = ObsClient(

@@ -81,9 +86,9 @@ def get_info(json_search):
         url_news = one_news['link']
-        # url_ft = check_url(sid, url_news)
-        # if url_ft:
-        #     return list_all_info,url_news,news_title
+        url_ft = check_url(sid, url_news)
+        if url_ft:
+            return list_all_info, num_caiji
         try:
             res_news = requests.get(url_news, timeout=20)
         except:

@@ -97,10 +102,24 @@ def get_info(json_search):
         try:
             del news_html['class']
         except:
             pass
-        news_content = news_html.text
+        try:
+            news_content = news_html.text
+        except:
+            log.info(f'--------内容为空--------{url_news}--------')
+            time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+            false = [news_title, url_news, news_html, '文章内容为空', time_now]
+            insertSql = f"insert into WeixinGZH (site_name,site_url,json_error_info,error_type,create_time) values (%s,%s,%s,%s,%s)"
+            cursor_.execute(insertSql, tuple(false))
+            cnx_.commit()
+            continue
         list_img = news_html.find_all('img')
         for num_img in range(len(list_img)):
             img_one = list_img[num_img]

@@ -149,18 +168,19 @@ def get_info(json_search):
             'source': '11',
             'createDate': time_now
         }
-        # for nnn in range(0, 3):
-        #     try:
-        #         producer = KafkaProducer(bootstrap_servers=['114.115.159.144:9092'])
-        #         kafka_result = producer.send("crawlerInfo", json.dumps(dic_info, ensure_ascii=False).encode('utf8'))
-        #         kafka_time_out = kafka_result.get(timeout=10)
-        #         # add_url(sid, url_news)
-        #         break
-        #     except:
-        #         time.sleep(5)
-        #         continue
+        for nnn in range(0, 3):
+            try:
+                producer = KafkaProducer(bootstrap_servers=['114.115.159.144:9092'])
+                kafka_result = producer.send("crawlerInfo", json.dumps(dic_info, ensure_ascii=False).encode('utf8'))
+                kafka_time_out = kafka_result.get(timeout=10)
+                # add_url(sid, url_news)
+                break
+            except:
+                time.sleep(5)
+                continue
+        num_caiji = num_caiji + 1
+        list_all_info.append(dic_info)
         time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
         dic_info2 = {
             'infoSourceId': sid,
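The previously commented-out Kafka push is now live: up to three attempts with a five-second back-off, where the future's get(timeout=10) blocks until the broker acknowledges the record. The same pattern as a standalone sketch (topic and broker address are the ones in the hunk; the payload is made up):

    import json, time
    from kafka import KafkaProducer

    def send_with_retry(topic, payload, attempts=3):
        for _ in range(attempts):
            try:
                producer = KafkaProducer(bootstrap_servers=['114.115.159.144:9092'])
                future = producer.send(topic, json.dumps(payload, ensure_ascii=False).encode('utf8'))
                return future.get(timeout=10)   # RecordMetadata once the broker acks
            except Exception:
                time.sleep(5)                   # back off, then retry
        return None                             # all attempts failed

    send_with_retry("crawlerInfo", {'title': 'demo'})   # hypothetical payload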
@@ -171,16 +191,45 @@ def get_info(json_search):
             'dispatcherStatus': '1',
             'source': '1',
         }
-        # for nnn2 in range(0, 3):
-        #     try:
-        #         producer2 = KafkaProducer(bootstrap_servers=['114.115.159.144:9092'])
-        #         kafka_result2 = producer2.send("collectionAndDispatcherInfo",
-        #                                        json.dumps(dic_info2, ensure_ascii=False).encode('utf8'))
-        #         break
-        #     except:
-        #         time.sleep(5)
-        #         continue
-    return list_all_info, url_news, news_title
+        for nnn2 in range(0, 3):
+            try:
+                producer2 = KafkaProducer(bootstrap_servers=['114.115.159.144:9092'])
+                kafka_result2 = producer2.send("collectionAndDispatcherInfo",
+                                               json.dumps(dic_info2, ensure_ascii=False).encode('utf8'))
+                break
+            except:
+                time.sleep(5)
+                continue
+    return list_all_info, num_caiji

+# scheduled task: read source codes from MySQL and queue them
+def getFromSql():
+    selectSql = "SELECT info_source_code from info_source where site_uri like '%mp.weixin.qq.com%'"
+    cursor.execute(selectSql)
+    results = cursor.fetchall()
+    result_list = [item[0] for item in results]
+    # push into Redis
+    for item in result_list:
+        r.rpush('WeiXinGZH:infoSourceCode', item)

+# refresh the browser and obtain the current token and cookies
+def flushAndGetToken(list_b):
+    browser_run = list_b[0]
+    log.info('======刷新浏览器=====')
+    browser_run.refresh()
+    cookie_list = browser_run.get_cookies()
+    cur_url = browser_run.current_url
+    token = cur_url.split('token=')[1]
+    log.info(f'===========当前token为:{token}============')
+    cookies = {}
+    for cookie in cookie_list:
+        cookies[cookie['name']] = cookie['value']
+    return token, cookies

+# put official accounts that failed to collect back into Redis
+def rePutIntoR(item):
+    r.rpush('WeiXinGZH:infoSourceCode', item)

 if __name__ == "__main__":
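Together, getFromSql, baseCore.redicPullData and rePutIntoR form a simple Redis work queue: producers rpush info_source_code values, the main loop lpop-s one per iteration, and failures are pushed back. A condensed sketch of that round trip (queue key and connection details are from the diff; the sample code value is made up):

    import redis

    r = redis.Redis(host="114.115.236.206", port=6379, password='clbzzsn')
    r.rpush('WeiXinGZH:infoSourceCode', 'IN-SOURCE-0001')   # getFromSql does this per row

    item = r.lpop('WeiXinGZH:infoSourceCode')               # what redicPullData wraps
    code = item.decode() if item else None
    if code:
        try:
            pass                                            # ... collect this account ...
        except Exception:
            r.rpush('WeiXinGZH:infoSourceCode', code)       # rePutIntoR: requeue on failure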
@@ -195,288 +244,212 @@ if __name__=="__main__":
     opt = webdriver.ChromeOptions()
     opt.add_argument('user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36')
     # opt.add_argument(f"--proxy-server={ip}")
     opt.add_argument("--ignore-certificate-errors")
     opt.add_argument("--ignore-ssl-errors")
     opt.add_experimental_option("excludeSwitches", ["enable-automation"])
     opt.add_experimental_option('excludeSwitches', ['enable-logging'])
     opt.add_experimental_option('useAutomationExtension', False)
-    opt.binary_location = r'D:\crawler\baidu_crawler\tool\Google\Chrome\Application\chrome.exe'
-    chromedriver = r'C:\Users\WIN10\DataspellProjects\crawlerProjectDemo\tmpcrawler\cmd100\chromedriver.exe'
+    # opt.binary_location = r'D:\crawler\baidu_crawler\tool\Google\Chrome\Application\chrome.exe'
+    # chromedriver = r'C:\Users\WIN10\DataspellProjects\crawlerProjectDemo\tmpcrawler\cmd100\chromedriver.exe'
+    chromedriver = r'D:/chrome/chromedriver.exe'
     browser1 = webdriver.Chrome(chrome_options=opt, executable_path=chromedriver)
     list_b = [browser1]
     url = "https://mp.weixin.qq.com/"
     browser1.get(url)
     # browser2.get(url)
     # browser3.get(url)
     # adjustable: wait for the manual login scan
     time.sleep(30)
     num_b = 0
-    browser_run = list_b[0]
-    log.info('======刷新浏览器=====')
-    browser_run.refresh()
-    cookie_list = browser_run.get_cookies()
-    cur_url = browser_run.current_url
-    token = cur_url.split('token=')[1]
-    log.info(f'===========当前token为:{token}============')
-    cookies = {}
-    for cookie in cookie_list:
-        cookies[cookie['name']] = cookie['value']
+    # todo: read sources from the database into Redis; a scheduled job refills the queue daily
+    # getFromSql()
     s = requests.session()
     # count of official accounts processed in this run
     count = 0
     while True:
         all = []
+        # refresh the browser and fetch the current token and cookies
+        token, cookies = flushAndGetToken(list_b)
         list_all_info = []
         list_error_url = []
-        list_laiyuan = []
-        cnx = pymysql.connect(host="114.116.44.11", user="root", password="f7s0&7qqtK", db="clb_project", charset="utf8mb4")
-        log.info('===========获取公众号============')
         start_ = time.time()
-        with cnx.cursor() as cursor:
-            sql = "SELECT site_uri,id,site_name,info_source_code from info_source where site_uri like '%mp.weixin.qq.com%'"
-            cursor.execute(sql)
-            rows = cursor.fetchall()
-            # split the database rows into two parts
-            for row in rows:
-                # print(len(rows[:945]))
-                # if row[2]=='南方周末':
-                dic_url = {'url': row[0], 'sid': row[1], 'name': row[2], 'info_source_code': row[3], 'biz': ''}
-                list_laiyuan.append(dic_url)
-        log.info(f'===========获取公众号完成,耗时{baseCore.getTimeCost(start_,time.time())}============')
-        # list_laiyuan.reverse()
-        log.info('===========获取biz==========')
-        start__ = time.time()
-        for dic_one in list_laiyuan:
-            url = dic_one['url']
-            try:
-                biz = url.split('__biz=')[1].split('==&')[0].split('=')[0]
-                dic_one['biz'] = biz
-            except:
-                continue
-        log.info(f'==========获取biz完成,耗时{baseCore.getTimeCost(start__,time.time())}==========')
-        # list_biz.append(biz)
-        # record bad biz values and related info
-        biz_error_biz = []
-        biz_error_origin = []
-        biz_error_code = []
-        # record accounts that parsed but failed to collect
-        get_error_biz = []
-        get_error_origin = []
-        get_error_code = []
-        # record accounts that failed to parse
-        json_error_biz = []
-        json_error_origin = []
-        json_error_code = []
-        for num_biz in range(0, len(list_laiyuan)):
-            browser_run.refresh()
-            cookie_list = browser_run.get_cookies()
-            cur_url = browser_run.current_url
-            token = cur_url.split('token=')[1]
-            time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
-            log.info(f'=========刷新时间:{time_now}=========')
-            log.info(f'=========当前token为:{token}=========')
-            cookies = {}
-            for cookie in cookie_list:
-                cookies[cookie['name']] = cookie['value']
-            list_url = []
-            s.cookies.update(cookies)
-            sid = list_laiyuan[num_biz]['sid']
-            origin = list_laiyuan[num_biz]['name']
-            info_source_code = list_laiyuan[num_biz]['info_source_code']
-            biz = list_laiyuan[num_biz]['biz']
-            if biz:
-                pass
-            else:
-                continue
-            fakeid = biz + '=='
-            url_search = f'https://mp.weixin.qq.com/cgi-bin/appmsg?action=list_ex&begin=5&count=5&fakeid={fakeid}&type=9&query=&token={token}&lang=zh_CN&f=json&ajax=1'
-            try:
-                ip = get_proxy()[random.randint(0, 3)]
-                json_search = s.get(url_search, headers=headers, proxies=ip, verify=False).json()  # , proxies=ip, verify=False
-                time.sleep(2)
-            except:
-                log.info(f'===公众号{origin}请求失败!当前时间:{baseCore.getNowTime(1)}===')
-                # error_text = str(json_search)
-                json_search = ''
-                aa = time.sleep(600)
-                log.info(f'======等待时间{aa}=======')
-                break
-            try:
-                list_all = json_search['app_msg_list']
-            except:
-                # parse failure
-                count += 1
-                # (f'{fakeid}:biz错误!')
-                log.info(f'{fakeid}:biz错误!、公众号为{origin}=====当前时间:{baseCore.getNowTime(1)}')
-                biz_error_biz.append(biz)
-                biz_error_origin.append(origin)
-                biz_error_code.append(info_source_code)
-                df_error_biz = pd.DataFrame({'公众号': biz_error_origin, 'code': biz_error_code, '错误biz': biz_error_biz,})
-                excel_name = time.strftime("%Y-%m-%d", time.localtime())
-                # originally: df_error_biz.to_excel(f'./错误biz/{excel_name}.xlsx', index=False)
-                # changed to:
-                file_path = f'./错误biz/{excel_name}.xlsx'
-                if os.path.exists(file_path):
-                    pass
-                else:
-                    workbook = Workbook()
-                    workbook.save(file_path)
-                    workbook.close()
-                # with pd.ExcelWriter(file_path, engine='xlsxwriter', options={'strings_to_urls': False}) as writer:
-                baseCore.writerToExcel(df_error_biz, file_path)
-                # combined_data.to_excel(writer, index=False)
-                bb = time.sleep(3600)
-                log.info(f'========当前账号可能被封,等待时长{bb}======')
-            if list_all:
-                str_t = json.dumps(json_search)
-                try:
-                    list_all_info, url_news, news_title = get_info(json_search)
-                    time.sleep(2)
-                    count += 1
-                    if len(list_all_info):
-                        for dic_one in list_all_info:
-                            all.append(dic_one)
-                        # df_info = pd.DataFrame(all)
-                        excel_name = time.strftime("%Y-%m-%d", time.localtime())
-                        try:
-                            file_path = f'./运行结果/{excel_name}_实时数据.xlsx'
-                            if os.path.exists(file_path):
-                                pass
-                            else:
-                                workbook = Workbook()
-                                workbook.save(file_path)
-                                workbook.close()
-                            # df_info.to_excel(f'./运行结果/{excel_name}_实时数据.xlsx', index=False)
-                            baseCore.writerToExcel(all, file_path)
-                        except:
-                            file_path = f'./运行结果/{excel_name}_2_实时数据.xlsx'
-                            if os.path.exists(file_path):
-                                pass
-                            else:
-                                workbook = Workbook()
-                                workbook.save(file_path)
-                                workbook.close()
-                            # df_info.to_excel(f'./运行结果/{excel_name}_2_实时数据.xlsx', index=False)
-                            baseCore.writerToExcel(all, file_path)
-                        # all articles of this account collected
-                        # print(f'{fakeid}:采集成功!')
-                        log.info(f'{fakeid}、公众号{origin}:采集成功!、已采集{count}个公众号')
-                    else:
-                        log.info(f'{fakeid}、公众号{origin}:{url_news},{news_title}已采集过该文章!、已采集{count}个公众号')
-                except:
-                    # JSON parsed but data collection failed
-                    count += 1
-                    log.info(f'{fakeid}、公众号:{origin}采集失败!!!!!!已采集{count}个公众号')
-                    # print(f'{fakeid}:解析失败!!!!!!')
-                    list_error_url.append(str_t)
-                    get_error_origin.append(origin)
-                    get_error_code.append(info_source_code)
-                    excel_name = time.strftime("%Y-%m-%d", time.localtime())
-                    df_error_url = pd.DataFrame({'公众号:': get_error_origin, 'code': get_error_code, '信息': list_error_url})
-                    file_path = f'./保存失败/{excel_name}.xlsx'
-                    if os.path.exists(file_path):
-                        pass
-                    else:
-                        workbook = Workbook()
-                        workbook.save(file_path)
-                        workbook.close()
-                    # df_error_url.to_excel(f'./保存失败/{excel_name}.xlsx', index=False)
-                    baseCore.writerToExcel(df_error_url, file_path)
-                    time.sleep(1)
-                    continue
-            else:
-                # list_all is empty
-                count += 1
-                time_end = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())
-                # print(f'{fakeid}:运行出错!时间为:{time_end}')
-                log.info(f'{fakeid}、公众号{origin}:list_all为空!已采集{count}个公众号、时间为:{time_end}')
-                json_error_biz.append(fakeid)
-                json_error_origin.append(origin)
-                json_error_code.append(info_source_code)
-                df_error_json = pd.DataFrame({'公众号:': json_error_origin, 'code': json_error_code, '信息': json_error_biz})
-                file_path = f'./错误文件/{time_end}.xlsx'
-                if os.path.exists(file_path):
-                    pass
-                else:
-                    workbook = Workbook()
-                    workbook.save(file_path)
-                    workbook.close()
-                # df_error_json.to_excel(f'./错误文件/{time_end}.xlsx', index=False)
-                baseCore.writerToExcel(df_error_json, file_path)
-        time_end = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
-        log.info(f'运行结束,时间为:{time_end}')
-        print(f'运行结束,时间为:{time_end}')
-        df_info = pd.DataFrame(list_all_info)
-        excel_name = time.strftime("%Y-%m-%d", time.localtime())
-        df_info.to_excel(f'./运行结果/{excel_name}_总数据.xlsx', index=False)
-        list_b[0].refresh()
+        # todo: pop one entry from the Redis queue
+        infoSourceCode = baseCore.redicPullData('WeiXinGZH:infoSourceCode')
+        if infoSourceCode == 'None':
+            # this round is finished: refill the queue and wait for the insert to complete
+            getFromSql()
+            time.sleep(20)
+            log.info(f'========本次公众号已采集完毕,共采集{count}个公众号=========总耗时:{baseCore.getTimeCost(start_,time.time())}')
+            continue
+        sql = f"SELECT site_uri,id,site_name,info_source_code from info_source where info_source_code = '{infoSourceCode}' "
+        # sql = f"SELECT site_uri,id,site_name,info_source_code from info_source where site_name = '一带一路百人论坛' "
+        cursor.execute(sql)
+        row = cursor.fetchone()
+        dic_url = {'url_': row[0], 'sid': row[1], 'name': row[2], 'info_source_code': row[3], 'biz': ''}
+        s.cookies.update(cookies)
+        url_ = dic_url['url_']
+        origin = dic_url['name']
+        info_source_code = dic_url['info_source_code']
+        sid = dic_url['sid']
+        try:
+            biz = url_.split('__biz=')[1].split('==&')[0].split('=')[0]
+            dic_url['biz'] = biz
+        except Exception as e:
+            log.info(f'---公众号--{origin}---biz错误')
+            time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+            error = [origin, url_, info_source_code, e, 'biz错误', time_now]
+            insertSql = f"insert into WeixinGZH (site_name,site_url,info_source_code,json_error_info,error_type,create_time) values (%s,%s,%s,%s,%s,%s)"
+            cursor_.execute(insertSql, tuple(error))
+            cnx_.commit()
+            continue
+        fakeid = biz + '=='
+        url_search = f'https://mp.weixin.qq.com/cgi-bin/appmsg?action=list_ex&begin=5&count=5&fakeid={fakeid}&type=9&query=&token={token}&lang=zh_CN&f=json&ajax=1'
+        try:
+            ip = get_proxy()[random.randint(0, 3)]
+            json_search = s.get(url_search, headers=headers, proxies=ip, verify=False).json()  # , proxies=ip, verify=False
+            str_t = json.dumps(json_search)
+            time.sleep(2)
+        except:
+            log.error(f'===公众号{origin}请求失败!当前时间:{baseCore.getNowTime(1)}===')
+            rePutIntoR(info_source_code)
+            continue
+        # {"base_resp": {"ret": 200003, "err_msg": "invalid session"}}
+        # TODO: check the return code to tell a banned account from a bad biz
+        # {'base_resp': {'err_msg': 'freq control', 'ret': 200013}}  -> account banned
+        # {'base_resp': {'err_msg': 'invalid args', 'ret': 200002}}  -> bad account biz / link
+        # {'base_resp': {'err_msg': 'ok', 'ret': 0}}                 -> normal
+        ret = json_search['base_resp']['ret']
+        if ret == 0:
+            pass
+        elif ret == 200013:
+            # put it back into Redis; refresh instead of time.sleep(3600) for now
+            rePutIntoR(info_source_code)
+            log.info(f'======该账号被封=======')
+            for i in range(0, 6):   # 600,1200,1800,2400,3000,3600
+                # refresh
+                log.info(f'=============刷新浏览器=============')
+                wait_time = time.sleep(600)
+                log.info(f'=======等待时间{wait_time}秒=====刷新浏览器=====')
+                browser_run = list_b[0]
+                browser_run.refresh()
+                cookie_list = browser_run.get_cookies()
+                cur_url = browser_run.current_url
+                token = cur_url.split('token=')[1]
+                log.info(f'=========当前token:{token}=========')
+                cookies = {}
+                for cookie in cookie_list:
+                    cookies[cookie['name']] = cookie['value']
+            continue
+        elif ret == 200002:
+            # bad account link: save the error info and error type to the database
+            time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+            error = [origin, url_, info_source_code, str_t, '无效biz参数', time_now]
+            insertSql = f"insert into WeixinGZH (site_name,site_url,info_source_code,json_error_info,error_type,create_time) values (%s,%s,%s,%s,%s,%s)"
+            cursor_.execute(insertSql, tuple(error))
+            cnx_.commit()
+            log.info(f'公众号----{origin}----耗时{baseCore.getTimeCost(start_,time.time())}')
+            continue
+        elif ret == 200003:
+            # invalid session
+            time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+            error = [origin, url_, info_source_code, str_t, '无效session', time_now]
+            insertSql = f"insert into WeixinGZH (site_name,site_url,info_source_code,json_error_info,error_type,create_time) values (%s,%s,%s,%s,%s,%s)"
+            cursor_.execute(insertSql, tuple(error))
+            cnx_.commit()
+            log.info(f'公众号----{origin}----耗时{baseCore.getTimeCost(start_, time.time())}')
+            continue
+        else:
+            log.info(f'----其他情况-----{json_search}---公众号{origin}------')
+            time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+            error = [origin, url_, info_source_code, str_t, '其他错误', time_now]
+            insertSql = f"insert into WeixinGZH (site_name,site_url,info_source_code,json_error_info,error_type,create_time) values (%s,%s,%s,%s,%s,%s)"
+            cursor_.execute(insertSql, tuple(error))
+            cnx_.commit()
+            continue
+        list_all = json_search['app_msg_list']
+        try:
+            list_all_info, num_caiji = get_info(sid, json_search)
+            time.sleep(2)
+            if len(list_all_info) != 0:
+                count += 1
+                list_b[0].refresh()
+                time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+                success = [origin, url_, info_source_code, num_caiji, '采集成功', time_now]
+                # save the success record
+                insertSql = f"insert into WeixinGZH (site_name,site_url,info_source_code,success_info,success_num,create_time) values (%s,%s,%s,%s,%s,%s)"
+                cursor_.execute(insertSql, tuple(success))
+                cnx_.commit()
+                # all articles of this account have been collected
+                log.info(f'{fakeid}、公众号{origin}:采集成功!、已采集{count}个公众号、耗时{baseCore.getTimeCost(start_,time.time())}')
+            else:
+                log.info(f'{fakeid}、公众号{origin}、网址已存在!耗时{baseCore.getTimeCost(start_,time.time())}')
+        except Exception as e:
+            # JSON parsed but collecting the account's data failed
+            count += 1
+            time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+            false = [origin, url_, info_source_code, e, '采集失败', time_now]
+            insertSql = f"insert into WeixinGZH (site_name,site_url,info_source_code,json_error_info,error_type,create_time) values (%s,%s,%s,%s,%s,%s)"
+            cursor_.execute(insertSql, tuple(false))
+            cnx_.commit()
+            log.info(f'{fakeid}、公众号:{origin}采集失败!!!!!!耗时{baseCore.getTimeCost(start_, time.time())}')
+        time.sleep(2)
+    # release resources
+    cnx.close()
+    cursor.close()
+    baseCore.close()
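The reworked main loop dispatches on base_resp.ret before touching app_msg_list. The mapping below restates the codes from the TODO comments as a self-contained sketch:

    def classify_ret(json_search):
        ret = json_search.get('base_resp', {}).get('ret')
        if ret == 0:
            return 'ok'                                  # proceed to get_info()
        if ret == 200013:                                # 'freq control': account throttled/banned
            return 'requeue, then wait and refresh'
        if ret == 200002:                                # 'invalid args': bad biz parameter / link
            return 'record invalid biz in WeixinGZH'
        if ret == 200003:                                # 'invalid session'
            return 'record invalid session in WeixinGZH'
        return 'record other error'

    print(classify_ret({'base_resp': {'err_msg': 'freq control', 'ret': 200013}}))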
comData/weixin_solo/test.py

 import pandas as pd

 def writeaa():
     detailList = []
     aa = {
         'id': 3,
         'name': 'qqqwe'
     }
     detailList.append(aa)
     writerToExcel(detailList)

 # append the data to the excel file
 def writerToExcel(detailList):
     # filename='baidu搜索.xlsx'
     # read the existing xlsx file
     existing_data = pd.read_excel(filename, engine='openpyxl')
     # build the new data
     new_data = pd.DataFrame(data=detailList)
     # append the new data to the end of the existing data
     combined_data = existing_data.append(new_data, ignore_index=True)
     # write the result back to the xlsx file
     combined_data.to_excel(filename, index=False)

 from openpyxl import Workbook

 if __name__ == '__main__':
     filename = 'test1.xlsx'
     # create a workbook
     workbook = Workbook(filename)
     workbook.save(filename)
     writeaa()

+gpdm = '01109.HK'
+if 'HK' in str(gpdm):
+    tmp_g = str(gpdm).split('.')[0]
+    if len(tmp_g) == 5:
+        gpdm = str(gpdm)[1:]
+        print(gpdm)
+else:
+    pass
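One caveat on writerToExcel: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0, so this helper only runs on older pandas. An equivalent sketch using pd.concat:

    import pandas as pd

    def writerToExcel(detailList, filename):
        existing_data = pd.read_excel(filename, engine='openpyxl')   # current sheet
        new_data = pd.DataFrame(data=detailList)                     # rows to append
        combined_data = pd.concat([existing_data, new_data], ignore_index=True)
        combined_data.to_excel(filename, index=False)                # write back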