Commit 80e7804c  Author: 薛凌堃

Yahoo Finance company news

Parent e34838cc
# Yahoo Finance company news collection
...@@ -5,6 +5,8 @@ import pymysql
from kafka import KafkaProducer
from selenium.webdriver.common.by import By
import sys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
sys.path.append('D:/zzsn_spider/base')
import BaseCore
...@@ -13,6 +15,8 @@ import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
baseCore = BaseCore.BaseCore()
log = baseCore.getLogger()
r = baseCore.r
taskType = '企业动态/雅虎财经'
smart =smart_extractor.SmartExtractor('cn')
...@@ -178,6 +182,10 @@ def scroll(xydm,name,gpdm):
break
last_url = last_url_
# Put companies whose collection failed back into redis
def rePutIntoR(item):
r.rpush('NewsEnterprise:gwqy_socialCode', item)
if __name__ == "__main__":
path = r'D:\zzsn_spider\comData\cmd6\chromedriver.exe'
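The new rePutIntoR helper pushes a company's social credit code back onto the Redis list that redicPullData consumes, so a company that failed is retried on a later pass. Below is a minimal sketch of that push/pull cycle, assuming a plain redis-py client; the key name comes from the diff, while the connection parameters and helper names are illustrative only.

```python
# Sketch only: the real code uses baseCore.r / baseCore.redicPullData;
# host, port and db here are placeholder assumptions.
import redis

r = redis.Redis(host='127.0.0.1', port=6379, db=0)

def re_put_into_r(item):
    # push a failed social credit code back to the tail of the queue
    r.rpush('NewsEnterprise:gwqy_socialCode', item)

def pull_one():
    # pop the next code from the head of the queue; None when the queue is empty
    raw = r.lpop('NewsEnterprise:gwqy_socialCode')
    return raw.decode('utf-8') if raw else None
```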
...@@ -185,6 +193,7 @@ if __name__ == "__main__":
cnx = pymysql.connect(host='114.116.44.11', user='root', password='f7s0&7qqtK', db='dbScore', charset='utf8mb4')
cursor = cnx.cursor()
while True:
# Use the social credit code pulled from Redis to fetch the corresponding basic info from the database
social_code = baseCore.redicPullData('NewsEnterprise:gwqy_socialCode')
...@@ -214,10 +223,13 @@ if __name__ == "__main__":
takeTime = baseCore.getTimeCost(start_time, time.time())
baseCore.recordLog(xydm, taskType, state, takeTime, '', exception)
continue
try:
url = f"https://finance.yahoo.com/quote/{gpdm}/press-releases?p={gpdm}" url = f"https://finance.yahoo.com/quote/{gpdm}/press-releases?p={gpdm}"
driver.get(url) driver.get(url)
try: try:
WebDriverWait(driver, 15).until(EC.visibility_of_element_located((By.ID, 'summaryPressStream-0-Stream')))
news_div = driver.find_element(By.ID, 'summaryPressStream-0-Stream')
news_div.find_element(By.TAG_NAME, 'a')
except Exception as e:
log.error(f"{name}--{gpdm}--没找到新闻元素")
exception = '没找到新闻元素'
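The added WebDriverWait line makes the scraper wait up to 15 seconds for the press-release stream to become visible instead of failing immediately on a slow page, and the extra find_element(By.TAG_NAME, 'a') probes that the list actually contains links. A minimal sketch of the same explicit-wait pattern, assuming the element id from the diff; the helper name and the None-on-timeout behaviour are illustrative.

```python
# Sketch of the explicit-wait pattern; 'summaryPressStream-0-Stream' is the
# container id used in the diff, find_press_stream is a hypothetical helper.
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait

def find_press_stream(driver, timeout=15):
    """Return the press-release container once it is visible, or None on timeout."""
    try:
        return WebDriverWait(driver, timeout).until(
            EC.visibility_of_element_located((By.ID, 'summaryPressStream-0-Stream'))
        )
    except TimeoutException:
        return None
```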
...@@ -232,16 +244,30 @@ if __name__ == "__main__":
log.error(f"{name}--{gpdm}--拖拽出现问题")
news_lis = news_div.find_elements(By.XPATH, "./ul/li")
log.info(f"{name}--{gpdm}--{len(news_lis)}条信息")
# Flag marking whether the script has lost its connection to the site
flag = 0
for i in range(0, len(news_lis)):
try:
try:
a_ele = news_lis[i].find_element(By.XPATH, "./div[1]/div[1]/div[2]/h3[1]/a")
except:
a_ele = news_lis[i].find_element(By.XPATH, "./div[1]/div[1]/div[1]/h3[1]/a")
except Exception as e:
if news_lis[i].is_displayed():
log.error(f"{name}--{gpdm}--{i}----a标签没找到") log.error(f"{name}--{gpdm}--{i}----a标签没找到")
exception = 'a标签没找到' exception = 'a标签没找到'
state = 0 state = 0
takeTime = baseCore.getTimeCost(start_time, time.time()) takeTime = baseCore.getTimeCost(start_time, time.time())
baseCore.recordLog(xydm, taskType, state, takeTime, url, exception) baseCore.recordLog(xydm, taskType, state, takeTime, url, exception)
continue continue
else:
log.error(f"{name}--{gpdm}--{i}----与网站断开连接")
# TODO: put back into redis
rePutIntoR(xydm)
time.sleep(300)
flag = 1
break
news_url = a_ele.get_attribute("href").lstrip().strip().replace("'", "''")
if (news_url.startswith("https://finance.yahoo.com")):
pass
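Inside the per-item loop the diff now distinguishes two failure modes: if the list item is still displayed but its link is missing, only that entry is skipped; if the item is no longer displayed, the browser session is assumed to have dropped, the company is re-queued and the loop is abandoned via the flag. A simplified sketch of that branching, with names borrowed from the diff and the loop body reduced to the decision itself:

```python
# Simplified sketch; re_put_into_r, xydm and the XPath come from the diff,
# the function itself is hypothetical and omits the fallback XPath and logging detail.
from selenium.webdriver.common.by import By

def collect_links(news_lis, xydm, re_put_into_r, log):
    flag = 0
    for i, li in enumerate(news_lis):
        try:
            a_ele = li.find_element(By.XPATH, "./div[1]/div[1]/div[2]/h3[1]/a")
        except Exception:
            if li.is_displayed():
                # only this entry is malformed: record it and move on
                log.error(f"item {i}: link element not found")
                continue
            # the whole item is gone: treat it as a dropped session,
            # re-queue the company and stop working on this page
            re_put_into_r(xydm)
            flag = 1
            break
        log.info(a_ele.get_attribute("href"))
    return flag
```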
...@@ -268,12 +294,23 @@ if __name__ == "__main__":
baseCore.recordLog(xydm, taskType, state, takeTime, news_url, exception)
log.info(f"{name}--{gpdm}--{i}----{news_url}")
if flag==1:
continue
log.info(f"{name}--{gpdm}--企业整体,耗时{baseCore.getTimeCost(start_time, time.time())}") log.info(f"{name}--{gpdm}--企业整体,耗时{baseCore.getTimeCost(start_time, time.time())}")
# After the collection finishes, update this company's collection run count
runType = 'NewsRunCount'
count += 1
baseCore.updateRun(social_code, runType, count)
except:
rePutIntoR(xydm)
state = 0
takeTime = baseCore.getTimeCost(start_time, time.time())
baseCore.recordLog(xydm, taskType, state, takeTime, '', '远程主机强迫关闭了一个现有的连接。')
log.info(f"-------{name}--{gpdm}---'远程主机强迫关闭了一个现有的连接。'--------")
log.info('===========连接已被关闭========等待重新连接===========')
time.sleep(1200)
continue
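The new outer try/except turns a dropped connection ('远程主机强迫关闭了一个现有的连接', i.e. the remote host forcibly closed the connection) into a recoverable event: the company is pushed back to Redis, the failure is recorded, and the worker sleeps 20 minutes before pulling the next code. A schematic sketch of that recovery step; run_once and process are hypothetical names, while the 1200-second back-off mirrors the diff.

```python
# Schematic only: process() stands in for the whole Selenium collection step.
import time

def run_once(xydm, process, re_put_into_r, log):
    try:
        process(xydm)            # drive the browser and collect the news items
        return True
    except Exception:
        re_put_into_r(xydm)      # give this social credit code another chance later
        log.info('connection closed, waiting before reconnecting')
        time.sleep(1200)         # back off so the remote host can recover
        return False
```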
cursor.close()
cnx.close()
...