提交 57e944a7 作者: XveLingKun

福布斯榜单

上级 cb936819
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
"""Scrape employee headcounts for the Forbes Global 2000 (2023) list via the
Wayback Machine and store them in MongoDB.

This top section holds all configuration: imports, the list URL, the target
MongoDB collection, browser-like request headers, and the local proxy.
"""
import json
import time

import pymongo
import requests
from bs4 import BeautifulSoup

# Archived snapshot of the Forbes Global 2000 list page (2023-07-02 capture).
url = "https://web.archive.org/web/20230702131549/https://www.forbes.com/lists/global2000/"

# Target collection: ZZSN.福布斯企业人数 ("Forbes company headcount").
# NOTE(review): credentials are hard-coded; move them to environment variables
# or a config file before sharing or committing this script.
db_storage = pymongo.MongoClient('mongodb://114.115.221.202:27017/', username='admin', password='ZZsn@9988').ZZSN[
    '福布斯企业人数']

# Browser-like headers copied from a real Edge session so the archive endpoint
# serves the same markup (including the embedded data <script>) as a browser.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'Accept-Encoding': 'gzip, deflate, br, zstd',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
    'Cookie': 'client_id=53d840cb729d7420b1443fea045a4b1c71c; _gcl_au=1.1.934727975.1688303765; _li_dcdm_c=.com; _lc2_fpi=5fb552a76ef3--01h4bby2zps7e7m7k4va1nmp0x; _swb=8e76883c-49d5-497f-ba08-91910e3f0394; _gid=GA1.2.1372767781.1688303769; notice_gdpr_prefs=0,1,2; _swb_consent_=eyJvcmdhbml6YXRpb25Db2RlIjoiZm9yYmVzIiwicHJvcGVydHlDb2RlIjoid2Vic2l0ZV9zbWFydF90YWciLCJlbnZpcm9ubWVudENvZGUiOiJwcm9kdWN0aW9uIiwiaWRlbnRpdGllcyI6eyJzd2Jfd2Vic2l0ZV9zbWFydF90YWciOiI4ZTc2ODgzYy00OWQ1LTQ5N2YtYmEwOC05MTkxMGUzZjAzOTQifSwianVyaXNkaWN0aW9uQ29kZSI6ImNjcGEiLCJwdXJwb3NlcyI6eyJiZWhhdmlvcmFsX2FkdmVydGlzaW5nIjp7ImFsbG93ZWQiOiJ0cnVlIiwibGVnYWxCYXNpc0NvZGUiOiJjb25zZW50X29wdG91dCJ9LCJhbmFseXRpY3MiOnsiYWxsb3dlZCI6InRydWUiLCJsZWdhbEJhc2lzQ29kZSI6ImNvbnNlbnRfb3B0b3V0In0sImZ1bmN0aW9uYWwiOnsiYWxsb3dlZCI6InRydWUiLCJsZWdhbEJhc2lzQ29kZSI6ImRpc2Nsb3N1cmUifSwicmVxdWlyZWQiOnsiYWxsb3dlZCI6InRydWUiLCJsZWdhbEJhc2lzQ29kZSI6ImRpc2Nsb3N1cmUifX0sImNvbGxlY3RlZEF0IjoxNjg4MzAzNzY5fQ%3D%3D; _ketch_consent_v1_=eyJiZWhhdmlvcmFsX2FkdmVydGlzaW5nIjp7InN0YXR1cyI6ImdyYW50ZWQiLCJjYW5vbmljYWxQdXJwb3NlcyI6WyJiZWhhdmlvcmFsX2FkdmVydGlzaW5nIl19LCJhbmFseXRpY3MiOnsic3RhdHVzIjoiZ3JhbnRlZCIsImNhbm9uaWNhbFB1cnBvc2VzIjpbImFuYWx5dGljcyJdfSwiZnVuY3Rpb25hbCI6eyJzdGF0dXMiOiJncmFudGVkIiwiY2Fub25pY2FsUHVycG9zZXMiOlsicHJvZF9lbmhhbmNlbWVudCJdfSwicmVxdWlyZWQiOnsic3RhdHVzIjoiZ3JhbnRlZCIsImNhbm9uaWNhbFB1cnBvc2VzIjpbImVzc2VudGlhbF9zZXJ2aWNlcyJdfX0%3D; _fbp=fb.0.1688303769818.1241836180; ln_or=eyI0NDUxMDYiOiJkIn0%3D; __qca=P0-1546201114-1688303767140; _cb=CILAkNBqefvRLy7_N; _chartbeat2=.1688303782194.1688303782194.1.CU0yf7BtnWSMBnjN8tDChNq8CLaEIA.1; _hp2_ses_props.657665248=%7B%22z%22%3A1%2C%22ts%22%3A1688307007339%2C%22d%22%3A%22www.forbes.com%22%2C%22h%22%3A%22%2Flists%2Fglobal2000%2F%22%2C%22t%22%3A%22The%20Global%202000%202023%22%2C%22q%22%3A%22%3Fsh%3D7b5ee1385ac0%22%7D; lux_uid=168830375963815056; gaWelcomPageTracked=true; _hp2_id.657665248=%7B%22userId%22%3A%227866808861400171%22%2C%22pageviewId%22%3A%224987331001554425%22%2C%22sessionId%22%3A%222988129651120211%22%2C%22identity%22%3Anull%2C%22trackerVersion%22%3A%224.0%22%7D; AMP_TOKEN=%24NOT_FOUND; _ga=GA1.2.14802614.1688303763; _dc_gtm_UA-5883199-3=1; _gat_UA-5883199-3=1; _ga_DLD85VJ5QY=GS1.1.1688307006.2.1.1688303798.37.0.0; donation-identifier=aab33e1c4e293a8fcd5490465688bb01',
    'Priority': 'u=0, i',
    'Sec-Ch-Ua': '"Not/A)Brand";v="8", "Chromium";v="126", "Microsoft Edge";v="126"',
    'Sec-Ch-Ua-Mobile': '?0',
    'Sec-Ch-Ua-Platform': '"Windows"',
    'Sec-Fetch-Dest': 'document',
    'Sec-Fetch-Mode': 'navigate',
    'Sec-Fetch-Site': 'none',
    'Sec-Fetch-User': '?1',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0'
}

# Local forward proxy for all outbound HTTP(S) traffic.
proxies = {
    'https': 'http://127.0.0.1:1080',
    'http': 'http://127.0.0.1:1080',
}

# One-time bootstrap, kept for reference: fetch the list page, find the
# <script> tag that embeds the table data, and dump its text to ./a.txt.
# req = requests.get(headers=headers, url=url, verify=False, proxies=proxies)
# # with open('./a.html','w',encoding='utf-8') as f:
# #     f.write(req.text)
# soup = BeautifulSoup(req.text, 'lxml')
# scripts = soup.find_all('script')
# for script in scripts:
#     if 'window["f' in script.text and 'ICBC' in script.text:
#         break
# with open('./a.txt','w',encoding='utf-8')as f:
#     f.write(script.text)
# Parse the table payload dumped by the bootstrap step above; it is a JSON
# document whose 'tableData' key holds one record per ranked company.
with open('./a.txt', 'r', encoding='utf-8') as f:
    dataJson = f.read()
dataJson = json.loads(dataJson)
tableDates = dataJson['tableData']

# Resume point: rows before index 894 were ingested by a previous run.
for tableDate in tableDates[894:]:
    uri = tableDate['uri']
    rank = tableDate['rank']
    organizationName = tableDate['organizationName']
    print(uri, rank, organizationName)
    # Archived per-company detail page (2023-09-09 capture); one of its
    # <script> tags carries a JSON blob with the headcount field.
    href = f'https://web.archive.org/web/20230909091923/https://www.forbes.com/companies/{uri}/?list=global2000'
    req = requests.get(headers=headers, url=href, verify=False, proxies=proxies)
    soup = BeautifulSoup(req.text, 'lxml')
    scripts = soup.find_all('script')
    # print(scripts)
    for script in scripts:
        if 'numberOfEmployees' in script.text:
            break
    else:
        # No script on this page mentions the field — skip the company.
        continue
        # print(f'{rank}--{uri}---not found')
    try:
        employeesJson = script.text
        # print(employeesJson)
        employeesJson = json.loads(employeesJson)
        # Value arrives as a formatted string, e.g. "402,725" -> "402725".
        numberOfEmployees = employeesJson['numberOfEmployees'].replace(',', '')
    except (ValueError, KeyError, AttributeError):
        # Malformed JSON, missing key, or non-string value: store a placeholder
        # instead of aborting the whole run. (Was a bare `except:`, which also
        # swallowed KeyboardInterrupt/SystemExit.)
        numberOfEmployees = '--'
    dic = {
        '排名': rank,
        '企业名称': organizationName,
        '员工人数': numberOfEmployees,
    }
    db_storage.insert_one(dic)
    print(f'{rank}==={organizationName}===已入库')
    req.close()
    # Throttle to one request per second to stay polite to the archive.
    time.sleep(1)
\ No newline at end of file
Markdown 格式
0%
您添加了 0 到此讨论。请谨慎行事。
请先完成此评论的编辑!
注册 或者 登录 后发表评论