Commit e943c06c  Author: XveLingKun

20241021

Parent 6fc56b71
# -*- coding: utf-8 -*-
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from nltk.probability import FreqDist
from nltk.stem import PorterStemmer
from nltk import pos_tag, ne_chunk
from collections import defaultdict
from heapq import nlargest

nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('maxent_ne_chunker')
nltk.download('words')
nltk.download('stopwords')  # required by stopwords.words("english") below
def generate_summary(text, num_sentences=3):
    # Split the text into sentences
    sentences = sent_tokenize(text)
    # Tokenize into words
    words = word_tokenize(text.lower())
    # Remove stopwords
    stop_words = set(stopwords.words("english"))
    filtered_words = [word for word in words if word not in stop_words]
    # Word-frequency statistics
    freq_dist = FreqDist(filtered_words)
    # Stemming (computed here but not used by the scoring below)
    stemmer = PorterStemmer()
    stemmed_words = [stemmer.stem(word) for word in filtered_words]
    # Score each sentence by the frequencies of the words it contains
    sentence_scores = defaultdict(int)
    for i, sentence in enumerate(sentences):
        for word in word_tokenize(sentence.lower()):
            if word in freq_dist:
                sentence_scores[i] += freq_dist[word]
    # Pick the highest-scoring sentences
    summary_sentences = nlargest(num_sentences, sentence_scores, key=sentence_scores.get)
    # Join the selected sentences in their original order
    summary = ' '.join([sentences[i] for i in sorted(summary_sentences)])
    return summary
text = """中国共产党第二十次全国代表大会审查、批准十九届中央纪律检查委员会工作报告。大会充分肯定十九届中央纪律检查委员会的工作。\n\n\n\n大会认为,党的十九大以来,在以习近平同志为核心的党中央坚强领导下,各级纪律检查委员会深入学习贯彻习近平新时代中国特色社会主义思想,认真落实新时代党的建设总要求,坚定贯彻党的自我革命战略部署,深入落实全面从严治党战略方针,忠实履行党章赋予的职责,坚定不移推进党风廉政建设和反腐败斗争,推动新时代纪检监察工作高质量发展,紧紧围绕党和国家工作大局发挥监督保障执行、促进完善发展作用,为全面建成小康社会、实现第一个百年奋斗目标,迈上全面建设社会主义现代化国家新征程、向第二个百年奋斗目标进军提供坚强保障,向党和人民交上了优异答卷。\n\n\n\n大会要求,高举中国特色社会主义伟大旗帜,坚持马克思列宁主义、毛泽东思想、邓小平理论、“三个代表”重要思想、科学发展观,全面贯彻习近平新时代中国特色社会主义思想,贯彻落实党的二十大作出的战略部署,深刻领悟“两个确立”的决定性意义,增强“四个意识”、坚定“四个自信”、做到“两个维护”,弘扬伟大建党精神,坚定不移全面从严治党,深入推进新时代党的建设新的伟大工程,推动完善党的自我革命制度规范体系,坚持以严的基调强化正风肃纪,一体推进不敢腐、不能腐、不想腐,坚决打赢反腐败斗争攻坚战持久战,锻造忠诚干净担当的纪检监察队伍,为全面建设社会主义现代化国家、全面推进中华民族伟大复兴而团结奋斗。"""
# Generate the summary
summary = generate_summary(text)
print(summary)
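# A minimal usage sketch (hypothetical English sample text, not part of the
# original script): the pipeline above relies on NLTK's English sentence
# tokenizer and English stopword list, so an English paragraph is the most
# direct test case.
# english_text = ("Frequency-based summarization scores each sentence by the "
#                 "frequencies of the words it contains. The highest-scoring "
#                 "sentences are then returned in their original order.")
# print(generate_summary(english_text, num_sentences=2))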
......@@ -1512,6 +1512,33 @@ if __name__ == "__main__":
    # print(aaa)
    # aaa = int("07")
    # print(aaa)
    keywords = "(党建工作)+(生产经营)+(深度融合)"
    kwList = getkeywords(keywords)
    print(kwList)
\ No newline at end of file
title = "党建论文│工控科产党委“1+2+V”大党建工作格局推动党建工作与生产经营深度融合"
content = "党建工作和深度融合"
keywords = "(浙江|北京)+(尼日利亚|科特迪瓦)+(活动|访问)"
keywords_split = getkeywords(keywords)
print(keywords_split)
tf_title = 0 # 统计当前规则中的关键词在标题中出现的次数
tf_content = 0 # 统计当前规则中的关键词在内容中出现的次数
for kw in keywords_split:
if "+" in kw:
# todo:2024-10-15 关键词需要同时出现 若没有同时出现则分数为0
kws = kw.split("+")
for k in kws:
c_t = str(title).lower().count(k)
c_c = str(content).lower().count(k)
if c_c:
# 如果文章中出现
tf_content += c_c
else:
tf_content = 0
break
if c_t:
tf_title += c_t
else:
tf_title = 0
break
else:
tf_title += str(title).lower().count(kw)
tf_content += str(content).lower().count(kw)
print(tf_title)
print(tf_content)
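    # A rough trace of the expected result under the logic above (assuming the
    # getkeywords() defined later in this commit): the rule expands via
    # itertools.product into 8 combinations such as "浙江+尼日利亚+活动". Since
    # neither the title nor the content contains any of those location / event
    # keywords, every combination hits the "must co-occur" reset, so both
    # prints should output 0.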
\ No newline at end of file
# -*- coding: utf-8 -*-
import json
import re
import time
import datetime
import pymongo
import requests
from bs4 import BeautifulSoup
from dateutil.relativedelta import relativedelta
from kafka import KafkaProducer
import urllib3
from retry import retry
from selenium.webdriver.support.wait import WebDriverWait
db_storage = pymongo.MongoClient('mongodb://1.95.69.135:27017/', username='admin', password='ZZsn@9988').ZZSN[
'天眼查登录信息']
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
import sys
# sys.path.append('D:\\KK\\zzsn_spider\\base')
sys.path.append('D:\\PycharmProjects\\zzsn\\base')
import BaseCore
baseCore = BaseCore.BaseCore()
cnx_ = baseCore.cnx
cursor_ = baseCore.cursor
cnx = baseCore.cnx_
cursor = baseCore.cursor_
log = baseCore.getLogger()
def sql_11():
    sql = "SELECT * FROM sys_base_enterprise WHERE legal_person like '%关联企业%'"
    cursor.execute(sql)
    data_list = cursor.fetchall()
    for data in data_list:
        social_code = data[1]
        legal_person = data[4]
        print(social_code, legal_person)
        legal_person_update = legal_person.split('关联企业')[0]
        update_sql = "UPDATE sys_base_enterprise SET legal_person = '%s' WHERE social_credit_code = '%s'" % (legal_person_update, social_code)
        cursor.execute(update_sql)
        cnx.commit()


if __name__ == "__main__":
    sql_11()
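# A hedged alternative for the UPDATE above (a sketch, not part of the original
# commit): letting the MySQL driver bind the values avoids quoting problems when
# legal_person contains quote characters.
# update_sql = "UPDATE sys_base_enterprise SET legal_person = %s WHERE social_credit_code = %s"
# cursor.execute(update_sql, (legal_person_update, social_code))
# cnx.commit()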
......@@ -114,7 +114,7 @@ def two_dfsm_mtgc():
info_code = 'IN-20240129-0019'
result_dict = {
'id': '',
'sid': '1751849444877144065',
'sid': '1846847035718762497',
'title': title,
'organ': pub_source,
'origin': '国务院国有资产监督管理委员会',
......
......@@ -125,7 +125,7 @@ def gzyw():
info_code = 'IN-20240129-0002'
result_dict = {
'id':'',
'sid':'1751810519211053058',
'sid':'1846847035718762497',
'title': title,
'organ': pub_source,
'origin': '国务院国有资产监督管理委员会',
......
url = "http://cpc.people.com.cn/GB/64162/64168/448520/index.html"
url = "http://cpc.people.com.cn/GB/64162/64168/448520/index.html"
......@@ -17,11 +17,12 @@ r = redis.Redis(host='114.116.90.53', port=6380, password='RPHZgkDQ4zGJ', db=5)
def getRequest(url):
req = requests.get(url)
print(req)
# print(req)
req.encoding = req.apparent_encoding
soup = BeautifulSoup(req.text, 'html.parser')
# print(soup)
req.close()
return soup
......@@ -58,23 +59,41 @@ if __name__ == "__main__":
# print(list_href)
for a in list_href[1:]:
href = a.get('href')
if "http" in href:
if href:
pass
else:
continue
if "http" in href:
continue
else:
href = "http://cpc.people.com.cn" + href
href = "http://cpc.people.com.cn/20th/n1/2023/0228/c64094-32633204.html"
# href = "http://cpc.people.com.cn/20th/n1/2022/1017/c448334-32546342.html"
if is_member_containing_string(info_code, href):
continue
soup_detail = getRequest(href)
div_part = soup_detail.find('div', class_="text_c")
try:
title = div_part.find('h1').text.strip()
publishDate = div_part.find('p', class_="sou").text.split('来源')[0]
publishDateList = div_part.find_all('p', class_="sou")
publishDate, origin = "", ""
for k in publishDateList:
if len(k.text) > 4:
publishDate = k.text.split('来源')[0]
origin = k.text.split('来源')[1]
break
else:
continue
if publishDate:
pass
else:
continue
year = int(publishDate[:4])
if year < 2023:
continue
print(href)
origin = div_part.find('p', class_="sou").text.split('来源')[1]
# origin = div_part.find('p', class_="sou").text.split('来源')[1]
contentWithTag = div_part.find('div', class_="show_text")
content = contentWithTag.text.strip()
time_now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
......@@ -94,6 +113,10 @@ if __name__ == "__main__":
'createDate': time_now,
}
log.info(dic_info)
if sendKafka(dic_info):
r.sadd(info_code, href)
except Exception as e:
log.info(f"出错 {e} ----- {href}")
time.sleep(3)
......@@ -13,7 +13,7 @@ url=jdbc:mysql://114.115.159.144:3306/caiji?useUnicode=true&characterEncoding=ut
[kafka]
bootstrap_servers=1.95.78.131:9092
topic=keyWordsInfo
groupId=python_google
groupId=python_google1
[selenium]
;chrome_driver=C:\Users\WIN10\DataspellProjects\crawlerProjectDemo\tmpcrawler\cmd100\chromedriver.exe
......
......@@ -34,7 +34,7 @@ from bs4 import BeautifulSoup
class GoogleSpider(object):
def __init__(self, threadId, searchkw, wordsCode, sid, item, bangdan_name):
def __init__(self, threadId, searchkw, wordsCode, sid):
# Create a ConfigParser object
self.config = configparser.ConfigParser()
# Read the config file
......@@ -65,8 +65,8 @@ class GoogleSpider(object):
self.wordsCode = wordsCode
self.sid = sid
self.threadId = threadId
self.item = item
self.bangdan_name = bangdan_name
# self.item = item
# self.bangdan_name = bangdan_name
def createDriver(self):
chrome_driver = self.config.get('selenium', 'chrome_driver')
......@@ -202,15 +202,18 @@ class GoogleSpider(object):
lang = ''
try:
lang = self.detect_language(title)
raw_html = self.webDriver(url)
sm = SmartExtractor(lang)
article = sm.extract_by_url(url=url)
article = sm.extract_by_html(raw_html, title)
# article = sm.extract_by_html(raw_html)
content = article.cleaned_text
contentWithTag = article.text
except Exception as e:
try:
lang = self.detect_language(title)
raw_html = self.webDriver(url)
sm = SmartExtractor(lang)
article = sm.extract_by_html(raw_html)
article = sm.extract_by_html(raw_html, title)
content = article.cleaned_text
contentWithTag = article.text
except Exception as e:
......
......@@ -18,6 +18,7 @@ from baseCore import BaseCore
from queue import Queue
import configparser
class GoogleTaskJob(object):
def __init__(self):
# Create a ConfigParser object
......@@ -33,7 +34,7 @@ class GoogleTaskJob(object):
bootstrap_servers = self.config.get('kafka', 'bootstrap_servers')
# Topic to subscribe to
topic = self.config.get('kafka', 'topic')
groupId=self.config.get('kafka', 'groupId')
groupId = self.config.get('kafka', 'groupId')
consumer = KafkaConsumer(topic, group_id=groupId,
bootstrap_servers=[bootstrap_servers],
value_deserializer=lambda m: json.loads(m.decode('utf-8')))
......@@ -41,119 +42,111 @@ class GoogleTaskJob(object):
for record in consumer:
try:
logger.info(f"value:{record.value}")
keymsg=record.value
keymsg = record.value
if keymsg:
break
else:
continue
#print("%s:%d:%d: key=%s value=%s" % (msg.topic, msg.partition, msg.offset, msg.key, msg.value))
# print("%s:%d:%d: key=%s value=%s" % (msg.topic, msg.partition, msg.offset, msg.key, msg.value))
except Exception as e:
logger.info("msg.value error:",e)
logger.info("msg.value error:", e)
except KeyboardInterrupt as e:
keymsg={}
keymsg = {}
finally:
consumer.close()
return keymsg
def getkeyFromredis(self, codeid):
def getkeyFromredis(self,codeid):
kvalue=self.r.get('KEY_WORDS_TO_REDIS::'+codeid)
kvalue=kvalue.decode('utf-8')
kvalue=json.loads(kvalue)
kvalue = self.r.get('KEY_WORDS_TO_REDIS::' + codeid)
kvalue = kvalue.decode('utf-8')
kvalue = json.loads(kvalue)
return kvalue
def getkeywords(self,keywords):
kwList=[]
def getkeywords(self, keywords):
keywords = keywords.replace('(', '(').replace(')', ')')
kwList = []
if ')+(' in keywords:
k1List=keywords.split('+')
kk2=[]
k1List = keywords.split('+')
kk2 = []
for k2 in k1List:
k2=k2.strip("()")
k2List=k2.split('|')
k2 = k2.strip("()")
k2List = k2.split('|')
kk2.append(k2List)
if len(kk2)==2:
if len(kk2) == 2:
result = list(itertools.product(kk2[0], kk2[1]))
elif len(kk2)==3:
result = list(itertools.product(kk2[0], kk2[1],kk2[2]))
elif len(kk2)==4:
result = list(itertools.product(kk2[0], kk2[1],kk2[2],kk2[3]))
elif len(kk2) == 3:
result = list(itertools.product(kk2[0], kk2[1], kk2[2]))
elif len(kk2) == 4:
result = list(itertools.product(kk2[0], kk2[1], kk2[2], kk2[3]))
for res in result:
kwstr=''
kwstr = ''
for kw in res:
kwstr+=kw+"+"
kwstr += kw + "+"
kwList.append(kwstr.strip('+'))
elif '+(' in keywords:
k1List=keywords.split('+')
kk2=[]
k1List = keywords.split('+')
kk2 = []
for k2 in k1List:
k2=k2.strip("()")
k2List=k2.split('|')
k2 = k2.strip("()")
k2List = k2.split('|')
kk2.append(k2List)
if len(kk2)==2:
if len(kk2) == 2:
result = list(itertools.product(kk2[0], kk2[1]))
for res in result:
kwstr=''
kwstr = ''
for kw in res:
kwstr+=kw+"+"
kwstr += kw + "+"
kwList.append(kwstr.strip('+'))
else:
k3=keywords.split("|")
kwList=k3
k3 = keywords.split("|")
kwList = k3
return kwList
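# A hedged sanity check of the expansion above (illustrative input, not from the
# original code): for a rule like '(浙江|北京)+(活动|访问)', splitting on '+',
# stripping the parentheses and taking itertools.product of the groups yields
# ['浙江+活动', '浙江+访问', '北京+活动', '北京+访问'].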
def paserKeyMsg(self,keymsg):
def paserKeyMsg(self, keymsg):
num = 1
logger.info('----------')
wordsCode=keymsg['wordsCode']
id=keymsg['id']
wordsCode = keymsg['wordsCode']
id = keymsg['id']
try:
searchEngines=keymsg['searchEngines']
searchEngines = keymsg['searchEngines']
if 'java.util.ArrayList' in searchEngines:
searchEngines = searchEngines[1]
except Exception as e:
searchEngines=[]
kwList=[]
searchEngines = []
kwList = []
if searchEngines:
if '3' in searchEngines:
keyword=keymsg['keyWord']
keymsglist=self.getkeywords(keyword)
if '4' in searchEngines:
keyword = keymsg['keyWord']
keymsglist = self.getkeywords(keyword)
for kw in keymsglist:
kwmsg={
'kw':kw,
'wordsCode':wordsCode,
'sid':id
kwmsg = {
'kw': kw,
'wordsCode': wordsCode,
'sid': id
}
kwList.append(kwmsg)
else:
logger.info('+++++')
keyword=keymsg['keyWord']
keymsglist=self.getkeywords(keyword)
for kw in keymsglist:
kwmsg={
'kw':kw,
'wordsCode':wordsCode,
'sid':id
}
kwList.append(kwmsg)
kwList.append((num, kwmsg))
num += 1
return kwList
def runSpider(self,kwmsg):
searchkw=kwmsg['kw']
wordsCode=kwmsg['wordsCode']
sid=kwmsg['sid']
googleSpider=GoogleSpider(searchkw,wordsCode,sid)
def runSpider(self, threadId, kwmsg):
searchkw = kwmsg['kw']
wordsCode = kwmsg['wordsCode']
sid = kwmsg['sid']
googleSpider = GoogleSpider(threadId, searchkw, wordsCode, sid)
try:
googleSpider.get_page_html()
googleSpider.get_detail_html()
except Exception as e:
logger.info('百度搜索异常'+searchkw)
logger.info('百度搜索异常' + searchkw)
finally:
googleSpider.driver.quit()
logger.info("关键词采集结束!"+searchkw)
logger.info("关键词采集结束!" + searchkw)
if __name__ == '__main__':
# ss='道地西洋参+(销售市场|交易市场|直播带货|借助大会平台|网店|微商|电商|农民博主|推介宣传|高品质定位|西洋参产品经营者加盟|引进龙头企业|西洋参冷风库|建设农旅中心|农产品展销中心|精品民宿|温泉)'
......@@ -161,15 +154,16 @@ if __name__ == '__main__':
# print(keymsglist)
# Create the Redis connection
googleTaskJob=GoogleTaskJob()
baseCore=BaseCore()
logger=baseCore.getLogger()
googleTaskJob = GoogleTaskJob()
baseCore = BaseCore()
logger = baseCore.getLogger()
print('---------------')
while True:
# while True:
for i in range(50):
try:
try:
keymsg=googleTaskJob.getkafka()
kwList=googleTaskJob.paserKeyMsg(keymsg)
keymsg = googleTaskJob.getkafka()
kwList = googleTaskJob.paserKeyMsg(keymsg)
except Exception as e:
logger.info("从kafka拿取信息失败!")
time.sleep(5)
......@@ -178,7 +172,7 @@ if __name__ == '__main__':
# Create a thread pool; the number of worker threads is set via max_workers below
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
# Submit tasks to the pool; each task handles one keyword item
results = [executor.submit(googleTaskJob.runSpider, data) for data in kwList]
results = [executor.submit(googleTaskJob.runSpider, num, data) for num, data in kwList]
# Collect the results of the tasks
for future in concurrent.futures.as_completed(results):
try:
......@@ -190,4 +184,3 @@ if __name__ == '__main__':
logger.info(f"任务执行exception: {e}")
except Exception as e:
logger.info('采集异常')
......@@ -129,8 +129,31 @@ class SmartExtractor:
"""
按HTML采集内容
"""
# todo 1018去除head标签 将textarea标签替换成div标签
from bs4 import BeautifulSoup
html_ = BeautifulSoup(html, 'html.parser')
head_tag = html_.find('head')
if head_tag:
head_tag.decompose()
aside_tag = html_.find('aside')
if aside_tag:
aside_tag.decompose()
textarea_tags = html_.find_all('textarea')
if textarea_tags:
for textarea in textarea_tags:
try:
# 创建一个新的 <div> 标签
div_tag = html_.new_tag('div')
# 将 <textarea> 的内容移动到新的 <div> 标签中
div_tag.string = textarea.string
# 替换 <textarea> 标签
textarea.replace_with(div_tag)
except Exception as e:
continue
# 采集正文:传入html
article = self.goose.extract(raw_html=html)
article = self.goose.extract(raw_html=str(html_))
return self.get_extraction_result(article, link_text)
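        # A small illustrative sketch of the preprocessing above (hypothetical
        # input, not from the original code): for
        #   "<html><head><title>t</title></head><body><textarea>hello</textarea></body></html>"
        # the <head> block is decomposed and the <textarea> is swapped for a <div>,
        # so goose receives roughly "<html><body><div>hello</div></body></html>".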
......
......@@ -826,10 +826,12 @@ class BaseCore:
'create_time': '', 'page_size': '', 'content': ''}
for i in range(0, 3):
try:
response = requests.get(pdf_url, headers=headers, verify=False, timeout=20)
ip_dic = {'https': 'http://127.0.0.1:1080', 'http': 'http://127.0.0.1:1080'}
response = requests.get(pdf_url, headers=headers, proxies=ip_dic, verify=False, timeout=20)
# response = requests.get(pdf_url, verify=False, timeout=20)
file_size = int(response.headers.get('Content-Length'))
break
except:
except Exception as e:
time.sleep(3)
continue
page_size = 0
......
# -*- coding: utf-8 -*-
......
from baiduSpider import BaiduSpider
......@@ -2,13 +2,15 @@ from baiduSpider import BaiduSpider
searchkw, wordsCode, sid = '', '', ''
baidu = BaiduSpider(searchkw, wordsCode, sid)
import requests
# url = 'https://baijiahao.baidu.com/s?id=1784907851792547880&wfr=spider&for=pc'
url = 'https://www.163.com/dy/article/IKD3M2P20514IPKH.html'
title = '“一带一路”商学院联盟副秘书长解奕炯:临沂在国际化物流建设中一定能“先行一步”'
# url = 'https://www.thepaper.cn/newsDetail_forward_26661172'
url = 'https://finance.huanqiu.com/article/9CaKrnK5O7o' # 澎湃新闻 虎嗅APP 经济观察网
title = '中国建材集团董事长宋志平:激发和保护企业家精神'
try:
detailurl = url
title = title
content, contentWithTag = baidu.extractorMsg(detailurl, title)
content, contentWithTag,title = baidu.extractorMsg(detailurl, title)
contentWithTag = baidu.rmTagattr(contentWithTag, detailurl)
except Exception as e:
content = ''
......
from lxml import etree
from bs4 import BeautifulSoup
from goose3 import Goose
from goose3.text import StopWordsChinese, StopWordsKorean, StopWordsArabic

# Create Goose instances
g = Goose()
goose = Goose({'stopwords_class': StopWordsChinese})

with open(r"C:\Users\EDY\Desktop\2.html", "r", encoding='utf-8') as f:
    raw_xml = f.read()
print(type(raw_xml))
article = goose.extract(raw_html=raw_xml)
# Parse the XML
# root = etree.fromstring(raw_xml)
# root = BeautifulSoup(raw_xml, 'html.parser')
# Assume article is an object whose raw_doc attribute is the root we just parsed
# article = type('Article', (object,), {'raw_doc': root})()
# title_element is the element name we are looking for
title_element_list = [
    'h1',
    'h2',
    'h3',
    'div',
    'span',
    'td',
    'p',
    'title'
]
for title_element in title_element_list:
    # Use an XPath query to extract all elements with this tag
    element_list = article.raw_doc.xpath(f'//{title_element}')
    # element_list = article.raw_doc.getroottree().xpath(f'//{title_element}')
    # Print the result
    print(element_list)
    for element in element_list:
        # Get the plain-text content, including child elements
        text = etree.tounicode(element, method='text').strip()
        text_no_space = text.replace(" ", "")
        print(text_no_space)
from goose3 import Goose
# Create a Goose instance
g = Goose()

# Sample HTML string
html = """
<html>
<head>
<title>示例文章标题</title>
</head>
<body>
<h1>示例文章标题</h1>
<p>这是文章的第一段。</p>
<p>这是文章的第二段。</p>
</body>
</html>
"""

# Extract the article content with Goose
article = g.extract(raw_html=html)

# Print the extracted information
print(article.title)             # article title
print(article.cleaned_text)      # cleaned main text
print(article.meta_description)  # meta description
print(article.publish_date)      # publish date
print(article.top_image)         # top image
print(article.authors)           # authors
\ No newline at end of file
# -*- coding: utf-8 -*-
import pandas as pd
import pymongo
from baiduSpider import BaiduSpider
searchkw, wordsCode, sid = '', '', ''
baidu = BaiduSpider(searchkw, wordsCode, sid)
import urllib3
db_storage = pymongo.MongoClient('mongodb://1.95.69.135:27017/', username='admin', password='ZZsn@9988').ZZSN[
'天眼查登录信息']
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
import sys
sys.path.append('D:\\PycharmProjects\\zzsn\\base')
import BaseCore
baseCore = BaseCore.BaseCore()
cnx_ = baseCore.cnx
cursor_ = baseCore.cursor
cnx = baseCore.cnx_
cursor = baseCore.cursor_
log = baseCore.getLogger()
def selectSql():
    sql = """SELECT
        id,
        title,
        publishDate,
        origin,
        detailurl,
        state,
        keyword,
        CAST(content AS CHAR),
        create_time,
        sid,
        wordsCode
    FROM
        baidu_search_result_
    WHERE
        wordsCode='EVENT_KW-20241017-linshi-0001'"""
    cursor_.execute(sql)
    return cursor_.fetchall()


if __name__ == "__main__":
    resultList = selectSql()
    df_list = []
    for result in resultList:
        id_ = result[0]
        title = result[1]
        publishDate = result[2]
        origin = result[3]
        url = result[4]
        try:
            detailurl = url
            content, contentWithTag, title = baidu.extractorMsg(detailurl, title)
            contentWithTag = baidu.rmTagattr(contentWithTag, detailurl)
        except Exception as e:
            content = ''
            contentWithTag = ''
        detailmsg = {
            'title': title,
            'detailurl': url,
            'content': content,
            'contentHtml': contentWithTag,
            'origin': origin
        }
        df_list.append(detailmsg)
    df = pd.DataFrame(df_list)
    df.to_excel("./测试结果.xlsx", index=False)
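    # Note (not part of the original commit): pandas needs an Excel engine such as
    # openpyxl installed to write .xlsx files; otherwise to_excel raises ImportError.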
\ No newline at end of file