Commit 34cbd322  Author: bruxellse_li

python-service

Parent 6411f842
# -*- coding: utf-8 -*-
# @Time : 2023/3/27 11:50
# @Author : bruxellse_li
# @File : TextRewriting.py
# @Project : 从word中提取指定表格
from datetime import datetime
from wsgiref.handlers import format_date_time
from time import mktime
import hashlib
import base64
import hmac
from urllib.parse import urlencode
import json
import requests
import ast
'''
1. Text rewriting Web API call example
2. Before running: fill in the Appid, APIKey and APISecret information below
'''
class AssembleHeaderException(Exception):
def __init__(self, msg):
self.message = msg
class Url:
    def __init__(self, host, path, schema):
        self.host = host
        self.path = path
        self.schema = schema
class work_wsParam(object):
def __init__(self, APPID, APIKey, APISecret, level, url):
self.APPID = APPID
self.APIKey = APIKey
self.APISecret = APISecret
# self.url = 'https://api.xf-yun.com/v1/private/se3acbe7f'
self.url = url
self.level = level
def parse_url(self, requset_url):
stidx = requset_url.index("://")
host = requset_url[stidx + 3:]
schema = requset_url[:stidx + 3]
edidx = host.index("/")
if edidx <= 0:
raise AssembleHeaderException("invalid request url:" + requset_url)
path = host[edidx:]
host = host[:edidx]
u = Url(host, path, schema)
return u
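    # Illustrative sketch (not part of the original commit): for the default service URL the parse yields
    #   u = wsParam.parse_url('https://api.xf-yun.com/v1/private/se3acbe7f')
    #   u.schema -> 'https://', u.host -> 'api.xf-yun.com', u.path -> '/v1/private/se3acbe7f'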
def init_header(self):
headers = {
'content-type': "application/json",
'host': 'api.xf-yun.com'
}
return headers
def get_body(self, text):
data = {
"header": {
"app_id": self.APPID,
"status": 3,
},
"parameter": {
"se3acbe7f": {
"level": self.level,
"result": {
"encoding": "utf8",
"compress": "raw",
"format": "json"
}
}
},
"payload": {
"input1": {
"encoding": "utf8",
"compress": "raw",
"format": "plain",
"status": 3,
"text": str(base64.b64encode(text.encode('utf-8')), 'utf-8')
}
}
}
body = json.dumps(data)
return body
def assemble_ws_auth_url(wsParam, requset_url, method="POST", api_key="", api_secret=""):
u = wsParam.parse_url(requset_url)
# u = parse_url(requset_url)
host = u.host
path = u.path
now = datetime.now()
date = format_date_time(mktime(now.timetuple()))
# print(date)
# date = "Thu, 12 Dec 2019 01:57:27 GMT"
signature_origin = "host: {}\ndate: {}\n{} {} HTTP/1.1".format(host, date, method, path)
# print("----2", signature_origin)
signature_sha = hmac.new(api_secret.encode('utf-8'), signature_origin.encode('utf-8'),
digestmod=hashlib.sha256).digest()
signature_sha = base64.b64encode(signature_sha).decode(encoding='utf-8')
authorization_origin = "api_key=\"%s\", algorithm=\"%s\", headers=\"%s\", signature=\"%s\"" % (
api_key, "hmac-sha256", "host date request-line", signature_sha)
# print("----1:", authorization_origin)
authorization = base64.b64encode(authorization_origin.encode('utf-8')).decode(encoding='utf-8')
# print(authorization_origin)
values = {
"host": host,
"date": date,
"authorization": authorization
}
return requset_url + "?" + urlencode(values)
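# Descriptive note (an assumption drawn from the code above, not from the original comments): the
# HMAC-SHA256 signature covers the canonical string "host: ...\ndate: ...\nPOST /v1/private/se3acbe7f HTTP/1.1",
# and the returned value is the request URL with host, date and the base64-encoded authorization appended, e.g.
#   https://api.xf-yun.com/v1/private/se3acbe7f?host=api.xf-yun.com&date=...&authorization=...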
def get_result(wsParam, text):
request_url = assemble_ws_auth_url(wsParam, wsParam.url, "POST", wsParam.APIKey, wsParam.APISecret)
# request_url = assemble_ws_auth_url(url, "POST", APIKey, APISecret)
# print("request_url:", request_url)
response = requests.post(request_url, data=wsParam.get_body(text), headers=wsParam.init_header())
# print("response:", response)
str_result = response.content.decode('utf8')
json_result = json.loads(str_result)
# print("response-content:", json_result)
    if 'header' in json_result and json_result['header']['code'] == 0:
        renew_text = json_result['payload']['result']['text']
        # print("\nRewriting result:", str(base64.b64decode(renew_text), 'utf-8'))
        result_text = str(base64.b64decode(renew_text), 'utf-8')
        trans_result = ast.literal_eval(result_text)
        str_result = trans_result[0][0]
    else:
        str_result = "改写失败!请检查服务是否断开"
    # Save the rewriting result to a file
    # result_file = open(".\改写结果.txt",'w',encoding='utf-8')
    # result_file.write(str(base64.b64decode(renew_text), 'utf-8'))
return str_result
def get_list_result(text):
APPID = "51718e1a"
APISecret = "ZTAwYjcyZTRlZTQ3M2FmY2RlMDZiYjEx"
APIKey = "ec11462dcbb1d8d5d8ec15612f7243e7"
url = 'https://api.xf-yun.com/v1/private/se3acbe7f'
    # level = "<L3>"  # rewriting level, <L1> ~ <L6>; the higher the level, the heavier the rewriting
level_list = ["<L2>", "<L3>", "<L4>"]
result_one = []
for level in level_list:
wsParam = work_wsParam(APPID, APIKey, APISecret, level, url)
text_one = get_result(wsParam, text)
result_one.append(text_one)
return result_one
if __name__ == "__main__":
text = "2021年,本单位资产实行分类管理,建立健全了资产内部管理制度;单位加强对实物资产和无形资产的管理," \
"明确相关部门和岗位的职责权限,强化对配置、使用和处置等关键环节的管控;明确资产使用和保管责任人,落实资产使用人在资产管理中的责任。"
print(get_list_result(text))
# # APPID = "51718e1a"
# # APISecret = "ZTAwYjcyZTRlZTQ3M2FmY2RlMDZiYjEx"
# # APIKey = "ec11462dcbb1d8d5d8ec15612f7243e7"
# # url = 'https://api.xf-yun.com/v1/private/se3acbe7f'
# # level = "<L3>" #改写等级 <L1> ~ <L6> 等级越高,改写程度越深
# text = "随着我国城市化脚步的不断加快,园林工程建设的数量也在不断上升,城市对于园林工程的质量要求也随之上升,然而就当前我国园" \
# "林工程管理的实践而言,就园林工程质量管理这一环节还存在许多不足之处,本文在探讨园林工程质量内涵的基础上,深入进行质量" \
# "管理策略探讨,目的是保障我国园林工程施工质量和提升整体发展效率。"
#
# get_result(text)
# wsParam = work_wsParam(APPID, APIKey, APISecret, level, url)
# request_url = assemble_ws_auth_url(wsParam.url, "POST", wsParam.APIKey, wsParam.APISecret)
# print(get_result(text, request_url))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/7/31 10:21
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/4/21 9:30
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : base_app
# @Author : LiuYan
# @Time : 2021/4/21 9:30
import json
from flask import Flask, Blueprint, request, jsonify, render_template
from flask_cors import *
from base.config.base_config import DB_URI
from utils.log import logger
app = Flask(__name__, static_url_path='', static_folder='../../static', template_folder='../../static')
CORS(app, supports_credentials=True)
app.config['SQLALCHEMY_DATABASE_URI'] = DB_URI
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/4/16 18:03
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : base_config
# @Author : LiuYan
# @Time : 2021/4/16 18:06
from abc import abstractmethod, ABC
# root_dir = '/home/zzsn/liuyan/zzsn_gurag'
root_dir = '..' # deploy
DIALECT = 'mysql'
DRIVER = 'pymysql'
USERNAME = 'root'
PASSWORD = 'zzsn9988'
HOST = '39.105.62.235'
PORT = '3306'
DATABASE = 'gurag'
# Database connection URI
DB_URI = '{}+{}://{}:{}@{}:{}/{}?charset=utf8'.format(DIALECT, DRIVER, USERNAME, PASSWORD, HOST, PORT, DATABASE)
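# For illustration (values come from the settings above; password elided here): DB_URI expands to something like
# 'mysql+pymysql://root:<password>@39.105.62.235:3306/gurag?charset=utf8'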
SQLALCHEMY_DATABASE_URI = DB_URI
SQLALCHEMY_TRACK_MODIFICATIONS = True
SWAGGER_TITLE = 'API'
SWAGGER_DESC = 'API接口'
# Address; the port number must be included
SWAGGER_HOST = '0.0.0.0:7010'
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/12/1 10:23
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : base_dao
# @Author : LiuYan
# @Time : 2021/12/1 10:23
from abc import abstractmethod, ABC
from flask_sqlalchemy import SQLAlchemy
from base.app.base_app import app
db = SQLAlchemy(app)
class BaseDao(ABC):
@abstractmethod
def __init__(self):
super(BaseDao, self).__init__()
@abstractmethod
def load_config(self):
"""
Add the config you need.
:return: config(YamlDict)
"""
pass
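# A minimal illustrative subclass (an assumption, not part of this commit): a concrete DAO only has to
# implement __init__ and load_config(); the names used below (StudentDao, the empty config) are hypothetical.
# class StudentDao(BaseDao):
#     def __init__(self):
#         super(StudentDao, self).__init__()
#         self.config = self.load_config()
#
#     def load_config(self):
#         # return whatever configuration object the DAO needs (e.g. a YamlDict)
#         return {}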
# -*- coding: utf-8 -*-
# @Time : 2023/3/7 17:29
# @Author : ctt
# @File : copy_table
# @Project : 表格复制
from copy import deepcopy
from docx import Document
"""
prep_p = p.insert_paragraph_before("段落前插入内容)
document.add_page_break() # 插入分页符
"""
import re
import json
import pandas as pd
from docx import Document
from docx.document import Document as _Document
from docx.oxml.text.paragraph import CT_P
from docx.oxml.table import CT_Tbl
from docx.table import _Cell, Table, _Row
from docx.text.paragraph import Paragraph
from docx.enum.section import WD_SECTION_START
from docx.enum.text import WD_BREAK
import docx
def iter_block_items(parent):
"""
Generate a reference to each paragraph and table child within *parent*,
in document order. Each returned value is an instance of either Table or
Paragraph. *parent* would most commonly be a reference to a main
Document object, but also works for a _Cell object, which itself can
contain paragraphs and tables.
"""
if isinstance(parent, _Document):
parent_elm = parent.element.body
elif isinstance(parent, _Cell):
parent_elm = parent._tc
elif isinstance(parent, _Row):
parent_elm = parent._tr
else:
raise ValueError("something's not right")
for child in parent_elm.iterchildren():
if isinstance(child, CT_P):
yield Paragraph(child, parent)
elif isinstance(child, CT_Tbl):
yield Table(child, parent)
def parase_table(table):
out_df = pd.DataFrame()
for i, row in enumerate(table.rows[:]):
row_content = []
for cell in row.cells:
c = cell.text.strip()
row_content.append(c)
out_df = pd.concat([out_df, pd.DataFrame(row_content)], axis=1, ignore_index=True)
return out_df.T
def get_table_position(para):
pattern = re.compile(r'(?<={{).*?(?=}})')
match = pattern.findall(para.text)
if match:
return match
return False
def get_choose_table(document, table_names: list):
    '''
    :param document:
    :param table_names: names of the tables to extract
    :return: {'table name': [table, continuation tables]}
    '''
table_names_rule = '|'.join(table_names)
table_names_data = {}
[table_names_data.update({key: []}) for key in table_names]
dw_pattern = re.compile(r''+table_names_rule)
    i = 1
    dw = []  # guard: dw stays defined even if the first block is not a Paragraph
    for block in iter_block_items(document):
        # handle paragraphs
        if isinstance(block, Paragraph):
            dw = dw_pattern.findall(block.text)
        # the preceding paragraph matched a table name: copy the table that follows it
        elif isinstance(block, Table) and dw:
            new_table = deepcopy(block.table)
            table_names_data[dw[0]].append(new_table._element)
        # handle tables whose own header contains the target information
        elif isinstance(block, Table):
            # parse the table row by row and store it as a DataFrame
            table_df = parase_table(block.table)
            if table_df[0][0] in table_names:
                # print(table_names_data[table_df[0][0]])
                new_table = deepcopy(block.table)
                # print(new_table._element)
                table_names_data[table_df[0][0] + "续表" + str(i)] = [new_table._element]
                i += 1
                # table_names_data[table_df[0][0]].append(new_table._element)
                # print(table_names_data[table_df[0][0]])
    return table_names_data
def new_document():
    document = Document()
    # add a paragraph to the document
    para = document.add_paragraph()
    # append text after the paragraph
    # run = para.add_run()
    # run.add_break()
    return para._p
def generate_report(table_names_data, save_path, template_path, tables_dict):
document = Document(template_path)
pattern = re.compile(r'(?<={{).*?(?=}})')
    # block objects include headings, paragraphs, pictures, tables and lists
    # run objects are the inline pieces that make up a block; all of a block's content lives in its runs, so font, size and color changes are applied to runs
# for block in iter_block_items(document):
for block in document.paragraphs:
if isinstance(block, Paragraph):
match = pattern.findall(block.text)
if match and "table" in match[0]:
table_name = match[0]
for _ in table_names_data[tables_dict[table_name]]:
                    # white_row = new_document()
                    # work at the XML level: addnext() inserts the element directly after this one (moving any trailing text behind it), so the new element becomes its immediately following sibling
                    # block._p.addnext(white_row)
block._p.addnext(_)
p = block._element
p.getparent().remove(p)
block._p = block._element = None
    # remove the "续表N" continuation-table markers defined in the template
pattern_clear = re.compile(r'(?<=续表)[0-9]')
for block in iter_block_items(document):
if isinstance(block, Paragraph):
match = pattern_clear.findall(block.text)
if match:
p = block._element
p.getparent().remove(p)
block._p = block._element = None
document.save(save_path)
if __name__ == '__main__':
import datetime
start_time = datetime.datetime.now()
    # parameters: tables_dict, docx_file, save_path, template_path
tables_dict = {
"table13": "以名义金额计量的资产名称、数量等情况,以及以名义金额计量理由的说明",
"table5": "收入费用表(2)",
"table4": "收入费用表(1)",
"table3": "资产负债表续表2",
"table2": "资产负债表续表1",
"table1": "资产负债表",
"table9": "(17)其他应付款明细信息如下:",
"table8": "(9)无形资产明细信息如下:",
"table10": "(24)其他收入明细信息如下:",
"table7": "(7)固定资产明细信息如下:",
"table11": "(25)业务活动费用明细信息如下:",
"table6": "(1)货币资金明细信息如下:",
"table12": "(28)商品和服务费用明细信息如下:"
}
# tables_dict = {'table1': '资产负债表', 'table2': '资产负债表续表1', 'table3': '资产负债表续表2', 'table4': '收入费用表(1)', 'table5': '收入费用表(2)', 'table6': '(1)货币资金明细信息如下:',
# "table7": "(7)固定资产明细信息如下:", "table8": "(9)无形资产明细信息如下:", "table9": "(17)其他应付款明细信息如下:", "table10": "(24)其他收入明细信息如下:",
# "table11": "(25)业务活动费用明细信息如下:", "table12": "(28)商品和服务费用明细信息如下:", }
docx_file = r'data/3月23测试半成品.docx'
document = Document(docx_file)
data_result = get_choose_table(document, list(tables_dict.values()))
print(data_result)
generate_report(data_result, save_path=r'data/报告文件.docx', template_path=r'data/new_财务报告模板.docx', tables_dict=tables_dict)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/12/1 10:11
from base.dao.base_dao import db
from dao.user import User
from dao.dataset import Dataset
from dao.dataset_field import DatasetField
from dao.template import Template
db.create_all()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : dataset
# @Author : LiuYan
# @Time : 2021/12/1 17:10
import datetime
from base.dao.base_dao import db
user_dataset_table = db.Table('user_dataset',
db.Column('user_id', db.Integer, db.ForeignKey('user.id'), primary_key=True),
db.Column('dataset_id', db.Integer, db.ForeignKey('dataset.id'), primary_key=True))
class Dataset(db.Model):
"""
user -- dataset --> ManyToMany
"""
__tablename__ = 'dataset'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)  # *_primary key id
    project_name = db.Column(db.String(50), nullable=False)  # *_project identifier
    topic_name = db.Column(db.String(50), nullable=True)  # __topic identifier
    datasource_name = db.Column(db.String(50), nullable=True)  # __data source identifier
    dataset_name = db.Column(db.String(50), nullable=False)  # *_dataset name
    dataset_type = db.Column(db.Integer, nullable=False)  # *_dataset type 0: Dict | 1: List[Dict]
    dataset_describe = db.Column(db.String(5000), nullable=False)  # *_dataset description
    dataset_source_type = db.Column(db.Integer, nullable=False)  # *_dataset source type 0: registered via business API | 1: registered via database SQL
    dataset_url = db.Column(db.String(500), nullable=False)  # *_dataset request API endpoint
    parameter = db.Column(db.String(500), nullable=True)  # __API request parameters
    database_type = db.Column(db.Integer, nullable=False)  # *_database type 0: mysql | 1: oracle | -1: (when dataset source type is 0)
    database_config = db.Column(db.JSON, nullable=False)  # *_database connection config
    database_sql = db.Column(db.String(5000), nullable=False)  # *_database query statement
    status = db.Column(db.Integer, nullable=True)  # __registration status 0: failed 1: succeeded
    create_by = db.Column(db.String(50), nullable=True)  # __created by
    create_time = db.Column(db.DateTime, default=datetime.datetime.now)  # __creation time (pass the callable, not now(), so it is evaluated per insert)
    update_by = db.Column(db.String(50), nullable=True)  # __updated by
    update_time = db.Column(db.DateTime, onupdate=datetime.datetime.now)  # __update time
    users = db.relationship('User', secondary=user_dataset_table, backref='users')
    # composite unique constraint
    __table_args__ = (
        db.UniqueConstraint('project_name', 'dataset_name', name='juc_project_name_dataset_name'),
    )
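# Illustrative usage sketch (hypothetical values, not part of this commit): registering a dataset row.
# dataset = Dataset(
#     project_name='demo_project', dataset_name='demo_dataset', dataset_type=1,
#     dataset_describe='example dataset', dataset_source_type=1, dataset_url='',
#     database_type=0, database_config={'host': '127.0.0.1'}, database_sql='SELECT 1',
# )
# db.session.add(dataset)
# db.session.commit()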
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : dataset_field
# @Author : LiuYan
# @Time : 2021/12/1 17:22
import datetime
from base.dao.base_dao import db
class DatasetField(db.Model):
"""
Dataset -- DatasetField -> OneToMany
"""
__tablename__ = 'dataset_field'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)  # *_primary key id
    field_name = db.Column(db.String(50), nullable=False)  # *_field name
    field_type = db.Column(db.Integer, nullable=False)  # *_field type 0: Number 1: String
    field_describe = db.Column(db.String(5000), nullable=False)  # *_field description
    status = db.Column(db.Integer, nullable=True)  # __registration status 0: failed 1: succeeded
    create_by = db.Column(db.String(50), nullable=True)  # __created by
    create_time = db.Column(db.DateTime, default=datetime.datetime.now)  # __creation time
    update_by = db.Column(db.String(50), nullable=True)  # __updated by
    update_time = db.Column(db.DateTime, onupdate=datetime.datetime.now)  # __update time
    dataset_id = db.Column(db.Integer, db.ForeignKey('dataset.id'), nullable=False)
    dataset = db.relationship('Dataset', backref='dataset_fields')  # relationship, not a column
    # composite unique constraint
    __table_args__ = (
        db.UniqueConstraint('field_name', 'dataset_id', name='juc_field_name_dataset_id'),
    )
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : template
# @Author : LiuYan
# @Time : 2021/12/1 17:13
import datetime
from base.dao.base_dao import db
class Template(db.Model):
"""
user -- template --> OneToMany
"""
__tablename__ = 'template'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)  # *_primary key id
    project_name = db.Column(db.String(50), nullable=False)  # *_project name
    topic_name = db.Column(db.String(50), nullable=True)  # __topic name
    datasource_name = db.Column(db.String(50), nullable=True)  # __data source name
    template_name = db.Column(db.String(50), nullable=False)  # *_template/report name
    template_path = db.Column(db.String(500), nullable=False)  # *_template path
    template_describe = db.Column(db.String(5000), nullable=False)  # *_template description
    create_by = db.Column(db.String(50), nullable=True)  # __created by
    create_time = db.Column(db.DateTime, default=datetime.datetime.now)  # __creation time
    update_by = db.Column(db.String(50), nullable=True)  # __updated by
    update_time = db.Column(db.DateTime, onupdate=datetime.datetime.now)  # __update time
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/12/1 11:51
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : role
# @Author : LiuYan
# @Time : 2021/12/1 11:25
from base.dao.base_dao import db
class Role(db.Model):
__tablename__ = '_role'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : student
# @Author : LiuYan
# @Time : 2021/12/1 15:47
from base.dao.base_dao import db
class Student(db.Model):
__tablename__ = '_student'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(50), nullable=False)
age = db.Column(db.Integer, nullable=True)
if __name__ == '__main__':
"""
单表创建 增删改查
"""
# create
db.create_all()
# add
# student1 = Student(name='yan')
# student2 = Student(name='ying', age=18)
# db.session.add_all([student1, student2])
# db.session.commit()
# query
# list_student = Student.query.all()
# print(list_student)
# for student in list_student:
# print(student.id, student.name, student.age)
# update
# student = Student.query.filter(Student.name == 'yan').first()
# student.age = 20
# db.session.commit()
# delete
# student = Student.query.filter(Student.name == 'yan').first()
# db.session.delete(student)
# db.session.commit()
student = Student.query.filter(Student.name == 'yan')
for s in student:
db.session.delete(s)
db.session.commit()
pass
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : test_dao
# @Author : LiuYan
# @Time : 2021/12/1 11:51
from dao.test.role import Role
from dao.test.user import User
from base.dao.base_dao import db
if __name__ == '__main__':
"""
一对多表 创建 增删改查
"""
# create
db.create_all()
# add
new_user = User(name='liuyan')
db.session.add(new_user)
db.session.commit()
list_user = User.query.all()
print(list_user)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : user
# @Author : LiuYan
# @Time : 2021/12/1 10:15
from base.dao.base_dao import db, BaseDao
class User(db.Model):
__tablename__ = '_user'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(50), nullable=False)
age = db.Column(db.Integer, nullable=True)
role_id = db.Column(db.Integer, db.ForeignKey('_role.id'), nullable=False)
role = db.relationship('Role', backref='Users')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : user
# @Author : LiuYan
# @Time : 2021/12/1 17:13
import datetime
from base.dao.base_dao import db
class User(db.Model):
__tablename__ = 'user'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)  # *_primary key id
    user_name = db.Column(db.String(50), nullable=False)  # *_user identifier
    project_name = db.Column(db.String(50), nullable=False)  # *_project identifier
    topic_name = db.Column(db.String(50), nullable=True)  # __topic identifier
    create_by = db.Column(db.String(50), nullable=True)  # __created by
    create_time = db.Column(db.DateTime, default=datetime.datetime.now)  # __creation time
    update_by = db.Column(db.String(50), nullable=True)  # __updated by
    update_time = db.Column(db.DateTime, onupdate=datetime.datetime.now)  # __update time
    template = db.relationship('Template', backref='user')  # relationship, not a column
# connect timeout in seconds
# default value is 30s
connect_timeout=30
# network timeout in seconds
# default value is 30s
network_timeout=60
# the base path to store log files
#base_path=/home/tarena/django-project/cc_shop1/cc_shop1/logs
# tracker_server can occur more than once, and tracker_server format is
# "host:port", host can be hostname or ip address
tracker_server=114.115.215.96:22122
#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info
# if use connection pool
# default value is false
# since V4.05
use_connection_pool = false
# connections whose the idle time exceeds this time will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600
# if load FastDFS parameters from tracker server
# since V4.05
# default value is false
load_fdfs_parameters_from_tracker=false
# if use storage ID instead of IP address
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# default value is false
# since V4.05
use_storage_id = false
# specify storage ids filename, can use relative or absolute path
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# since V4.05
storage_ids_filename = storage_ids.conf
#HTTP settings
http.tracker_server_port=4000
#use "#include" directive to include HTTP other settiongs
##include http.conf
# -*- coding: utf-8 -*-
# @Time : 2023/2/25 10:51
# @Author : ctt
# @File : 文本内容提取
# @Project : untitled1
import re
from docx import Document
import pandas as pd
class Other_Extract:
def __init__(self):
self.unitName_pattern = re.compile(r'(?<=部门(单位)名称:).*?(\n)')
self.unitChargePeople_pattern = re.compile(r'(?<=单位负责人:).*?(\n)')
self.financeChargePeople_pattern = re.compile(r'(?<=财务负责人:).*?(\n)')
self.filledPeople_pattern = re.compile(r'(?<=编制人:).*?(\n)')
self.year_pattern = re.compile(r'(?<=报送日期:).*?(\n)')
@staticmethod
def match(pattern, text):
pattern_group = pattern.search(text)
if pattern_group:
return pattern_group.group().strip()
return None
def extract_other_result(self, text):
unitName = self.match(self.unitName_pattern, text)
unitChargePeople = self.match(self.unitChargePeople_pattern, text)
financeChargePeople = self.match(self.financeChargePeople_pattern, text)
filledPeople = self.match(self.filledPeople_pattern, text)
year = self.match(self.year_pattern, text)
return {'unitName': unitName,
'unitChargePeople': unitChargePeople,
'financeChargePeople': financeChargePeople,
'filledPeople': filledPeople,
'year': year}
class Extract:
# {“主要职能”:””, “机构情况”:””, “人员情况”:””, “当年取得的主要事业成效”}
def __init__(self):
# self.main_functions = re.compile(r'(?<=[0-9][\..]主要职能[。\n])(.|\n)*?(?=[0-9][\..]机构情况[。\n])')
self.main_functions = re.compile(r'(?<=[0-9][\..]主要职能[。\n])(.|\n)*?(?=([一二三四五六七八九十])当年取得的主要事业成效[。\n])')
# self.institutional_situation = re.compile(r'(?<=[0-9][\..]机构情况[。\n])(.|\n)*?(?=[0-9][\..]人员情况[。\n])')
# self.personnel_situation = re.compile(r'(?<=[0-9][\..]人员情况[。\n])(.|\n)*?(?=([一二三四五六七八九十])当年取得的主要事业成效[。\n])')
self.business_results = re.compile(r'(?<=([一二三四五六七八九十])当年取得的主要事业成效[。\n])(.|\n)*?(?=[一二三四五六七八九十]、收入支出预算执行情况分析)')
# self.patterns = [self.main_functions, self.institutional_situation, self.personnel_situation, self.business_results]
@staticmethod
def match(pattern, text):
pattern_group = pattern.search(text)
if pattern_group:
return pattern_group.group().strip()
return None
def extract_result(self, text):
main_functions = self.match(self.main_functions, text)
# institutional_situation = self.match(self.institutional_situation, text)
# personnel_situation = self.match(self.personnel_situation, text)
business_results = self.match(self.business_results, text)
return {'主要职能': main_functions,
# '机构情况': institutional_situation,
# '人员情况': personnel_situation,
'当年取得的主要事业成效': business_results}
def get_text_from_docx(filepath):
    '''
    Get all of the text content of a Word document
    :param filepath:
    :return:
    '''
document = Document(filepath)
contents = []
for paragraph in document.paragraphs:
if '<w:numPr>' in paragraph._element.xml:
contents.append('1.'+paragraph.text)
contents.append('\n')
else:
contents.append(paragraph.text)
contents.append('\n')
return ''.join(contents)
def get_cover_content_from_docx(filepath):
    '''
    Get the cover title text and the other header text content of a Word document
    :param filepath:
    :return:
    '''
document = Document(filepath)
contents = []
    # Step 1: iterate over the paragraphs and collect their text
for paragraph in document.paragraphs:
if '<w:numPr>' in paragraph._element.xml:
contents.append('1.' + paragraph.text)
contents.append('\n')
else:
contents.append(paragraph.text)
contents.append('\n')
    # Step 2: use the leading paragraphs (contents[:14]) to get the cover title information
target_content = []
for content in contents[:14]:
if content.replace("\xa0", "").strip():
target_content.append(content.strip())
# print(contents[14:35])
    # Step 3: use the following paragraphs (contents[14:35]) to get the other header information
other_content = []
for temp_content in contents[14:35]:
if temp_content.replace("\xa0", "").strip():
other_content.append(temp_content.strip())
other_content.append('\n')
return "".join(target_content), ''.join(other_content)
if __name__ == '__main__':
new_path = "data/2022年度安岳县元坝镇人民政府部门决算分析报告(1).docx"
document = get_text_from_docx(new_path)
data = Extract().extract_result(document)
print(data)
# fifth_area_pattern = re.compile(r'(?<=[0-9][\..]会计报表重要项目的明细信息及说明[。\n])(.|\n)*?(?=[0-9][\..]需要说明的其他事项[。\n])')
# filepath = "wKjIbGQeSb6AUq1aAAgAABcLaMw312.docx"
# document = Document(filepath)
# documents = get_text_from_docx(filepath)
#
# area_group = fifth_area_pattern.search(documents)
# if area_group:
# area_text = area_group.group().strip("1.").strip()
# else:
# area_text = ""
#
# print(area_text)
# cover_contents, other_contents = get_cover_content_from_docx(filepath)
# cover_pattern = re.compile(r"([0-9]{0,4}).*(?=(财务报告))")
#
# # print(content)
# cover_group = cover_pattern.search(cover_contents)
# if cover_group:
# cover_text = cover_group.group().strip()
# else:
# cover_text = ""
#
# other_extract = Other_Extract()
# other_data = other_extract.extract_other_result(other_contents)
# other_data["reportTitle"] = cover_text
# print(other_data)
# extract = Extract()
# # path = r'D:\四川报告\相关代码\四川报告之文本内容提取\data'
# path = "data/temp.docx"
# result = extract.extract_result(path)
# print(result)
# for file in os.listdir(path):
# if file[-4:] == 'docx':
# filepath = os.path.join(path, file)
# paras = get_text_from_docx(filepath)
# print(paras)
# result = extract.extract_result(paras)
# print(result)
# -*- coding: utf-8 -*-
# @Time : 2023/2/17 11:57
# @Author : ctt
# @File : extract_table
# @Project : 从word中提取指定表格
import re
import json
import pandas as pd
from docx import Document
from docx.document import Document as _Document
from docx.oxml.text.paragraph import CT_P
from docx.oxml.table import CT_Tbl
from docx.table import _Cell, Table, _Row
from docx.text.paragraph import Paragraph
def iter_block_items(parent):
"""
Generate a reference to each paragraph and table child within *parent*,
in document order. Each returned value is an instance of either Table or
Paragraph. *parent* would most commonly be a reference to a main
Document object, but also works for a _Cell object, which itself can
contain paragraphs and tables.
"""
if isinstance(parent, _Document):
parent_elm = parent.element.body
elif isinstance(parent, _Cell):
parent_elm = parent._tc
elif isinstance(parent, _Row):
parent_elm = parent._tr
else:
raise ValueError("something's not right")
for child in parent_elm.iterchildren():
if isinstance(child, CT_P):
yield Paragraph(child, parent)
elif isinstance(child, CT_Tbl):
yield Table(child, parent)
def parase_table(table):
    # define the result DataFrame first
out_df = pd.DataFrame()
for i, row in enumerate(table.rows[:]):
row_content = []
for cell in row.cells:
c = cell.text.strip()
# print(c)
row_content.append(c)
out_df = pd.concat([out_df, pd.DataFrame(row_content)], axis=1, ignore_index=True)
return out_df.T
def get_choose_table(document, table_names: list):
table_names_rule = '|'.join(table_names)
table_names_data = {}
[table_names_data.update({key: ''}) for key in table_names]
# {'资产负债表': '', '收入费用表(1)': '', '收入费用表(2)': '',}
dw_pattern = re.compile(r''+table_names_rule)
    dw = []  # guard: dw stays defined even if the first block is not a Paragraph
    for block in iter_block_items(document):
        # handle paragraphs
        if isinstance(block, Paragraph):
            dw = dw_pattern.findall(block.text)
        # the preceding paragraph matched a table name: parse the table that follows it
        elif isinstance(block, Table) and dw:
            # dw[0] is e.g. "资产负债表"
            table_df = parase_table(block.table)
            if "编制单位" in table_names_data:
                pass
            else:
                table_names_data.update({'时间': table_df.iloc[0, -2]})
                table_names_data.update({'单位': table_df.iloc[0, -1].replace(":", ":").split(":")[1]})
                table_names_data.update({'编制单位': table_df.iloc[0, 0].replace(":", ":").split(":")[1]})
            # the first row looks like "编制单位:德阳市旌阳区发展和改革局 2021年12月31日 2021年12月31日 单位:万元" and is dropped
            table_df.drop([0], inplace=True)
            # turn the numeric column labels "0 1 2 3" into the real header "项目 附注 年末数 年初数"
            # print(table_df.iloc[0])  # this row holds "项目 附注 年末数 年初数"
            table_df.rename(columns=table_df.iloc[0], inplace=True)
            # drop the header row ("项目 附注 年末数 年初数"); after rename it would otherwise appear twice
            table_df.drop(table_df.index[0], inplace=True)
            # print(table_df)
            table_names_data[dw[0]] = table_df
        # handle tables whose own header contains the target information (continuation tables)
        elif isinstance(block, Table):
            table_df = parase_table(block.table)
            if table_df[0][0] in table_names:
                update_table_key = table_df[0][0]
                # drop the first two rows of the table
                table_df.drop([0, 1], inplace=True)
                # update the header, i.e. the column names
                table_df.rename(columns=table_df.iloc[0], inplace=True)
                # drop the first row of values
                table_df.drop(table_df.index[0], inplace=True)
                # concatenate the original table and its continuation
                concated_df = pd.concat([table_names_data[update_table_key], table_df], ignore_index=True)
                # update the value stored under this table name
                table_names_data.update({update_table_key: concated_df})
    # print(table_names_data)
    # convert each DataFrame into JSON records
    for table_key, table_value in table_names_data.items():
        if isinstance(table_value, pd.DataFrame):
            table_names_data.update({table_key: json.loads(table_value.to_json(orient='records', force_ascii=False))})
    return table_names_data
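# Illustrative output shape (an assumption based on to_json(orient='records') above; values are made up):
# {'资产负债表': [{'项目': '货币资金', '附注': '', '年末数': '...', '年初数': '...'}, ...],
#  '时间': '2021年12月31日', '单位': '万元', '编制单位': '...'}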
def get_other_table(document, table_names: list):
table_names_rule = '|'.join(table_names)
table_names_data = {}
[table_names_data.update({key: ''}) for key in table_names]
# {'(2)以名义金额计量的资产名称、数量等情况,以及以名义金额计量理由的说明。': ''}
dw_pattern = re.compile(r''+table_names_rule)
    dw = []  # guard: dw stays defined even if the first block is not a Paragraph
    for block in iter_block_items(document):
        # handle paragraphs
        if isinstance(block, Paragraph):
            dw = dw_pattern.findall(block.text)
        # the preceding paragraph matched a table name: parse the table that follows it
        elif isinstance(block, Table) and dw:
            # dw[0] is the matched table name
            table_df = parase_table(block.table)
            table_df.drop([0, 1], inplace=True)
            # select the target columns; .copy() avoids a SettingWithCopyWarning when the slice is modified later
            select_df = table_df.iloc[:, [0, 3]].copy()
            select_df.rename(columns=select_df.iloc[0], inplace=True)
            select_df.drop(table_df.index[0], inplace=True)
            table_names_data[dw[0]] = select_df
    # print(table_names_data)
    # convert each DataFrame into JSON records
for table_key, table_value in table_names_data.items():
if isinstance(table_value, pd.DataFrame):
table_names_data.update({table_key: json.loads(table_value.to_json(orient='records', force_ascii=False))})
return table_names_data
def get_other1_table(document, table_names: list):
table_names_rule = '|'.join(table_names)
table_names_data = {}
[table_names_data.update({key: ''}) for key in table_names]
# [{'货币资金明细信息如下'}]
dw_pattern = re.compile(r'' + table_names_rule)
    dw = []  # guard: dw stays defined even if the first block is not a Paragraph
    for block in iter_block_items(document):
        # handle paragraphs
        if isinstance(block, Paragraph):
            dw = dw_pattern.findall(block.text)
        # the preceding paragraph matched a table name: parse the table that follows it
        elif isinstance(block, Table) and dw:
            # dw[0] is the matched table name
            table_df = parase_table(block.table)
            table_df.drop([0, 1, 2], inplace=True)
            # print(table_df)
            # select the target DataFrame; .copy() avoids a SettingWithCopyWarning when the slice is modified later
            # select_df = table_df.iloc[:, [0, 3]].copy()
            select_df = table_df.copy()
            select_df.rename(columns=select_df.iloc[0], inplace=True)
            # print(select_df)
            select_df.drop(table_df.index[0], inplace=True)
            table_names_data[dw[0]] = select_df
    # print(table_names_data)
    # convert each DataFrame into JSON records
for table_key, table_value in table_names_data.items():
if isinstance(table_value, pd.DataFrame):
table_names_data.update({table_key: json.loads(table_value.to_json(orient='records', force_ascii=False))})
return table_names_data
if __name__ == '__main__':
docx_file = r'data/3月23测试半成品.docx'
document = Document(docx_file)
table_names = ['货币资金明细信息如下']
print(get_other1_table(document, table_names))
# import datetime
# start_time = datetime.datetime.now()
# docx_file = r'data/四川报告模板.docx'
# document = Document(docx_file)
# data = get_choose_table(document, ['资产负债表', '收入费用表(1)', '收入费用表(2)'])
# # 处理资产负债表
# temp_list = data["资产负债表"]
# temp_dict = {}
#
# for temp in temp_list:
# temp_text = re.sub(":", ":", temp["项目"])
# if temp_text.endswith(":"):
# temp_dict.update({"temp_key": temp_text})
# continue
# else:
# temp["上级项目"] = temp_dict["temp_key"].strip(":")
#
#
# # 处理收入费用表(1)
# temp_list_0 = data["收入费用表(1)"]
# temp_dict_0 = {"temp_key": "收入合计"}
# # updata_list = ["收入合计", "本年盈余"]
# for temp_0 in temp_list_0:
# if temp_0["项目"].strip() == "收入合计":
# temp_dict_0.update({"temp_key": "本年盈余"})
# else:
# if temp_0["项目"].strip() == "本年盈余":
# continue
# else:
# temp_0["上级项目"] = temp_dict_0["temp_key"]
#
# # 处理收入费用表(2)
# temp_list_1 = data["收入费用表(2)"]
# temp_dict_1 = {"temp_key": "收入合计"}
# # updata_list = ["收入合计", "本年盈余"]
# for temp_1 in temp_list_1:
# if temp_1["项目"].strip() == "收入合计":
# temp_dict_1.update({"temp_key": "本年盈余"})
# else:
# if temp_1["项目"].strip() == "本年盈余":
# continue
# else:
# temp_1["上级项目"] = temp_dict_1["temp_key"]
# print(data)
# end_time = datetime.datetime.now()
# print(start_time)
# print(end_time)
# print("耗时: {}秒".format(end_time - start_time))
# __init__.py
__version__ = '2.2.0'
VERSION = tuple(map(int, __version__.split('.')))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: connection.py
import socket
import os
import sys
import time
import random
from itertools import chain
from fdfs_client.exceptions import (
FDFSError,
ConnectionError,
ResponseError,
InvaildResponse,
DataError
)
# start class Connection
class Connection(object):
    '''Manage TCP communication to and from the FastDFS server.'''
def __init__(self, **conn_kwargs):
self.pid = os.getpid()
self.host_tuple = conn_kwargs['host_tuple']
self.remote_port = conn_kwargs['port']
self.remote_addr = None
self.timeout = conn_kwargs['timeout']
self._sock = None
def __del__(self):
try:
self.disconnect()
except:
pass
def connect(self):
'''Connect to fdfs server.'''
if self._sock:
return
try:
sock = self._connect()
except socket.error as e:
raise ConnectionError(self._errormessage(e))
self._sock = sock
# print '[+] Create a connection success.'
# print '\tLocal address is %s:%s.' % self._sock.getsockname()
# print '\tRemote address is %s:%s' % (self.remote_addr, self.remote_port)
def _connect(self):
        '''Create a TCP socket. The host is a random member of host_tuple.'''
self.remote_addr = random.choice(self.host_tuple)
# print '[+] Connecting... remote: %s:%s' % (self.remote_addr, self.remote_port)
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# sock.settimeout(self.timeout)
sock = socket.create_connection((self.remote_addr, self.remote_port), self.timeout)
return sock
def disconnect(self):
'''Disconnect from fdfs server.'''
if self._sock is None:
return
try:
self._sock.close()
except socket.error as e:
raise ConnectionError(self._errormessage(e))
self._sock = None
def get_sock(self):
return self._sock
def _errormessage(self, exception):
        # args for socket.error can either be (errno, "message")
        # or just "message"
if len(exception.args) == 1:
return "[-] Error: connect to %s:%s. %s." % (self.remote_addr, self.remote_port, exception.args[0])
else:
return "[-] Error: %s connect to %s:%s. %s." % \
(exception.args[0], self.remote_addr, self.remote_port, exception.args[1])
# end class Connection
# start ConnectionPool
class ConnectionPool(object):
'''Generic Connection Pool'''
def __init__(self, name='', conn_class=Connection,
max_conn=None, **conn_kwargs):
self.pool_name = name
self.pid = os.getpid()
self.conn_class = conn_class
self.max_conn = max_conn or 2 ** 31
self.conn_kwargs = conn_kwargs
self._conns_created = 0
self._conns_available = []
self._conns_inuse = set()
# print '[+] Create a connection pool success, name: %s.' % self.pool_name
def _check_pid(self):
if self.pid != os.getpid():
self.destroy()
            self.__init__(self.pool_name, self.conn_class, self.max_conn, **self.conn_kwargs)
def make_conn(self):
'''Create a new connection.'''
if self._conns_created >= self.max_conn:
raise ConnectionError('[-] Error: Too many connections.')
num_try = 10
while True:
try:
if num_try <= 0:
sys.exit()
conn_instance = self.conn_class(**self.conn_kwargs)
conn_instance.connect()
self._conns_created += 1
break
except ConnectionError as e:
print(e)
num_try -= 1
conn_instance = None
return conn_instance
def get_connection(self):
'''Get a connection from pool.'''
self._check_pid()
try:
conn = self._conns_available.pop()
# print '[+] Get a connection from pool %s.' % self.pool_name
# print '\tLocal address is %s:%s.' % conn._sock.getsockname()
# print '\tRemote address is %s:%s' % (conn.remote_addr, conn.remote_port)
except IndexError:
conn = self.make_conn()
self._conns_inuse.add(conn)
return conn
def remove(self, conn):
'''Remove connection from pool.'''
if conn in self._conns_inuse:
self._conns_inuse.remove(conn)
self._conns_created -= 1
if conn in self._conns_available:
self._conns_available.remove(conn)
self._conns_created -= 1
def destroy(self):
'''Disconnect all connections in the pool.'''
all_conns = chain(self._conns_inuse, self._conns_available)
for conn in all_conns:
conn.disconnect()
# print '[-] Destroy connection pool %s.' % self.pool_name
def release(self, conn):
'''Release the connection back to the pool.'''
self._check_pid()
if conn.pid == self.pid:
self._conns_inuse.remove(conn)
self._conns_available.append(conn)
# print '[-] Release connection back to pool %s.' % self.pool_name
# end ConnectionPool class
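# Minimal usage sketch (hypothetical host and port, not part of the original):
# pool = ConnectionPool(name='tracker_pool', host_tuple=('192.168.0.1',), port=22122, timeout=30)
# conn = pool.get_connection()
# try:
#     ...  # talk to the tracker/storage server over conn
# finally:
#     pool.release(conn)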
def tcp_recv_response(conn, bytes_size, buffer_size=4096):
'''Receive response from server.
    It does not include the tracker header.
arguments:
@conn: connection
@bytes_size: int, will be received byte_stream size
@buffer_size: int, receive buffer size
@Return: tuple,(response, received_size)
'''
recv_buff = []
total_size = 0
try:
while bytes_size > 0:
resp = conn._sock.recv(buffer_size)
recv_buff.append(resp)
total_size += len(resp)
bytes_size -= len(resp)
except (socket.error, socket.timeout) as e:
raise ConnectionError('[-] Error: while reading from socket: (%s)' % e.args)
return (b''.join(recv_buff), total_size)
def tcp_send_data(conn, bytes_stream):
'''Send buffer to server.
    It does not include the tracker header.
arguments:
@conn: connection
    @bytes_stream: transmit buffer
@Return bool
'''
try:
conn._sock.sendall(bytes_stream)
except (socket.error, socket.timeout) as e:
        raise ConnectionError('[-] Error: while writing to socket: (%s)' % e.args)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: exceptions.py
'''Core exceptions raised by fdfs client'''
class FDFSError(Exception):
pass
class ConnectionError(FDFSError):
pass
class ResponseError(FDFSError):
pass
class InvaildResponse(FDFSError):
pass
class DataError(FDFSError):
pass
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: fdfs_protol.py
import struct
import socket
from fdfs_client.exceptions import (
FDFSError,
ConnectionError,
ResponseError,
InvaildResponse,
DataError
)
# define FDFS protocol constants
TRACKER_PROTO_CMD_STORAGE_JOIN = 81
FDFS_PROTO_CMD_QUIT = 82
TRACKER_PROTO_CMD_STORAGE_BEAT = 83 # storage heart beat
TRACKER_PROTO_CMD_STORAGE_REPORT_DISK_USAGE = 84 # report disk usage
TRACKER_PROTO_CMD_STORAGE_REPLICA_CHG = 85 # repl new storage servers
TRACKER_PROTO_CMD_STORAGE_SYNC_SRC_REQ = 86 # src storage require sync
TRACKER_PROTO_CMD_STORAGE_SYNC_DEST_REQ = 87 # dest storage require sync
TRACKER_PROTO_CMD_STORAGE_SYNC_NOTIFY = 88 # sync done notify
TRACKER_PROTO_CMD_STORAGE_SYNC_REPORT = 89 # report src last synced time as dest server
TRACKER_PROTO_CMD_STORAGE_SYNC_DEST_QUERY = 79 # dest storage query sync src storage server
TRACKER_PROTO_CMD_STORAGE_REPORT_IP_CHANGED = 78 # storage server report it's ip changed
TRACKER_PROTO_CMD_STORAGE_CHANGELOG_REQ = 77 # storage server request storage server's changelog
TRACKER_PROTO_CMD_STORAGE_REPORT_STATUS = 76 # report specified storage server status
TRACKER_PROTO_CMD_STORAGE_PARAMETER_REQ = 75 # storage server request parameters
TRACKER_PROTO_CMD_STORAGE_REPORT_TRUNK_FREE = 74 # storage report trunk free space
TRACKER_PROTO_CMD_STORAGE_REPORT_TRUNK_FID = 73 # storage report current trunk file id
TRACKER_PROTO_CMD_STORAGE_FETCH_TRUNK_FID = 72 # storage get current trunk file id
TRACKER_PROTO_CMD_TRACKER_GET_SYS_FILES_START = 61 # start of tracker get system data files
TRACKER_PROTO_CMD_TRACKER_GET_SYS_FILES_END = 62 # end of tracker get system data files
TRACKER_PROTO_CMD_TRACKER_GET_ONE_SYS_FILE = 63 # tracker get a system data file
TRACKER_PROTO_CMD_TRACKER_GET_STATUS = 64 # tracker get status of other tracker
TRACKER_PROTO_CMD_TRACKER_PING_LEADER = 65 # tracker ping leader
TRACKER_PROTO_CMD_TRACKER_NOTIFY_NEXT_LEADER = 66 # notify next leader to other trackers
TRACKER_PROTO_CMD_TRACKER_COMMIT_NEXT_LEADER = 67 # commit next leader to other trackers
TRACKER_PROTO_CMD_SERVER_LIST_ONE_GROUP = 90
TRACKER_PROTO_CMD_SERVER_LIST_ALL_GROUPS = 91
TRACKER_PROTO_CMD_SERVER_LIST_STORAGE = 92
TRACKER_PROTO_CMD_SERVER_DELETE_STORAGE = 93
TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ONE = 101
TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ONE = 102
TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE = 103
TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ONE = 104
TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ALL = 105
TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ALL = 106
TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ALL = 107
TRACKER_PROTO_CMD_RESP = 100
FDFS_PROTO_CMD_ACTIVE_TEST = 111 # active test, tracker and storage both support since V1.28
STORAGE_PROTO_CMD_REPORT_CLIENT_IP = 9 # ip as tracker client
STORAGE_PROTO_CMD_UPLOAD_FILE = 11
STORAGE_PROTO_CMD_DELETE_FILE = 12
STORAGE_PROTO_CMD_SET_METADATA = 13
STORAGE_PROTO_CMD_DOWNLOAD_FILE = 14
STORAGE_PROTO_CMD_GET_METADATA = 15
STORAGE_PROTO_CMD_SYNC_CREATE_FILE = 16
STORAGE_PROTO_CMD_SYNC_DELETE_FILE = 17
STORAGE_PROTO_CMD_SYNC_UPDATE_FILE = 18
STORAGE_PROTO_CMD_SYNC_CREATE_LINK = 19
STORAGE_PROTO_CMD_CREATE_LINK = 20
STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE = 21
STORAGE_PROTO_CMD_QUERY_FILE_INFO = 22
STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE = 23 # create appender file
STORAGE_PROTO_CMD_APPEND_FILE = 24 # append file
STORAGE_PROTO_CMD_SYNC_APPEND_FILE = 25
STORAGE_PROTO_CMD_FETCH_ONE_PATH_BINLOG = 26 # fetch binlog of one store path
STORAGE_PROTO_CMD_RESP = TRACKER_PROTO_CMD_RESP
STORAGE_PROTO_CMD_UPLOAD_MASTER_FILE = STORAGE_PROTO_CMD_UPLOAD_FILE
STORAGE_PROTO_CMD_TRUNK_ALLOC_SPACE = 27 # since V3.00
STORAGE_PROTO_CMD_TRUNK_ALLOC_CONFIRM = 28 # since V3.00
STORAGE_PROTO_CMD_TRUNK_FREE_SPACE = 29 # since V3.00
STORAGE_PROTO_CMD_TRUNK_SYNC_BINLOG = 30 # since V3.00
STORAGE_PROTO_CMD_TRUNK_GET_BINLOG_SIZE = 31 # since V3.07
STORAGE_PROTO_CMD_TRUNK_DELETE_BINLOG_MARKS = 32 # since V3.07
STORAGE_PROTO_CMD_TRUNK_TRUNCATE_BINLOG_FILE = 33 # since V3.07
STORAGE_PROTO_CMD_MODIFY_FILE = 34 # since V3.08
STORAGE_PROTO_CMD_SYNC_MODIFY_FILE = 35 # since V3.08
STORAGE_PROTO_CMD_TRUNCATE_FILE = 36 # since V3.08
STORAGE_PROTO_CMD_SYNC_TRUNCATE_FILE = 37 # since V3.08
# for overwrite all old metadata
STORAGE_SET_METADATA_FLAG_OVERWRITE = 'O'
STORAGE_SET_METADATA_FLAG_OVERWRITE_STR = "O"
# for replace, insert when the meta item not exist, otherwise update it
STORAGE_SET_METADATA_FLAG_MERGE = 'M'
STORAGE_SET_METADATA_FLAG_MERGE_STR = "M"
FDFS_RECORD_SEPERATOR = '\x01'
FDFS_FIELD_SEPERATOR = '\x02'
# common constants
FDFS_GROUP_NAME_MAX_LEN = 16
IP_ADDRESS_SIZE = 16
FDFS_PROTO_PKG_LEN_SIZE = 8
FDFS_PROTO_CMD_SIZE = 1
FDFS_PROTO_STATUS_SIZE = 1
FDFS_PROTO_IP_PORT_SIZE = (IP_ADDRESS_SIZE + 6)
FDFS_MAX_SERVERS_EACH_GROUP = 32
FDFS_MAX_GROUPS = 512
FDFS_MAX_TRACKERS = 16
FDFS_DOMAIN_NAME_MAX_LEN = 128
FDFS_MAX_META_NAME_LEN = 64
FDFS_MAX_META_VALUE_LEN = 256
FDFS_FILE_PREFIX_MAX_LEN = 16
FDFS_LOGIC_FILE_PATH_LEN = 10
FDFS_TRUE_FILE_PATH_LEN = 6
FDFS_FILENAME_BASE64_LENGTH = 27
FDFS_TRUNK_FILE_INFO_LEN = 16
FDFS_FILE_EXT_NAME_MAX_LEN = 6
FDFS_SPACE_SIZE_BASE_INDEX = 2 # storage space size based (MB)
FDFS_UPLOAD_BY_BUFFER = 1
FDFS_UPLOAD_BY_FILENAME = 2
FDFS_UPLOAD_BY_FILE = 3
FDFS_DOWNLOAD_TO_BUFFER = 1
FDFS_DOWNLOAD_TO_FILE = 2
FDFS_NORMAL_LOGIC_FILENAME_LENGTH = (
FDFS_LOGIC_FILE_PATH_LEN + FDFS_FILENAME_BASE64_LENGTH + FDFS_FILE_EXT_NAME_MAX_LEN + 1)
FDFS_TRUNK_FILENAME_LENGTH = (
FDFS_TRUE_FILE_PATH_LEN + FDFS_FILENAME_BASE64_LENGTH + FDFS_TRUNK_FILE_INFO_LEN + 1 + FDFS_FILE_EXT_NAME_MAX_LEN)
FDFS_TRUNK_LOGIC_FILENAME_LENGTH = (FDFS_TRUNK_FILENAME_LENGTH + (FDFS_LOGIC_FILE_PATH_LEN - FDFS_TRUE_FILE_PATH_LEN))
FDFS_VERSION_SIZE = 6
TRACKER_QUERY_STORAGE_FETCH_BODY_LEN = (FDFS_GROUP_NAME_MAX_LEN + IP_ADDRESS_SIZE - 1 + FDFS_PROTO_PKG_LEN_SIZE)
TRACKER_QUERY_STORAGE_STORE_BODY_LEN = (FDFS_GROUP_NAME_MAX_LEN + IP_ADDRESS_SIZE - 1 + FDFS_PROTO_PKG_LEN_SIZE + 1)
# status code, order is important!
FDFS_STORAGE_STATUS_INIT = 0
FDFS_STORAGE_STATUS_WAIT_SYNC = 1
FDFS_STORAGE_STATUS_SYNCING = 2
FDFS_STORAGE_STATUS_IP_CHANGED = 3
FDFS_STORAGE_STATUS_DELETED = 4
FDFS_STORAGE_STATUS_OFFLINE = 5
FDFS_STORAGE_STATUS_ONLINE = 6
FDFS_STORAGE_STATUS_ACTIVE = 7
FDFS_STORAGE_STATUS_RECOVERY = 9
FDFS_STORAGE_STATUS_NONE = 99
class Storage_server(object):
'''Class storage server for upload.'''
def __init__(self):
self.ip_addr = None
self.port = None
self.group_name = ''
self.store_path_index = 0
# Class tracker_header
class Tracker_header(object):
'''
Class for Pack or Unpack tracker header
struct tracker_header{
char pkg_len[FDFS_PROTO_PKG_LEN_SIZE],
char cmd,
char status,
}
'''
def __init__(self):
self.fmt = '!QBB' # pkg_len[FDFS_PROTO_PKG_LEN_SIZE] + cmd + status
self.st = struct.Struct(self.fmt)
self.pkg_len = 0
self.cmd = 0
self.status = 0
def _pack(self, pkg_len=0, cmd=0, status=0):
return self.st.pack(pkg_len, cmd, status)
def _unpack(self, bytes_stream):
self.pkg_len, self.cmd, self.status = self.st.unpack(bytes_stream)
return True
def header_len(self):
return self.st.size
def send_header(self, conn):
'''Send Tracker header to server.'''
header = self._pack(self.pkg_len, self.cmd, self.status)
try:
conn._sock.sendall(header)
except (socket.error, socket.timeout) as e:
            raise ConnectionError('[-] Error: while writing to socket: %s' % (e.args,))
def recv_header(self, conn):
'''Receive response from server.
        If successful, the class members (pkg_len, cmd, status) hold the response.
'''
try:
header = conn._sock.recv(self.header_len())
except (socket.error, socket.timeout) as e:
raise ConnectionError('[-] Error: while reading from socket: %s' % (e.args,))
self._unpack(header)
def fdfs_pack_metadata(meta_dict):
ret = ''
for key in meta_dict:
ret += '%s%c%s%c' % (key, FDFS_FIELD_SEPERATOR, meta_dict[key], FDFS_RECORD_SEPERATOR)
return ret[0:-1]
def fdfs_unpack_metadata(bytes_stream):
li = bytes_stream.split(FDFS_RECORD_SEPERATOR)
return dict([item.split(FDFS_FIELD_SEPERATOR) for item in li])
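# Illustrative round trip (based on the record/field separators defined above):
# fdfs_pack_metadata({'width': '1024'}) -> 'width\x021024'
# fdfs_unpack_metadata('width\x021024\x01height\x02768') -> {'width': '1024', 'height': '768'}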
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: utils.py
import io
import os
import sys
import stat
import platform
import configparser
from configparser import DEFAULTSECT, MissingSectionHeaderError, ParsingError
SUFFIX = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
__os_sep__ = "/" if platform.system() == 'Windows' else os.sep
def appromix(size, base=0):
    '''Convert a byte-stream size to a human-readable format.
Keyword arguments:
size: int, bytes stream size
base: int, suffix index
Return: string
'''
multiples = 1024
if size < 0:
raise ValueError('[-] Error: number must be non-negative.')
if size < multiples:
return '{0:d}{1}'.format(size, SUFFIX[base])
for suffix in SUFFIX[base:]:
if size < multiples:
return '{0:.2f}{1}'.format(size, suffix)
size = size / float(multiples)
raise ValueError('[-] Error: number too big.')
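# For illustration: appromix(512) -> '512B', appromix(2048) -> '2.00KB', appromix(5 * 1024 ** 3) -> '5.00GB'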
def get_file_ext_name(filename, double_ext=True):
li = filename.split(os.extsep)
if len(li) <= 1:
return ''
else:
if li[-1].find(__os_sep__) != -1:
return ''
if double_ext:
if len(li) > 2:
if li[-2].find(__os_sep__) == -1:
return '%s.%s' % (li[-2], li[-1])
return li[-1]
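# For illustration: get_file_ext_name('photo.jpg') -> 'jpg',
# get_file_ext_name('archive.tar.gz') -> 'tar.gz', get_file_ext_name('archive.tar.gz', double_ext=False) -> 'gz'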
class Fdfs_ConfigParser(configparser.RawConfigParser):
"""
Extends ConfigParser to allow files without sections.
This is done by wrapping read files and prepending them with a placeholder
section, which defaults to '__config__'
"""
def __init__(self, default_section=None, *args, **kwargs):
configparser.RawConfigParser.__init__(self, *args, **kwargs)
self._default_section = None
self.set_default_section(default_section or '__config__')
def get_default_section(self):
return self._default_section
def set_default_section(self, section):
self.add_section(section)
# move all values from the previous default section to the new one
try:
default_section_items = self.items(self._default_section)
self.remove_section(self._default_section)
except configparser.NoSectionError:
pass
else:
for (key, value) in default_section_items:
self.set(section, key, value)
self._default_section = section
def read(self, filenames):
if isinstance(filenames, str):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
with open(filename) as fp:
self.readfp(fp)
except IOError:
continue
else:
read_ok.append(filename)
return read_ok
def readfp(self, fp, *args, **kwargs):
stream = io.StringIO()
try:
stream.name = fp.name
except AttributeError:
pass
stream.write('[' + self._default_section + ']\n')
stream.write(fp.read())
stream.seek(0, 0)
return self._read(stream, stream.name)
def write(self, fp):
# Write the items from the default section manually and then remove them
# from the data. They'll be re-added later.
try:
default_section_items = self.items(self._default_section)
self.remove_section(self._default_section)
for (key, value) in default_section_items:
fp.write("{0} = {1}\n".format(key, value))
fp.write("\n")
except configparser.NoSectionError:
pass
configparser.RawConfigParser.write(self, fp)
self.add_section(self._default_section)
for (key, value) in default_section_items:
self.set(self._default_section, key, value)
def _read(self, fp, fpname):
"""Parse a sectioned setup file.
The sections in setup file contains a title line at the top,
indicated by a name in square brackets (`[]'), plus key/value
options lines, indicated by `name: value' format lines.
Continuations are represented by an embedded newline then
leading whitespace. Blank lines, lines beginning with a '#',
and just about everything else are ignored.
"""
cursect = None # None, or a dictionary
optname = None
lineno = 0
e = None # None, or an exception
while True:
line = fp.readline()
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or line[0] in '#;':
continue
if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
# no leading whitespace
continue
# continuation line?
if line[0].isspace() and cursect is not None and optname:
value = line.strip()
if value:
cursect[optname] = "%s\n%s" % (cursect[optname], value)
# a section header or option header?
else:
# is it a section header?
mo = self.SECTCRE.match(line)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
cursect = self._sections[sectname]
elif sectname == DEFAULTSECT:
cursect = self._defaults
else:
cursect = self._dict()
cursect['__name__'] = sectname
self._sections[sectname] = cursect
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self.OPTCRE.match(line)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if vi in ('=', ':') and ';' in optval:
# ';' is a comment delimiter only if it follows
# a spacing character
pos = optval.find(';')
if pos != -1 and optval[pos - 1].isspace():
optval = optval[:pos]
optval = optval.strip()
# allow empty values
if optval == '""':
optval = ''
optname = self.optionxform(optname.rstrip())
if optname in cursect:
if not isinstance(cursect[optname], list):
cursect[optname] = [cursect[optname]]
cursect[optname].append(optval)
else:
cursect[optname] = optval
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
e = ParsingError(fpname)
e.append(lineno, repr(line))
# if any parsing errors occurred, raise an exception
if e:
raise e
def split_remote_fileid(remote_file_id):
'''
    Split remote_file_id into (group_name, remote_file_name)
arguments:
@remote_file_id: string
@return tuple, (group_name, remote_file_name)
'''
index = remote_file_id.find(b'/')
if -1 == index:
return None
return (remote_file_id[0:index], remote_file_id[(index + 1):])
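# For illustration (the id layout is group name, '/', remote file name; the id below is hypothetical):
# split_remote_fileid(b'group1/M00/00/00/wKgBcFxyz.jpg') -> (b'group1', b'M00/00/00/wKgBcFxyz.jpg')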
def fdfs_check_file(filename):
ret = True
errmsg = ''
if not os.path.isfile(filename):
ret = False
errmsg = '[-] Error: %s is not a file.' % filename
elif not stat.S_ISREG(os.stat(filename).st_mode):
ret = False
errmsg = '[-] Error: %s is not a regular file.' % filename
return (ret, errmsg)
if __name__ == '__main__':
print(get_file_ext_name('/bc.tar.gz'))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/12/2 17:21
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : pic_plt
# @Author : LiuYan
# @Time : 2021/12/17 16:21
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
from matplotlib import rcParams
from pathlib import Path
from base.config.base_config import root_dir
pic_plt_dir = os.path.join(root_dir, 'generate/plt')
Path(pic_plt_dir).mkdir(parents=True, exist_ok=True)
# for rendering Chinese text
# 1. unified Chinese/English rendering: use the Chinese SimSun font
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.sans-serif'] = ['SimSun']  # default font
plt.rcParams['axes.unicode_minus'] = False
# 2. Chinese via SimSun, English/math via LaTeX-style stix fonts
# config = {
# "font.family": 'serif',
# "mathtext.fontset": 'stix',
# "font.serif": ['SimSun'],
# }
# rcParams.update(config)
# 3. error
# mpl.use('pgf')
# pgf_config = {
# "font.family": 'serif',
# "mathtext.fontset": 'stix',
# "pgf.rcfonts": False,
# "text.usetex": True,
# "pgf.preamble": [
# r"\usepackage{unicode-math}",
# r"\setmainfont{Times New Roman}",
# r"\usepackage{xeCJK}",
# r"\xeCJKsetup{CJKmath=true}",
# r"\setCJKmainfont{SimSun}"
# ]
# }
# rcParams.update(pgf_config)
def plt_bar(names, values, picture):
plt.figure(figsize=(9, 3))
plt.bar(names, values) # bar
# plt.suptitle('柱状图', fontproperties=myfont)
    plt.savefig(picture)  # eps, jpeg, jpg, pdf, pgf, png, ps, raw, rgba, svg, svgz, tif, tiff are all supported
plt.show()
def plt_plot(names, values, picture):
plt.figure(figsize=(9, 3))
plt.plot(names, values) # line
# plt.suptitle('折线图', fontproperties=myfont)
    plt.savefig(picture)  # eps, jpeg, jpg, pdf, pgf, png, ps, raw, rgba, svg, svgz, tif, tiff are all supported
plt.show()
def pic_plt_pie(keys: list, values: list, title: str or None) -> str:
    # draw a pie chart with matplotlib (values, matching labels, percentage labels formatted by autopct)
pic_plt_path = os.path.join(pic_plt_dir, 'plt_pie.png')
plt.pie(
x=values,
labels=keys,
autopct='%1.1f%%',
startangle=90
)
plt.title(title)
    plt.savefig(pic_plt_path)  # eps, jpeg, jpg, pdf, pgf, png, ps, raw, rgba, svg, svgz, tif, tiff are all supported
# plt.show()
plt.close()
return pic_plt_path
def plt_pie_2(labels, sizes, picture):
# Pie chart, where the slices will be ordered and plotted counter-clockwise:
explode = (0.1, 0.1, 0.1, 0.1, 0.1, 0.1) # only "explode" the 2nd slice (i.e. 'Hogs')
fig1, ax1 = plt.subplots()
labels = ['China', 'Swiss', 'USA', 'UK', 'Laos在', 'Spain']
X = [222, 42, 455, 664, 454, 334]
ax1.pie(x=X, explode=explode, labels=labels, autopct='%1.1f%%',
shadow=True, startangle=90)
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
# plt.suptitle('饼状图', fontproperties=myfont)
    plt.savefig(picture)  # eps, jpeg, jpg, pdf, pgf, png, ps, raw, rgba, svg, svgz, tif, tiff are all supported
plt.show()
def plt_scatter(names, values, picture):
plt.figure(figsize=(9, 3))
plt.scatter(names, values) # scatter
# plt.suptitle('散点图', fontproperties=myfont)
    plt.savefig(picture)  # eps, jpeg, jpg, pdf, pgf, png, ps, raw, rgba, svg, svgz, tif, tiff are all supported
plt.show()
if __name__ == '__main__':
# print(mpl.matplotlib_fname())
# Test -> plt
# plt.title(r'宋体 Times New Roman')
# plt.axis('off')
# plt.savefig('usestix.png')
# plt.show()
keys = ['Frogs', 'Hogs', 'Dogs', 'Logs']
values = [15, 30, 45, 10]
pic_plt_pie(keys, values, "国资研究")
# plt_bar(names, values, '柱状图.jpg')
# plt_plot(names, values, '折线图.jpg')
# plt_pie_2(labels, sizes, '饼状图.jpg')
# plt_scatter(names, values, '散点图.jpg')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : process
# @Author : LiuYan
# @Time : 2021/11/9 16:06
import chevron
from generate.gen_user_report_auto_generated import GeneralUserReportAutoGenerated
if __name__ == '__main__':
# from docxtpl import DocxTemplate
#
# doc = DocxTemplate("template.docx") # 模板文档
# context = {'app_name': "模块测试", 'dataset_1_company_name': "2020-07-01", 'vac_time': "8:30", "reason": "上班未打卡"} # 待替换对象
# doc.render(context) # 执行替换
# doc.save("generated_doc.docx") # 保存新的文档
# from jinja2 import Template
#
# person = {'name': 'Person', 'age': 34}
#
# tm = Template("My name is {{ per.name }} and I am {{ per.age }}, {{ per.test }} ")
# # tm = Template("My name is {{ per['name'] }} and I am {{ per['age'] }}")
# msg = tm.render(per=person)
#
# print(msg)
# s = chevron.render('Hello, {{ dataset_1_Company_Name }}!', {'dataset_1.Company_Name': 'World'})
# s = chevron.render(
# 'start_date={{start_time}}&end_date={{end_time}}&top_n={{top_n}}',
# {'start_time': 'st', 'end_time': 'et'}
# )
# project_name = '研究中心'
# input_template_path = '../data/datasource/input/template.docx'
# output_report_path = '../data/datasource/output/企业分析报告_20211223.docx'
project_name = '评价中心'
input_template_path = '../data/datasource/input/template_kaige.docx'
output_report_path = '../data/datasource/output/template_kaige_2048.docx'
gurag = GeneralUserReportAutoGenerated(
project_name=project_name,
input_template_path=input_template_path,
output_report_path=output_report_path,
start_time='2022-01-10', end_time='2022-01-17',
parameter='reportId=2048'
)
gurag.process()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : regular_extract
# @Author : LiuYan
# @Time : 2021/12/7 18:01
import re
class RegularExtract:
    def match_pattern(self, para_text: str, patterns: list) -> set:
        """
        Extract every substring that matches any of the given patterns.
        :param para_text: text to search
        :param patterns: list of regular-expression strings
        :return: set of matched substrings (whitespace-stripped)
        """
        set_match = set()
        for pattern_str in patterns:
            pattern = re.compile(pattern_str)
            # finditer returns an iterator over all non-overlapping matches (like findall, but with match objects)
            results = re.finditer(pattern, para_text)
            for result in results:
                set_match.add(result.group().strip())
        return set_match
    def match_index(self, para_text: str, pattern_str: str) -> list:
        """
        Return the start/end position of every match of the pattern.
        :param para_text: text to search
        :param pattern_str: regular-expression string
        :return: list of {'start_index', 'end_index'} dicts
        """
        list_index = []
        pattern = re.compile(pattern_str)
        results = re.finditer(pattern, para_text)
        for result in results:
            start_index, end_index = result.span()
            list_index.append(
                {
                    'start_index': start_index,
                    'end_index': end_index
                }
            )
        return list_index
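if __name__ == '__main__':
    # Added usage sketch (illustrative only, not part of the original module);
    # the demo text and patterns below are hypothetical.
    extractor = RegularExtract()
    demo_text = '本单位2021年流动资产合计1200.50万元,2022年为1350.75万元。'
    print(extractor.match_pattern(demo_text, [r'\d{4}年', r'\d+\.\d+万元']))
    # e.g. {'2021年', '2022年', '1200.50万元', '1350.75万元'}
    print(extractor.match_index(demo_text, r'\d{4}年'))
    # e.g. [{'start_index': 3, 'end_index': 8}, ...]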
import os
import time
from typing import Any
from selenium import webdriver
SNAPSHOT_JS = """
var ele = document.querySelector('div[_echarts_instance_]');
var mychart = echarts.getInstanceByDom(ele);
return mychart.getDataURL({
type: '%s',
pixelRatio: %s,
excludeComponents: ['toolbox']
});
"""
SNAPSHOT_SVG_JS = """
var element = document.querySelector('div[_echarts_instance_] div');
return element.innerHTML;
"""
def make_snapshot(
html_path: str,
file_type: str,
pixel_ratio: int = 2,
delay: int = 2,
browser="Chrome",
driver: Any = None,
):
if delay < 0:
raise Exception("Time travel is not possible")
if not driver:
if browser == "Chrome":
driver = get_chrome_driver()
elif browser == "Safari":
driver = get_safari_driver()
else:
raise Exception("Unknown browser!")
if file_type == "svg":
snapshot_js = SNAPSHOT_SVG_JS
else:
snapshot_js = SNAPSHOT_JS % (file_type, pixel_ratio)
if not html_path.startswith("http"):
html_path = "file://" + os.path.abspath(html_path)
driver.get(html_path)
time.sleep(delay)
return driver.execute_script(snapshot_js)
# def get_chrome_driver():
# options = webdriver.ChromeOptions()
# options.add_argument("headless")
# return webdriver.Chrome(options=options)
def get_chrome_driver():
options = webdriver.ChromeOptions()
options.add_argument("headless")
options.add_argument('--no-sandbox')
options.add_argument('--disable-gpu')
options.add_argument('--disable-dev-shm-usage')
return webdriver.Chrome(options=options)
def get_safari_driver():
return webdriver.Safari()
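if __name__ == '__main__':
    # Added usage sketch (illustrative only): render a pyecharts HTML page to PNG.
    # 'render.html' is a hypothetical file and a locally installed chromedriver is assumed.
    import base64

    data_url = make_snapshot('render.html', file_type='png', delay=2)
    # for png/jpeg the injected JS returns a base64 data URL ("data:image/png;base64,...")
    _, b64_payload = data_url.split(',', 1)
    with open('chart.png', 'wb') as f:
        f.write(base64.b64decode(b64_payload))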
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : utils
# @Author : LiuYan
# @Time : 2021/11/9 16:50
from docx.shared import Pt
from docx.shared import RGBColor
from docx.oxml.ns import qn
def paragraph_attribute(pa, size, family, r=0x00, g=0x00, b=0x00, bold=False):
    # Apply size, family, boldness and RGB colour to a python-docx run's font,
    # and set the east-Asian font so CJK characters use the same family.
    pa.font.size = Pt(size)
    pa.font.name = family
    pa.font.bold = bold
    pa.font.color.rgb = RGBColor(r, g, b)
    # rFonts.set() returns None, so there is nothing meaningful to return
    pa._element.rPr.rFonts.set(qn('w:eastAsia'), family)
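# Added usage sketch (illustrative only): paragraph_attribute is expected to receive a
# python-docx Run; the file name and text below are hypothetical.
def _demo_paragraph_attribute():
    from docx import Document

    doc = Document()
    run = doc.add_paragraph().add_run('示例文本 demo text')
    # 12 pt bold SimSun (宋体) in dark grey
    paragraph_attribute(run, size=12, family='宋体', r=0x33, g=0x33, b=0x33, bold=True)
    doc.save('demo_font.docx')
# call _demo_paragraph_attribute() manually to produce the sample document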
def judge_str_pre_suf(s1: str, s2: str) -> bool:
    """
    Check whether any suffix of s1 overlaps with a prefix of s2.
    Overlap found -> True
    No overlap -> False
    :param s1: str
    :param s2: str
    :return: bool
    """
    m = min(len(s1), len(s2))
    for i in range(1, m + 1):
        if s1[-i:] == s2[:i]:
            return True
    return False
if __name__ == '__main__':
s1 = 'company&'
b = judge_str_pre_suf(s1='company&', s2='&company_title&')
print(b)
print(s1[-1:])
# -*- coding: utf-8 -*-
# @Time : 2023/3/23 09:43
# @Author : bruxellse_li
# @File : generate_report.py
# @Project : 从word中提取指定表格
from docx import Document
import os, time
from pathlib import Path
import requests
from flask import request
from flask import Flask, send_file
# from transform_doc_to_docx import doc2docx, closesoft
import subprocess
from generate.gen_user_report_auto_generated import main_process
UPLOAD_FOLDER = r'data' # 上传路径
Path(UPLOAD_FOLDER).mkdir(parents=True, exist_ok=True)
abs_path = os.path.split(os.path.realpath(__file__))[0]
def doc2docx(doc_path, docx_path):
    # Convert a .doc file to .docx with LibreOffice in headless mode
    out_dir = os.path.dirname(docx_path) or '.'
    subprocess.call(['libreoffice', '--headless', '--convert-to', 'docx', doc_path, '--outdir', out_dir])
    # LibreOffice writes <basename>.docx into out_dir; rename it to the requested target path
    converted_path = os.path.join(out_dir, os.path.splitext(os.path.basename(doc_path))[0] + '.docx')
    if converted_path != docx_path:
        os.rename(converted_path, docx_path)
def generate_report(template_path, document_path, report_name, object):
    """
    template_path: download path of the report template file
    document_path: download path of the half-finished document
    report_name: name of the generated report
    object: payload dict holding 'data_object' (values to fill in) and 'tables_dict' (table captions)
    :return:
    """
template_request = template_path
doc_request = document_path
report_name = report_name + ".docx"
data_object = object["data_object"]
tables_dict = object["tables_dict"]
current_filename = time.strftime('%Y_%m_%d-%H_%M_%S') + ".docx"
save_path = UPLOAD_FOLDER + "/" + current_filename
    # Check the template's real extension: ".doc" is a substring of ".docx",
    # so a plain `in` test would send .docx files down the conversion branch.
    template_filename = template_request.split("/")[-1]
    template_ext = os.path.splitext(template_filename)[1].lower()
    if template_ext == ".docx":
        template_path = os.path.join(UPLOAD_FOLDER, template_filename)
    elif template_ext == ".doc":
        temp_template_path = os.path.join(UPLOAD_FOLDER, template_filename)
        # derive the target .docx path from the .doc path
        template_path = os.path.splitext(temp_template_path)[0] + '.docx'
        # convert .doc to .docx
        doc2docx(temp_template_path, template_path)
    else:
        return "上传文件格式有误,当前仅支持doc 和 docx 格式,请选择正确文件重新上传!"
    doc_filename = doc_request.split("/")[-1]
    doc_ext = os.path.splitext(doc_filename)[1].lower()
    if doc_ext == ".docx":
        half_work_path = os.path.join(UPLOAD_FOLDER, doc_filename)
    elif doc_ext == ".doc":
        temp_doc_path = os.path.join(UPLOAD_FOLDER, doc_filename)
        # derive the target .docx path from the .doc path
        doc_path = os.path.splitext(temp_doc_path)[0] + '.docx'
        # convert .doc to .docx
        doc2docx(temp_doc_path, doc_path)
        half_work_path = doc_path
    else:
        return "上传文件格式有误,当前仅支持doc 和 docx 格式,请选择正确文件重新上传!"
main_process(half_work_path, tables_dict, template_path, report_name, data_object, save_path)
# send_path = os.path.join(UPLOAD_FOLDER, report_name)
# return send_file(send_path, as_attachment=True)
if __name__ == "__main__":
template_path = "data/wKjIbGQb3gaARMRCAACAACgxnK856.docx"
document_path = "data/wKjIbGQb66OAH-8eAAQAAJHFYeM24.docx"
report_name = "财务报告"
object = {
"tables_dict": {
"table13": "(2)以名义金额计量的资产名称、数量等情况,以及以名义金额计量理由的说明。",
"table5": "收入费用表(2)",
"table4": "收入费用表(1)",
"table3": "资产负债表续表2",
"table2": "资产负债表续表1",
"table1": "资产负债表",
"table9": "(17)其他应付款明细信息如下:",
"table8": "(9)无形资产明细信息如下:",
"table10": "(24)其他收入明细信息如下:",
"table7": "(7)固定资产明细信息如下:",
"table11": "(25)业务活动费用明细信息如下:",
"table6": "(1)货币资金明细信息如下:",
"table12": "(28)商品和服务费用明细信息如下:"
},
"data_object": {
"负债占比": [],
"费用占比": [],
"流动资产占比": [],
"流动负债占比": [],
"收入占比": [],
"finance": {
"currentLiabilitiesCompose": "",
"cashRatio": "",
"revenueExpensesRatio": "",
"currentRatioRemark": "说明本单位流动资产偿还短期债务的能力弱",
"beInDebtChangeRatioRemark": "",
"assetLiabilityRatioRemark": "说明本单位财务风险低",
"totalExpensesCompose": "",
"debtComposition": "流动负债占"",非流动负债占""",
"totalRevenueCompose": "",
"totalExpensesChangeRatioRemark": "",
"composition": "流动资产占比,非流动资产占比",
"unitDebtComposition": "流动负债和非流动负债",
"cashRatioRemark": "说明本单位利用现金和现金等价物偿还短期债务的能力弱",
"affordableHouseNewRation": "",
"totalAssetsChangeRatioRemark": "",
"currentRatio": "",
"currentAssetsCompose": "",
"publicInfrastructureNewRatio": "",
"totalRevenueComposeDetail": "",
"assetLiabilityRatio": "",
"unitAssetComposition": "流动资产和非流动资产",
"fixedAssetsDepreciationRatio": "",
"revenueExpensesRatioRemark": "大于",
"totalRevenueChangeRatioRemark": "",
"otherRemark": "",
"nonCurrentAssetsCompose": ""
},
"资产占比": [],
"info": {
"internalControl": "2021年,本单位加强学习国家和省关于内部控制的文件。建立健全了单位层面的内部控制体系和制度,健全了预算、收支、采购、建设、资产和合同的内控流程和制度,把内部控制落实在业务流程中,实现了不相容岗位相互分离、形成相互制约、相互监督的工作机制;实现了内部授权审批控制。",
"unitName": "安丘速度单位",
"unitCall": "本部门",
"mainFunctions": "无资料数据",
"year": "2023",
"unitBudgetLevel": "",
"institutionalSituation": "无资料数据",
"performanceManagement": "2021年,本单位按照绩效管理要求对照设定预算绩效目标、绩效指标的成本指标、产出指标、效益指标、满意度指标等具体内容,开展项目绩效目标申报、运行监控和自评工作。通过预算绩效管理对工作中存在的薄弱环节作出针对性查漏补缺和持续完善。",
"LastYear": "2022",
"personnelSituation": "无资料数据",
"unitType": "",
"budgetManagement": "2021年,本单位严格按照《预算法》、《会计法》、《政府会计制度》和上级的文件建立健全财务制度;严格执行财经纪律和各项财务制度;强化预算管理,加强对银行存款和现金的管理;单位对年终决算高度重视,组织专人负责编制决算报告,对决算数据进行了严格审核,认真分析并应用到下年的预算工作。",
"assetManagement": "2021年,本单位资产实行分类管理,建立健全了资产内部管理制度;单位加强对实物资产和无形资产的管理,明确相关部门和岗位的职责权限,强化对配置、使用和处置等关键环节的管控;明确资产使用和保管责任人,落实资产使用人在资产管理中的责任。",
"pppProject": "本单位无PPP项目。",
"careerAchievements": "无资料数据"
},
"非流动资产占比": []
}
}
generate_report(template_path, document_path, report_name, object)
#!/usr/bin/env python
# coding=utf-8
"""
@project : 500_资讯
@author : bruxelles_li
@file : lac_ner_text.py
@ide : PyCharm
@time : 2022-07-04 09:19:43
"""
from LAC import LAC
import pandas as pd
import tqdm
import re
lac = LAC(mode="lac")
# text_path = "领导讲话_1370.xlsx"
# Extract person names (PER) from a sentence
def lac_username(sentences):
    user_name_list = []
    # reuse the LAC model loaded at module level
    lac_result = lac.run(sentences)
    for index, lac_label in enumerate(lac_result[1]):
        if lac_label == "PER":
            user_name_list.append(lac_result[0][index])
    return user_name_list
# Extract organization names (ORG) from a sentence
def lac_organize_name(sentences):
    user_name_list = []
    lac_result = lac.run(sentences)
    for index, lac_label in enumerate(lac_result[1]):
        if lac_label == "ORG":
            user_name_list.append(lac_result[0][index])
    return user_name_list
# Extract place names (LOC) from a sentence
def lac_location_name(sentences):
    user_name_list = []
    lac_result = lac.run(sentences)
    for index, lac_label in enumerate(lac_result[1]):
        if lac_label == "LOC":
            user_name_list.append(lac_result[0][index])
    return user_name_list
def match_text_one(rule, text):
    # Build one alternation pattern from the ';'-separated rules, escape regex
    # metacharacters, then replace every match in the text with "A".
    rules = '|'.join(rule.split(';')).strip('\n')
    replaced_rules = rules.replace('.', r'\.') \
        .replace('*', r'\*') \
        .replace('(', r'\(') \
        .replace(')', r'\)') \
        .replace('+', '.+')  # '+' in a rule acts as a wildcard
    pattern = re.compile(replaced_rules)
    match_result = re.sub(pattern, "A", text)
    return match_result
if __name__ == '__main__':
print(lac_organize_name("广元市朝天区曾家镇人民政府辖11个村1社区78个村民小组2个居民小组,全镇4784户16355人。财政供养人口67人,其中行政人员25人,事业人员39人,机关工勤3人。相比上年增加6人,增加的原因是工作变动人员正常调出。"))
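    # Added usage sketch for match_text_one (hypothetical rule/text, illustrative only):
    # every substring matched by one of the ';'-separated rules is replaced with "A".
    print(match_text_one("广元市;朝天区", "广元市朝天区曾家镇人民政府"))  # -> AA曾家镇人民政府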
# print(lac_username(sentences="习近平指出阿里分析师研报,权威,专业,及时,全面,助您挖掘潜力主题机会! 【概述】泡财经获悉,4月29日晚间,上汽集团(600104.SH)公告称,一季度净利润55.16亿元,同比下降19.44%。"))
# data_df = pd.read_excel(text_path, nrows=1).astype(str)
# result_list = []
# for idx, row in tqdm.tqdm(data_df.iterrows()):
# title = row['title']
# a_user = lac_username(title)
# a_organize = lac_organize_name(title)
# a_location = lac_location_name(title)
# if a_user:
# user_rule = '|'.join(a_user).strip()
# pattern0 = re.compile(r'' + user_rule)
# result_one = re.sub(pattern0, 'A', title)
# title = result_one
# if a_organize:
# a_organize_rule = '|'.join(a_organize).strip()
# pattern1 = re.compile(r'' + a_organize_rule)
# result_two = re.sub(pattern1, 'B', result_one)
# title = result_two
# if a_location:
# a_location_rule = '|'.join(a_location).strip()
# pattern2 = re.compile(r'' + a_location_rule)
# print(pattern2)
# result_three = re.sub(pattern2, 'C', result_two)
# print(result_three)
# title = result_three
#
# row['title'] = title
# result_list.append(row)
# print(result_list)
#
# # new_one = a_user + a_organize + a_location
# # rule = "|".join(new_one)
# # pattern = re.compile(r'' + rule)
# # result_one = re.sub(pattern, "A", title)
# # title = result_one
# # print(title)
# python 3.8.5
beautifulsoup4==4.12.2
chevron==0.14.0
cx_Oracle==8.3.0
docx==0.2.4
fdfs_client==4.0.7
flask_cors==3.0.10
flask_sqlalchemy==3.0.3
LAC==2.1.2
matplotlib==3.5.1
numpy==1.21.5
pandas==1.4.2
pyecharts==2.0.2
pymysql==1.0.3
requests==2.27.1
selenium==4.8.3
snapshot_selenium==0.0.2
tqdm==4.64.0
gunicorn
Flask
python-docx
xlsxwriter
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : app_config
# @Author : LiuYan&bruxelles_li
# @Time : 2021/4/22 10:51
import os
import multiprocessing
from pathlib import Path
bind = '0.0.0.0:4000'  # IP address and port to bind
backlog = 512  # size of the listen queue
# chdir = '/home/zzsn/liuyan/bin'  # working directory gunicorn should switch to
timeout = 300  # timeout -> raised for the ZZSN_NLP platform's Belt-and-Road element extraction (file) requirement
# worker_class = 'gevent'  # use gevent workers; sync mode (the default) is also possible
# workers = multiprocessing.cpu_count()  # number of worker processes (12 on this host)
workers = 3  # on the low-resource 13G server, lower this to 1 if the load is too high
threads = 50  # number of threads started by each worker process
loglevel = 'error'  # log level; this only controls the error log, the access-log level cannot be set
access_log_format = '%(t)s %(p)s %(h)s "%(r)s" %(s)s %(L)s %(b)s "%(f)s" "%(a)s"'  # gunicorn access-log format (the error-log format cannot be configured)
"""
The meaning of each format field is as follows:
h remote address
l '-'
u currently '-', may be user name in future releases
t date of the request
r status line (e.g. ``GET / HTTP/1.1``)
s status
b response length or '-'
f referer
a user agent
T request time in seconds
D request time in microseconds
L request time in decimal seconds
p process ID
"""
_tmp_path = os.path.dirname(os.path.abspath(__file__))
_tmp_path = os.path.join(_tmp_path, 'log')
Path(_tmp_path).mkdir(parents=True, exist_ok=True)
accesslog = os.path.join(_tmp_path, 'gunicorn_access.log')  # access-log file
errorlog = os.path.join(_tmp_path, 'gunicorn_error.log')  # error-log file
# gunicorn -c app_config.py app_run:app -D --daemon
#!/bin/sh
# description: auto_run
# Monitor for the Sichuan report-generation (SCBG) service
# Check whether the service is already running: if it is, wait a while and check again; if not, start it
function start_interface() {
    INTERFACE_IS_START=`ps -ef | grep scbg_app_config.py | grep -v grep | wc -l`
    if [ $INTERFACE_IS_START -eq 4 ] ; then
        # already running with the expected number of gunicorn processes, nothing to do
        usleep
    else
        echo "=========Service Will Start=========="
        # cd /data/lzc/scbg-python/SCBG-PYTHON && nohup gunicorn -c scbg_app_config.py app_run:app 2>&1 &
        cd /opt/SCBG-PYTHON && exec nohup gunicorn -c scbg_app_config.py app_run:app 2>&1 &
        echo "=========Service Start Completed!========"
    fi
}
# Option 1: use the echo command to keep a foreground process alive (when started inside a container)
# Option 2: use the custom interface-check function above to keep the service running
while true
do
echo "PYTHON SERVICE is running..."
start_interface
sleep 30m
done
\ No newline at end of file
!function(){function n(n,e,t,r,u,o,a){try{var i=n[o](a),c=i.value}catch(s){return void t(s)}i.done?e(c):Promise.resolve(c).then(r,u)}System.register(["./index-legacy.90883ee8.js","./vendor-legacy.bbc7855f.js","./index-legacy.f25f109a.js"],(function(e){"use strict";var t,r,u,o,a,i,c,s,l,f,d;return{setters:[function(n){t=n.l},function(n){r=n.d,u=n.U,o=n.L,a=n.u,i=n.r,c=n.c,s=n.F,l=n.I,f=n.B,d=n.b},function(){}],execute:function(){e("default",r({name:"app",components:{UserOutlined:u,LockOutlined:o},setup:function(){var e=a(),r=i({username:"",password:"",password2:""}),p=function(n){console.log(n,r)},m=function(n){console.log(n)},v=function(){var u,o=(u=regeneratorRuntime.mark((function n(){return regeneratorRuntime.wrap((function(n){for(;;)switch(n.prev=n.next){case 0:return n.next=2,t(r);case 2:n.sent.isHandleSuccess&&e.replace("/home");case 4:case"end":return n.stop()}}),n)})),function(){var e=this,t=arguments;return new Promise((function(r,o){var a=u.apply(e,t);function i(e){n(a,r,o,i,c,"next",e)}function c(e){n(a,r,o,i,c,"throw",e)}i(void 0)}))});return function(){return o.apply(this,arguments)}}();return function(){return c("div",{class:"login"},[c(s,{layout:"vertical",model:r,onFinish:p,onFinishFailed:m},{default:function(){return[c(s.Item,null,{default:function(){return[c(l,{value:r.username,"onUpdate:value":function(n){return r.username=n},placeholder:"Username"},{prefix:function(){return c(u,{style:"color: rgba(0, 0, 0, 0.25)"},null)}})]}}),c(s.Item,null,{default:function(){return[c(l,{value:r.password,"onUpdate:value":function(n){return r.password=n},type:"password",placeholder:"Password"},{prefix:function(){return c(o,{style:"color: rgba(0, 0, 0, 0.25)"},null)}})]}}),c(s.Item,null,{default:function(){return[c(f,{type:"primary","html-type":"submit",disabled:""===r.username||""===r.password,onClick:v},{default:function(){return[d("Log in")]}})]}})]}})])}}}))}}}))}();
import{l as e}from"./index.f608f11e.js";import{d as a,U as s,L as l,u as o,r,c as n,F as t,I as d,B as u,b as p}from"./vendor.98dba853.js";/* empty css */const i=a({name:"app",components:{UserOutlined:s,LockOutlined:l},setup(){const a=o(),i=r({username:"",password:"",password2:""}),m=e=>{console.log(e,i)},c=e=>{console.log(e)},f=async()=>{(await e(i)).isHandleSuccess&&a.replace("/home")};return()=>n("div",{class:"login"},[n(t,{layout:"vertical",model:i,onFinish:m,onFinishFailed:c},{default:()=>[n(t.Item,null,{default:()=>[n(d,{value:i.username,"onUpdate:value":e=>i.username=e,placeholder:"Username"},{prefix:()=>n(s,{style:"color: rgba(0, 0, 0, 0.25)"},null)})]}),n(t.Item,null,{default:()=>[n(d,{value:i.password,"onUpdate:value":e=>i.password=e,type:"password",placeholder:"Password"},{prefix:()=>n(l,{style:"color: rgba(0, 0, 0, 0.25)"},null)})]}),n(t.Item,null,{default:()=>[n(u,{type:"primary","html-type":"submit",disabled:""===i.username||""===i.password,onClick:f},{default:()=>[p("Log in")]})]})]})])}});export{i as default};
!function(){function n(n,e,t,r,u,o,a){try{var i=n[o](a),c=i.value}catch(l){return void t(l)}i.done?e(c):Promise.resolve(c).then(r,u)}System.register(["./vendor-legacy.bbc7855f.js","./index-legacy.f25f109a.js","./index-legacy.90883ee8.js"],(function(e){"use strict";var t,r,u,o,a,i,c,l,s,f,d,p;return{setters:[function(n){t=n.d,r=n.U,u=n.L,o=n.u,a=n.r,i=n.c,c=n.b,l=n.j,s=n.F,f=n.I,d=n.B},function(){},function(n){p=n.s}],execute:function(){e("default",t({name:"app",components:{UserOutlined:r,LockOutlined:u},setup:function(){o();var e=a({username:"",password:""}),t=function(){var t,r=(t=regeneratorRuntime.mark((function n(){var t;return regeneratorRuntime.wrap((function(n){for(;;)switch(n.prev=n.next){case 0:t={username:e.username,password:e.password},p.dispatch("app/login",t);case 2:case"end":return n.stop()}}),n)})),function(){var e=this,r=arguments;return new Promise((function(u,o){var a=t.apply(e,r);function i(e){n(a,u,o,i,c,"next",e)}function c(e){n(a,u,o,i,c,"throw",e)}i(void 0)}))});return function(){return r.apply(this,arguments)}}();return function(){return i("div",{class:"login"},[i("h1",null,[c("用户登陆"),i(l,{to:"/forget",class:"login-forget"},{default:function(){return[c("找回密码")]}})]),i(s,{layout:"vertical",model:e},{default:function(){return[i(s.Item,null,{default:function(){return[i(f,{value:e.username,"onUpdate:value":function(n){return e.username=n},placeholder:"Username"},{prefix:function(){return i(r,{style:"color: rgba(0, 0, 0, 0.25)"},null)}})]}}),i(s.Item,null,{default:function(){return[i(f,{value:e.password,"onUpdate:value":function(n){return e.password=n},type:"password",placeholder:"Password"},{prefix:function(){return i(u,{style:"color: rgba(0, 0, 0, 0.25)"},null)}})]}}),i(s.Item,null,{default:function(){return[i(d,{block:!0,type:"primary","html-type":"submit",onClick:t},{default:function(){return[c("登 录")]}})]}})]}})])}}}))}}}))}();
import{d as e,U as a,L as s,u as l,r as o,c as r,b as t,j as n,F as u,I as d,B as p}from"./vendor.98dba853.js";/* empty css */import{s as m}from"./index.f608f11e.js";const i=e({name:"app",components:{UserOutlined:a,LockOutlined:s},setup(){l();const e=o({username:"",password:""}),i=async()=>{const a={username:e.username,password:e.password};m.dispatch("app/login",a)};return()=>r("div",{class:"login"},[r("h1",null,[t("用户登陆"),r(n,{to:"/forget",class:"login-forget"},{default:()=>[t("找回密码")]})]),r(u,{layout:"vertical",model:e},{default:()=>[r(u.Item,null,{default:()=>[r(d,{value:e.username,"onUpdate:value":a=>e.username=a,placeholder:"Username"},{prefix:()=>r(a,{style:"color: rgba(0, 0, 0, 0.25)"},null)})]}),r(u.Item,null,{default:()=>[r(d,{value:e.password,"onUpdate:value":a=>e.password=a,type:"password",placeholder:"Password"},{prefix:()=>r(s,{style:"color: rgba(0, 0, 0, 0.25)"},null)})]}),r(u.Item,null,{default:()=>[r(p,{block:!0,type:"primary","html-type":"submit",onClick:i},{default:()=>[t("登 录")]})]})]})])}});export{i as default};
.dataset .database-config{padding:10px 10px 0 0;border:1px solid #eee;border-radius:4px;margin-left:0;margin-bottom:20px}.dataset .database-config h3{width:100px;text-align:right}.dataset .database-config .ant-form-item{margin-bottom:10px}.dataset.sql{display:flex;flex-direction:row}.dataset.sql .ant-form-item-explain,.dataset.sql .ant-form-item-extra{min-height:0!important}.dataset.sql .ant-form-item{margin-bottom:14px}.dataset.sql .sql-form{width:100%}.dataset.sql .sql-test{width:0}.dataset.sql .sql-test h3{display:none}.dataset.sql.test .sql-form{width:50%}.dataset.sql.test .sql-test{border:1px solid green;background-color:#f5f5f5;border-radius:2px;height:100%;width:48%;margin:0 1% 24px;padding:12px;align-self:flex-end;box-sizing:border-box}.dataset.sql.test .sql-test h3{display:block;color:green}.sql-table .ant-table.ant-table-bordered>.ant-table-container>.ant-table-content>table>thead>tr>th{background-color:#3896de;color:#fff;text-align:center}.label-info{line-height:24px;height:24px;font-size:14px;color:#717171}.label-info-label{display:inline-block;font-weight:600;min-width:80px}.label-info-list{padding:0 40px}.label-info-list-row{border-bottom:1px dashed #ccc}
System.register(["./index-legacy.90883ee8.js","./vendor-legacy.bbc7855f.js"],(function(e){"use strict";var t,n,a,l,i,d,u,r,o,c,s,f,_,v,m,p,y,b,g,h,x,k,I,w,C,F;return{setters:[function(e){t=e.s,n=e.r},function(e){a=e.d,l=e.k,i=e.o,d=e.J,u=e.r,r=e.F,o=e.c,c=e.b,s=e.W,f=e.X,_=e.B,v=e.Y,m=e.Z,p=e.$,y=e.V,b=e.Q,g=e.H,h=e.K,x=e.x,k=e.I,I=e.O,w=e.P,C=e.T,F=e.m}],execute:function(){var D={add:"新增",modify:"修改",delete:"删除",none:""};e("default",a({name:"ProductDataSet",setup:function(){var e=l();console.log("route:: ",e),i((function(){t.dispatch("dataset/getDatasetFields",{dataset_id:e.query.dataset_id,page_size:10,page_index:1})}));var a=d([]),q=d("none"),z=d(""),O=u({id:0,field_name:"",field_type:0,field_describe:""}),S=u({field_name:[{required:!0,message:"请输入字段名称"}],field_type:[{required:!0,message:"请输入字段类型"}],field_describe:[{required:!0,message:"请输入字段说明"}]}),T=r.useForm(O,S),R=T.resetFields,U=T.validate,j=T.validateInfos,K=(T.mergeValidateInfo,function(){R(),q.value="add"}),P=function(e){console.log("delete item:: ",e);var n=e instanceof Array?e:[e.id];b.confirm({title:"确定要删除吗?",okText:"确定",cancelText:"取消",onOk:function(){t.dispatch("dataset/deleteDatasetFields",{list_id:n})}})},V=[{title:"ID",dataIndex:"id",resizable:!0,width:50},{title:"所属数据集",dataIndex:"dataset_id",resizable:!0,width:200,customRender:function(t){return t.record,o(g,null,[e.query.dataset_name])}},{title:"字段名称",dataIndex:"field_name"},{title:"字段类型",dataIndex:"field_type",customRender:function(e){var t=e.record;return o(g,null,[0==t.field_type?"数字":"字符串"])}},{title:"说明",dataIndex:"field_describe"},{title:"创建时间",dataIndex:"create_time",resizable:!0,width:200},{key:"action",title:"操作",width:135,customRender:function(e){var t=e.record;return o(g,null,[o(_,{size:"small",type:"primary",style:{margin:"0 5px 5px 0"},onClick:function(){return e=t,O.field_name=e.field_name,O.field_type=e.field_type,O.field_describe=e.field_describe,O.id=e.id,void(q.value="modify");var e}},{default:function(){return[c("修改")]}}),o(_,{size:"small",type:"ghost",danger:!0,onClick:function(){return P(t)}},{default:function(){return[c("删除")]}})])}}];return function(){return o(g,null,[o("h2",{class:"title"},[c("字段配置管理"),o(s,{onSearch:function(e){z.value=e},class:"title-search",placeholder:"请输入字段名称"},null)]),o(f,null,null),o("div",{class:"table"},[o(_,{type:"ghost",danger:!0,icon:o(v,null,null),class:"table-delete ".concat(0==a.value.length?"hidden":""),onClick:function(){return P(a.value)}},{default:function(){return[c("删除")]}}),o(_,{type:"primary",icon:o(m,null,null),class:"table-add",onClick:K},{default:function(){return[c("新增")]}}),o(_,{type:"ghost",icon:o(p,null,null),class:"table-back",onClick:function(){return n.back()}},{default:function(){return[c("返回")]}}),o(y,{rowKey:"id",bordered:!0,loading:t.state.dataset.loading,columns:V,dataSource:(l=t.state.dataset.datasetsFields,z.value?l.filter((function(e){return e.field_name.indexOf(z.value)>-1})):l),rowSelection:{onChange:function(e){a.value=e}}},null)]),o(b,{title:function(){return o(g,null,[o("strong",null,[D[q.value]]),c(" 字段集配置")])},width:800,visible:"none"!=q.value,closable:!0,onOk:function(){console.log("itemData:: ",O),U().then((function(){O.dataset_id=parseInt(e.query.dataset_id),0==O.id?t.dispatch("dataset/postDatasetField",O):t.dispatch("dataset/putDatasetField",O),q.value="none"})).catch((function(e){F.warn("表单字段填写有误,请检查后再次提交")}))},okText:"确定",cancelText:"取消",onCancel:function(){return 
q.value="none"},destroyOnClose:!1},{default:function(){return[o(r,{name:"field"},{default:function(){return[o(h,x({label:"字段名称"},j.field_name),{default:function(){return[o(k,{value:O.field_name,"onUpdate:value":function(e){return O.field_name=e}},null)]}}),o(h,x({label:"字段类型"},j.field_type),{default:function(){return[o(I,{value:O.field_type,"onUpdate:value":function(e){return O.field_type=e}},{default:function(){return[o(w,{value:0},{default:function(){return[c("数字")]}}),o(w,{value:1},{default:function(){return[c("字符串")]}})]}})]}}),o(h,x({label:"字段信息"},j.field_describe),{default:function(){return[o(C,{value:O.field_describe,"onUpdate:value":function(e){return O.field_describe=e}},null)]}})]}})]}})]);var l}}}))}}}));
import{s as e,r as a}from"./index.f608f11e.js";import{d as l,k as t,o as d,J as s,r as i,F as n,c as o,b as r,W as u,X as c,B as f,Y as m,Z as _,$ as p,V as v,Q as y,H as b,K as g,x as h,I as x,O as k,P as I,T as w,m as C}from"./vendor.98dba853.js";const F={add:"新增",modify:"修改",delete:"删除",none:""},D=l({name:"ProductDataSet",setup(){const l=t();console.log("route:: ",l),d((()=>{e.dispatch("dataset/getDatasetFields",{dataset_id:l.query.dataset_id,page_size:10,page_index:1})}));const D=s([]),q=s("none"),z=s(""),O=i({id:0,field_name:"",field_type:0,field_describe:""}),T=i({field_name:[{required:!0,message:"请输入字段名称"}],field_type:[{required:!0,message:"请输入字段类型"}],field_describe:[{required:!0,message:"请输入字段说明"}]}),{resetFields:S,validate:R,validateInfos:U,mergeValidateInfo:j}=n.useForm(O,T),K=()=>{S(),q.value="add"},P=a=>{console.log("delete item:: ",a);const l=a instanceof Array?a:[a.id];y.confirm({title:"确定要删除吗?",okText:"确定",cancelText:"取消",onOk:()=>{e.dispatch("dataset/deleteDatasetFields",{list_id:l})}})},V=[{title:"ID",dataIndex:"id",resizable:!0,width:50},{title:"所属数据集",dataIndex:"dataset_id",resizable:!0,width:200,customRender:({record:e})=>o(b,null,[l.query.dataset_name])},{title:"字段名称",dataIndex:"field_name"},{title:"字段类型",dataIndex:"field_type",customRender:({record:e})=>o(b,null,[0==e.field_type?"数字":"字符串"])},{title:"说明",dataIndex:"field_describe"},{title:"创建时间",dataIndex:"create_time",resizable:!0,width:200},{key:"action",title:"操作",width:135,customRender:({record:e})=>o(b,null,[o(f,{size:"small",type:"primary",style:{margin:"0 5px 5px 0"},onClick:()=>{return a=e,O.field_name=a.field_name,O.field_type=a.field_type,O.field_describe=a.field_describe,O.id=a.id,void(q.value="modify");var a}},{default:()=>[r("修改")]}),o(f,{size:"small",type:"ghost",danger:!0,onClick:()=>P(e)},{default:()=>[r("删除")]})])}];return()=>{return o(b,null,[o("h2",{class:"title"},[r("字段配置管理"),o(u,{onSearch:e=>{z.value=e},class:"title-search",placeholder:"请输入字段名称"},null)]),o(c,null,null),o("div",{class:"table"},[o(f,{type:"ghost",danger:!0,icon:o(m,null,null),class:"table-delete "+(0==D.value.length?"hidden":""),onClick:()=>P(D.value)},{default:()=>[r("删除")]}),o(f,{type:"primary",icon:o(_,null,null),class:"table-add",onClick:K},{default:()=>[r("新增")]}),o(f,{type:"ghost",icon:o(p,null,null),class:"table-back",onClick:()=>a.back()},{default:()=>[r("返回")]}),o(v,{rowKey:"id",bordered:!0,loading:e.state.dataset.loading,columns:V,dataSource:(t=e.state.dataset.datasetsFields,z.value?t.filter((e=>e.field_name.indexOf(z.value)>-1)):t),rowSelection:{onChange:e=>{D.value=e}}},null)]),o(y,{title:()=>o(b,null,[o("strong",null,[F[q.value]]),r(" 字段集配置")]),width:800,visible:"none"!=q.value,closable:!0,onOk:()=>{console.log("itemData:: ",O),R().then((()=>{O.dataset_id=parseInt(l.query.dataset_id),0==O.id?e.dispatch("dataset/postDatasetField",O):e.dispatch("dataset/putDatasetField",O),q.value="none"})).catch((e=>{C.warn("表单字段填写有误,请检查后再次提交")}))},okText:"确定",cancelText:"取消",onCancel:()=>q.value="none",destroyOnClose:!1},{default:()=>[o(n,{name:"field"},{default:()=>[o(g,h({label:"字段名称"},U.field_name),{default:()=>[o(x,{value:O.field_name,"onUpdate:value":e=>O.field_name=e},null)]}),o(g,h({label:"字段类型"},U.field_type),{default:()=>[o(k,{value:O.field_type,"onUpdate:value":e=>O.field_type=e},{default:()=>[o(I,{value:0},{default:()=>[r("数字")]}),o(I,{value:1},{default:()=>[r("字符串")]})]})]}),o(g,h({label:"字段信息"},U.field_describe),{default:()=>[o(w,{value:O.field_describe,"onUpdate:value":e=>O.field_describe=e},null)]})]})]})]);var 
t}}});export{D as default};
System.register(["./index-legacy.90883ee8.js","./vendor-legacy.bbc7855f.js"],(function(e){"use strict";var t,n,a,l,r,u,i,o,c,d,s,f,p,m,v,_,g,h,b,y,x,w,k,z,C,T,S,j,I,U,D;return{setters:[function(e){t=e.s,n=e.U,a=e.D},function(e){l=e.d,r=e.o,u=e.J,i=e.r,o=e.a0,c=e.F,d=e.c,s=e.b,f=e.W,p=e.X,m=e.B,v=e.Y,_=e.Z,g=e.V,h=e.Q,b=e.H,y=e.K,x=e.x,w=e.I,k=e.a1,z=e.a2,C=e.T,T=e.O,S=e.P,j=e.a3,I=e.l,U=e.m,D=e._}],execute:function(){var O={add:"新增",modify:"修改",report:"报告",none:""};e("default",l({name:"ProductDataSet",setup:function(){r((function(){e(1,10)}));var e=function(e,n){t.dispatch("template/getTemplates",{page_size:n,page_index:e})},l=u([]),q=u("none"),Y=u([]),F=i({range:[o().add(-1,"week"),o()],type:0}),R=i({start_time:"",end_time:"",report_type:0}),H=function(e){switch(console.log("v::",e.target.value),e.target.value){case 0:F.range[0]=F.range[1].add(-1,"week");break;case 1:F.range[0]=F.range[1].add(-1,"month");break;case 2:F.range[0]=F.range[1].add(-3,"month");break;case 3:F.range[0]=F.range[1].add(-1,"year")}},L=i({id:0,user_id:0,project_name:"",datasource_name:"",template_name:"",template_path:"",template_describe:"",topic_name:""}),M=function(e){Y.value=e.fileList,"done"==e.file.status&&e.file.response.isHandleSuccess&&(L.template_path=e.file.response.resultData.file_path,Y.value=[])},P=function(){R.start_time=F.range[0].toString(),R.end_time=F.range[1].toString();var e=I.stringify(R),t=document.createElement("a");t.href="/api/report?"+e,t.download=t.href,t.click(),t.remove(),U.success("报告生成成功")},A=i({project_name:[{required:!0,message:"请输入项目名称"}],datasource_name:[{required:!0,message:"请输入数据源名称"}],template_name:[{required:!0,message:"请输入报告模板名称"}],template_path:[{required:!0,message:"请上传报告模板"}],template_describe:[{required:!0,message:"请输入报告模板说明"}]}),B=c.useForm(L,A),J=B.resetFields,K=B.validate,N=B.validateInfos,Q=(B.mergeValidateInfo,function(){J(),Y.value=[],q.value="add"}),V=function(e){console.log("delete item:: ",e);var n=e instanceof Array?e:[e.id];h.confirm({title:"确定要删除吗?",okText:"确定",cancelText:"取消",onOk:function(){t.dispatch("template/deleteTemplates",{list_id:n})}})},W=[{title:"NO",dataIndex:"id",resizable:!0,width:50,customRender:function(e){return e.record,e.index+1}},{title:"所属项目",dataIndex:"project_name",resizable:!0,width:200},{title:"数据源",dataIndex:"datasource_name",resizable:!0,width:200},{title:"报告名称",dataIndex:"template_name",resizable:!0,width:200,customRender:function(e){var t=e.record;return d(b,null,[t.template_name,d(D,{title:"点击开始配置报告周期"},{default:function(){return[d(m,{onClick:function(){return e=t,R.template_id=e.id,void(q.value="report");var e},size:"small",type:"primary",ghost:!0},{default:function(){return[s("生成报告")]}})]}})])}},{title:"模板地址",dataIndex:"template_path",resizable:!0,width:200,customRender:function(e){var t=e.record;return d(D,{title:"点击下载模板文件"},{default:function(){return[d("a",{href:a+t.template_path,download:a+t.template_path},[t.template_path])]}})}},{title:"说明",dataIndex:"template_describe",resizable:!0,width:200},{title:"创建时间",dataIndex:"create_time",resizable:!0,width:200},{key:"action",title:"操作",width:80,customRender:function(e){var t=e.record;return d(b,null,[d(m,{size:"small",type:"primary",style:{margin:"0 5px 5px 0"},onClick:function(){return e=t,Y.value=[],L.project_name=e.project_name,L.datasource_name=e.datasource_name,L.template_name=e.template_name,L.template_path=e.template_path,L.template_describe=e.template_describe,L.id=e.id,void(q.value="modify");var 
e}},{default:function(){return[s("修改")]}}),d(m,{size:"small",type:"ghost",danger:!0,onClick:function(){return V(t)}},{default:function(){return[s("删除")]}})])}}];return function(){return d(b,null,[d("h2",{class:"title"},[s("报告模板配置管理"),d(f,{onSearch:function(){},class:"title-search",placeholder:"请输入报告模板名称"},null)]),d(p,null,null),d("div",{class:"table"},[d(m,{type:"ghost",danger:!0,icon:d(v,null,null),class:"table-delete ".concat(0==l.value.length?"hidden":""),onClick:function(){return V(l.value)}},{default:function(){return[s("删除")]}}),d(m,{type:"primary",icon:d(_,null,null),class:"table-add",onClick:Q},{default:function(){return[s("新增")]}}),d(g,{rowKey:"id",bordered:!0,loading:t.state.template.loading,columns:W,dataSource:t.state.template.templates.rows,rowSelection:{onChange:function(e){l.value=e}},pagination:{showQuickJumper:!0,showSizeChanger:!0,pageSize:t.state.template.templates.pageSize,total:t.state.template.templates.total,current:t.state.template.templates.pageNo,onChange:function(t,n){return e(t,n)}}},null)]),d(h,{title:function(){return d(b,null,[d("strong",null,[O[q.value]]),s(" 报告模板配置")])},width:800,visible:"add"==q.value||"modify"==q.value,closable:!0,onOk:function(){console.log("itemData:: ",L),K().then((function(){0==L.id?t.dispatch("template/postTemplate",L):t.dispatch("template/putTemplate",L),q.value="none"})).catch((function(e){U.warn("表单数据填写有误,请检查后再次提交")}))},okText:"确定",cancelText:"取消",onCancel:function(){return q.value="none"},destroyOnClose:!1},{default:function(){return[d(c,{name:"template"},{default:function(){return[d(y,x({label:"所属项目"},N.project_name),{default:function(){return[d(w,{value:L.project_name,"onUpdate:value":function(e){return L.project_name=e}},null)]}}),d(y,x({label:"数据源"},N.datasource_name),{default:function(){return[d(w,{value:L.datasource_name,"onUpdate:value":function(e){return L.datasource_name=e}},null)]}}),d(y,x({label:"报告名称"},N.template_name),{default:function(){return[d(w,{value:L.template_name,"onUpdate:value":function(e){return L.template_name=e}},null)]}}),d(y,x({label:"模板文件"},N.template_path),{default:function(){return[d(w,{value:L.template_path,"onUpdate:value":function(e){return L.template_path=e},style:{width:"calc( 100% - 86px )"}},null)," ",d(k,{action:n,onChange:M,fileList:Y.value},{default:function(){return[d(m,{icon:d(z,null,null)},{default:function(){return[s("上传")]}})]}})]}}),d(y,x({label:"模板信息"},N.template_describe),{default:function(){return[d(C,{value:L.template_describe,"onUpdate:value":function(e){return L.template_describe=e}},null)]}})]}})]}}),d(h,{title:function(){return"报告配置生成"},visible:"report"==q.value,okText:"生成",cancelText:"取消",onCancel:function(){return q.value="none"},onOk:P,destroyOnClose:!1},{default:function(){return[d("h2",{style:{width:"100%",textAlign:"center",marginBottom:"20px"}},[s("请配置报告的类型及周期")]),d(c,null,{default:function(){return[d(y,{label:"报告类型"},{default:function(){return[d(T,{value:R.report_type,"onUpdate:value":function(e){return R.report_type=e}},{default:function(){return[d(S,{value:0},{default:function(){return[s("Word")]}}),d(S,{value:1},{default:function(){return[s("PDF")]}}),d(S,{value:2},{default:function(){return[s("HTML")]}})]}})]}}),d(y,{label:"报告周期"},{default:function(){return[d(T,{onChange:H,value:F.type,"onUpdate:value":function(e){return 
F.type=e}},{default:function(){return[d(S,{value:0},{default:function(){return[s("周报")]}}),d(S,{value:1},{default:function(){return[s("月报")]}}),d(S,{value:2},{default:function(){return[s("季报")]}}),d(S,{value:3},{default:function(){return[s("年报")]}}),d(S,{value:4},{default:function(){return[s("自定义")]}})]}})]}}),d(y,{label:"选择日期"},{default:function(){return[d(j,{format:"YYYY-MM-DD",value:F.range,"onUpdate:value":function(e){return F.range=e}},null)]}})]}})]}})])}}}))}}}));
import{s as e,U as a,D as t}from"./index.f608f11e.js";import{d as l,o as n,J as d,r as s,a0 as r,F as o,c as i,b as u,W as p,X as m,B as c,Y as _,Z as v,V as f,Q as h,H as g,K as b,x as y,I as x,a1 as w,a2 as k,T as z,O as C,P as T,a3 as j,l as I,m as S,_ as U}from"./vendor.98dba853.js";const D={add:"新增",modify:"修改",report:"报告",none:""},O=l({name:"ProductDataSet",setup(){n((()=>{l(1,10)}));const l=(a,t)=>{e.dispatch("template/getTemplates",{page_size:t,page_index:a})},O=d([]),q=d("none"),Y=d([]),F=s({range:[r().add(-1,"week"),r()],type:0}),R=s({start_time:"",end_time:"",report_type:0}),H=e=>{switch(console.log("v::",e.target.value),e.target.value){case 0:F.range[0]=F.range[1].add(-1,"week");break;case 1:F.range[0]=F.range[1].add(-1,"month");break;case 2:F.range[0]=F.range[1].add(-3,"month");break;case 3:F.range[0]=F.range[1].add(-1,"year")}},L=s({id:0,user_id:0,project_name:"",datasource_name:"",template_name:"",template_path:"",template_describe:"",topic_name:""}),M=e=>{Y.value=e.fileList,"done"==e.file.status&&e.file.response.isHandleSuccess&&(L.template_path=e.file.response.resultData.file_path,Y.value=[])},P=()=>{R.start_time=F.range[0].toString(),R.end_time=F.range[1].toString();const e=I.stringify(R),a=document.createElement("a");a.href="/api/report?"+e,a.download=a.href,a.click(),a.remove(),S.success("报告生成成功")},A=s({project_name:[{required:!0,message:"请输入项目名称"}],datasource_name:[{required:!0,message:"请输入数据源名称"}],template_name:[{required:!0,message:"请输入报告模板名称"}],template_path:[{required:!0,message:"请上传报告模板"}],template_describe:[{required:!0,message:"请输入报告模板说明"}]}),{resetFields:B,validate:J,validateInfos:K,mergeValidateInfo:N}=o.useForm(L,A),Q=()=>{B(),Y.value=[],q.value="add"},V=a=>{console.log("delete item:: ",a);const t=a instanceof Array?a:[a.id];h.confirm({title:"确定要删除吗?",okText:"确定",cancelText:"取消",onOk:()=>{e.dispatch("template/deleteTemplates",{list_id:t})}})},W=[{title:"NO",dataIndex:"id",resizable:!0,width:50,customRender:({record:e,index:a})=>a+1},{title:"所属项目",dataIndex:"project_name",resizable:!0,width:200},{title:"数据源",dataIndex:"datasource_name",resizable:!0,width:200},{title:"报告名称",dataIndex:"template_name",resizable:!0,width:200,customRender:({record:e})=>i(g,null,[e.template_name,i(U,{title:"点击开始配置报告周期"},{default:()=>[i(c,{onClick:()=>{return a=e,R.template_id=a.id,void(q.value="report");var a},size:"small",type:"primary",ghost:!0},{default:()=>[u("生成报告")]})]})])},{title:"模板地址",dataIndex:"template_path",resizable:!0,width:200,customRender:({record:e})=>i(U,{title:"点击下载模板文件"},{default:()=>[i("a",{href:t+e.template_path,download:t+e.template_path},[e.template_path])]})},{title:"说明",dataIndex:"template_describe",resizable:!0,width:200},{title:"创建时间",dataIndex:"create_time",resizable:!0,width:200},{key:"action",title:"操作",width:80,customRender:({record:e})=>i(g,null,[i(c,{size:"small",type:"primary",style:{margin:"0 5px 5px 0"},onClick:()=>{return a=e,Y.value=[],L.project_name=a.project_name,L.datasource_name=a.datasource_name,L.template_name=a.template_name,L.template_path=a.template_path,L.template_describe=a.template_describe,L.id=a.id,void(q.value="modify");var a}},{default:()=>[u("修改")]}),i(c,{size:"small",type:"ghost",danger:!0,onClick:()=>V(e)},{default:()=>[u("删除")]})])}];return()=>i(g,null,[i("h2",{class:"title"},[u("报告模板配置管理"),i(p,{onSearch:()=>{},class:"title-search",placeholder:"请输入报告模板名称"},null)]),i(m,null,null),i("div",{class:"table"},[i(c,{type:"ghost",danger:!0,icon:i(_,null,null),class:"table-delete 
"+(0==O.value.length?"hidden":""),onClick:()=>V(O.value)},{default:()=>[u("删除")]}),i(c,{type:"primary",icon:i(v,null,null),class:"table-add",onClick:Q},{default:()=>[u("新增")]}),i(f,{rowKey:"id",bordered:!0,loading:e.state.template.loading,columns:W,dataSource:e.state.template.templates.rows,rowSelection:{onChange:e=>{O.value=e}},pagination:{showQuickJumper:!0,showSizeChanger:!0,pageSize:e.state.template.templates.pageSize,total:e.state.template.templates.total,current:e.state.template.templates.pageNo,onChange:(e,a)=>l(e,a)}},null)]),i(h,{title:()=>i(g,null,[i("strong",null,[D[q.value]]),u(" 报告模板配置")]),width:800,visible:"add"==q.value||"modify"==q.value,closable:!0,onOk:()=>{console.log("itemData:: ",L),J().then((()=>{0==L.id?e.dispatch("template/postTemplate",L):e.dispatch("template/putTemplate",L),q.value="none"})).catch((e=>{S.warn("表单数据填写有误,请检查后再次提交")}))},okText:"确定",cancelText:"取消",onCancel:()=>q.value="none",destroyOnClose:!1},{default:()=>[i(o,{name:"template"},{default:()=>[i(b,y({label:"所属项目"},K.project_name),{default:()=>[i(x,{value:L.project_name,"onUpdate:value":e=>L.project_name=e},null)]}),i(b,y({label:"数据源"},K.datasource_name),{default:()=>[i(x,{value:L.datasource_name,"onUpdate:value":e=>L.datasource_name=e},null)]}),i(b,y({label:"报告名称"},K.template_name),{default:()=>[i(x,{value:L.template_name,"onUpdate:value":e=>L.template_name=e},null)]}),i(b,y({label:"模板文件"},K.template_path),{default:()=>[i(x,{value:L.template_path,"onUpdate:value":e=>L.template_path=e,style:{width:"calc( 100% - 86px )"}},null)," ",i(w,{action:a,onChange:M,fileList:Y.value},{default:()=>[i(c,{icon:i(k,null,null)},{default:()=>[u("上传")]})]})]}),i(b,y({label:"模板信息"},K.template_describe),{default:()=>[i(z,{value:L.template_describe,"onUpdate:value":e=>L.template_describe=e},null)]})]})]}),i(h,{title:()=>"报告配置生成",visible:"report"==q.value,okText:"生成",cancelText:"取消",onCancel:()=>q.value="none",onOk:P,destroyOnClose:!1},{default:()=>[i("h2",{style:{width:"100%",textAlign:"center",marginBottom:"20px"}},[u("请配置报告的类型及周期")]),i(o,null,{default:()=>[i(b,{label:"报告类型"},{default:()=>[i(C,{value:R.report_type,"onUpdate:value":e=>R.report_type=e},{default:()=>[i(T,{value:0},{default:()=>[u("Word")]}),i(T,{value:1},{default:()=>[u("PDF")]}),i(T,{value:2},{default:()=>[u("HTML")]})]})]}),i(b,{label:"报告周期"},{default:()=>[i(C,{onChange:H,value:F.type,"onUpdate:value":e=>F.type=e},{default:()=>[i(T,{value:0},{default:()=>[u("周报")]}),i(T,{value:1},{default:()=>[u("月报")]}),i(T,{value:2},{default:()=>[u("季报")]}),i(T,{value:3},{default:()=>[u("年报")]}),i(T,{value:4},{default:()=>[u("自定义")]})]})]}),i(b,{label:"选择日期"},{default:()=>[i(j,{format:"YYYY-MM-DD",value:F.range,"onUpdate:value":e=>F.range=e},null)]})]})]})])}});export{O as default};
!function(){function t(t,e){var r=Object.keys(t);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(t);e&&(n=n.filter((function(e){return Object.getOwnPropertyDescriptor(t,e).enumerable}))),r.push.apply(r,n)}return r}function e(e){for(var n=1;n<arguments.length;n++){var c=null!=arguments[n]?arguments[n]:{};n%2?t(Object(c),!0).forEach((function(t){r(e,t,c[t])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(c)):t(Object(c)).forEach((function(t){Object.defineProperty(e,t,Object.getOwnPropertyDescriptor(c,t))}))}return e}function r(t,e,r){return e in t?Object.defineProperty(t,e,{value:r,enumerable:!0,configurable:!0,writable:!0}):t[e]=r,t}System.register(["./vendor-legacy.bbc7855f.js","./index-legacy.90883ee8.js"],(function(t){"use strict";var r,n,c,u,i,o,l,a;return{setters:[function(t){r=t.c,n=t.E,c=t.G,u=t.d,i=t.o,o=t.R,l=t.H},function(t){a=t.s}],execute:function(){var p=[{title:"模板管理",path:"/product-template",icon:r(n,null,null),children:[{title:"模板管理",path:"/product/template"}]},{title:"数据源配置",path:"/product-dataset",icon:r(c,null,null),children:[{title:"API数据集配置",path:"/product/urldataset"},{title:"SQL数据集配置",path:"/product/sqldataset"}]}];t("default",u({name:"app",setup:function(){return i((function(){a.commit("app/set_sider",e(e({},a.state.app.sider),{},{hidden:!1,menus:p,title:"报告配置管理"}))})),function(){return r(l,null,[r(o,null,null)])}}}))}}}))}();
!function(){var e=document.createElement("style");e.innerHTML='.header{width:100%;height:64px;background:white;box-shadow:0 0 5px 2px #ddd;line-height:64px;display:flex;padding:0 25px;position:fixed;z-index:1;top:0}.header-logo{margin-right:40px}.header-logo-img{width:56px;height:56px;position:relative;margin-right:6px;font-size:45px}.header-logo-title{font-size:24px}.header-menu{display:flex;height:64px;line-height:64px;font-size:18px;margin:0 20px}.header-menu .menuItem{color:#2086ff;margin:0 16px;position:relative;transition:all .4s}.header-menu .menuItem:before{content:" ";position:absolute;width:0;border-radius:4px;height:3px;background-color:#2086ff;box-sizing:border-box;left:0;right:0;top:52px;transition:width .4s;margin:auto}.header-menu .menuItem.active,.header-menu .menuItem:hover{color:#0046ef}.header-menu .menuItem.active:before,.header-menu .menuItem:hover:before{background-color:#0046ef;display:block;width:50%}.header-user{margin-left:auto;font-size:18px;padding:0 15px;min-width:136px;text-align:center}.header-user>a{color:#2086ff;transition:all .3s}.header-user>a:hover{color:#0046ef}.header-user>a:hover>.circle{border-color:#0046ef}.header-user>a>.circle{padding:2px;border-radius:50%;border:2px solid #2086ff;position:relative;margin-right:6px}.header-user-menu>.ant-dropdown-menu-item{padding:6px 40px;color:#2086ff}.header-user-menu>.ant-dropdown-menu-item:hover{color:#0046ef}.siderbar-menu{padding:20px 0}.siderbar-menu .ant-menu{border-right:none;text-align:center;font-weight:400;font-size:15px;transition:all .4s}.siderbar-menu .ant-menu .ant-menu-submenu{margin-bottom:5px}.siderbar-menu .ant-menu .ant-menu-submenu .ant-menu-submenu-title{padding-right:12px}.siderbar-menu .ant-menu .ant-menu-submenu .ant-menu-title-content,.siderbar-menu .ant-menu .ant-menu-item .ant-menu-title-content{margin-right:8px}.siderbar-menu .ant-menu .ant-menu-submenu.ant-menu-item-selected,.siderbar-menu .ant-menu .ant-menu-item.ant-menu-item-selected,.siderbar-menu .ant-menu .ant-menu-submenu:hover,.siderbar-menu .ant-menu .ant-menu-item:hover{font-weight:400;background-color:#def}.siderbar-menu .ant-menu .ant-menu-submenu.ant-menu-item-selected .ant-menu-item-icon,.siderbar-menu .ant-menu .ant-menu-item.ant-menu-item-selected .ant-menu-item-icon,.siderbar-menu .ant-menu .ant-menu-submenu:hover .ant-menu-item-icon,.siderbar-menu .ant-menu .ant-menu-item:hover .ant-menu-item-icon{border:1px solid #2186ff}.siderbar-menu .ant-menu .ant-menu-submenu .ant-menu-item-icon,.siderbar-menu .ant-menu .ant-menu-item .ant-menu-item-icon{transition:all .3s;border:1px solid gray;font-weight:600;width:30px;height:30px;line-height:30px;border-radius:50%}.siderbar-menu .ant-menu .ant-menu-sub.ant-menu-inline>.ant-menu-item,.siderbar-menu .ant-menu .ant-menu-sub.ant-menu-inline>.ant-menu-submenu>.ant-menu-submenu-title{margin:0}.siderbar-menu-collapsed{position:absolute;bottom:0;right:0}.home{display:flex;padding-top:64px;width:100%;box-sizing:border-box}.home-content{width:80%;overflow-y:auto;overflow-x:hidden;margin-left:20%;padding:8px;min-height:calc(100vh - 68px)}.home-content-div{width:100%;height:100%;background:white;padding:16px}.home-siderbar{width:20%;background:white;text-align:center;height:calc(100vh - 64px);box-sizing:border-box;position:fixed}.home-info{width:20%;margin:12px 12px 12px 
0;background:white;padding:24px}.loading-page{width:100%;height:100%;display:flex;flex-direction:column;justify-content:center;align-items:center;padding:20%}\n',document.head.appendChild(e),System.register(["./vendor-legacy.bbc7855f.js","./index-legacy.90883ee8.js"],(function(e){"use strict";var n,t,i,r,a,u,o,d,s,l,m,c,h,p,f,g,b,x,v,y,w,K;return{setters:[function(e){n=e.k,t=e.u,i=e.n,r=e.c,a=e.S,u=e.j,o=e.D,d=e.U,s=e.p,l=e.M,m=e.q,c=e.s,h=e.b,p=e.d,f=e.r,g=e.t,b=e.v,x=e.w,v=e.x,y=e.y,w=e.R},function(e){K=e.s}],execute:function(){var k=function(e){var p=n(),f=t(),g=i((function(){return p.path})),b=i((function(){return"string"==typeof e.logo?r("img",{src:e.logo,alt:"logo",class:"header-logo-img"},null):e.logo}));return r("div",{class:"header"},[r("div",{class:"header-logo"},[b.value||r(a,{class:"header-logo-img"},null),r("span",{class:"header-logo-title"},[e.title])]),r("div",{class:"header-menu"},[e.menus&&e.menus.map((function(e){return r(u,{to:e.path,class:g.value.startsWith(e.path)?"menuItem active":"menuItem"},{default:function(){return[e.title]}})}))]),e.hiddenUser?e.rightNode:r("div",{class:"header-user"},[r(o,{trigger:["click","hover"]},{default:function(){return[r("a",{class:"ant-dropdown-link"},[r(d,{class:"circle"},null),e.user&&e.user.info?e.user.info.username:""," ",r(s,null,null)])]},overlay:function(){return r(l,{class:"header-user-menu"},{default:function(){return[e.userLinks&&e.userLinks.map((function(e){return r(m,{onClick:function(){return f.push(e.path)}},{default:function(){return[e.title]}})})),r(c,null,null),r(m,{onClick:function(){return e.user&&e.user.logout&&e.user.logout()}},{default:function(){return[h("退出")]}})]}})}})])])};var j=p({name:"SiderBar",props:{menus:{type:Object,require:!1},classNames:{type:Array,required:!1},hidden:{type:Boolean,required:!1},width:{type:Number,required:!1},theme:{type:String,required:!1},collapsed:{type:Boolean,required:!1},title:{type:String,required:!1}},setup:function(e,i){var a,u=i.slots;console.log("props:: ",e);var o=t(),d=n(),s=f({rootSubmenuKeys:(null===(a=e.menus)||void 0===a?void 0:a.filter((function(e){return e.children&&e.children.length})).map((function(e){return e.path})))||[],openKeys:[],selectedKeys:[]});g((function(){if(!(s.selectedKeys.length>0||s.openKeys.length>0)){var n=e.menus?function e(n){var t=n.find((function(n){return d.path==n.path||n.children&&n.children.length&&e(n.children)}));if(t)return(null==t?void 0:t.path)==d.path?t:(s.openKeys=[t.path],e(null==t?void 0:t.children))}(e.menus):null,t=n&&n.path?[n.path]:[];t.toString()!=s.selectedKeys.toString()&&(s.selectedKeys=t)}}));var c=function(e){var n=e.find((function(e){return-1===s.openKeys.indexOf(e)}));-1===s.rootSubmenuKeys.indexOf(n)?s.openKeys=e:s.openKeys=n?[n]:[]};console.log("selectedKeys",d.path,s.selectedKeys,e.collapsed);var h=function e(n){var t;if(n&&n.children&&n.children.length)return r(x,{title:n.title,icon:n.icon,key:n.path},"function"==typeof(i=t=n.children.map((function(n){return e(n)})))||"[object Object]"===Object.prototype.toString.call(i)&&!b(i)?t:{default:function(){return[t]}});var i,a=n;return r(m,v({key:a.path},a,{onClick:function(){a.path&&(s.selectedKeys=[a.path],o.push(a.path))}}),{default:function(){return[a.title]}})};return function(){return r("div",{class:"siderbar-menu"+(e.classNames?e.classNames.join(" 
"):""),style:{width:e.width||200,display:e.hidden?"none":"block"}},[r("h3",{style:{color:"dark"==e.theme?"#ffffff":"#212121"}},[e.title]),e.menus?r(l,{style:{width:e.width},mode:"inline",selectedKeys:s.selectedKeys,"onUpdate:selectedKeys":function(e){return s.selectedKeys=e},openKeys:s.openKeys,"onUpdate:openKeys":function(e){return s.openKeys=e},onOpenChange:c,selectable:!0,theme:e.theme,inlineCollapsed:e.collapsed},{default:function(){return[e.menus&&e.menus.map((function(e){return h(e)}))]}}):null,u.default?u.default():null])}}}),S=function(e){var n=i((function(){var n=e.sider||{};return n.hidden?{content:{width:"100%",marginLeft:0},sider:{display:"none",width:0}}:{content:{width:"calc( 100% - ".concat(n.width||200,"px )"),marginLeft:"".concat(n.width||200,"px")},sider:{display:"block",width:"".concat(n.width||200,"px")}}}));return r("div",{class:e.classNames?e.classNames.join(" "):""},[r(k,e.header,null),r("div",{class:"home"},[r("div",{class:"home-siderbar",style:n.value.sider},[r(j,e.sider,null)]),r("div",{class:"home-content",style:n.value.content},[r("div",{class:"home-content-div"},[r(w,{name:"default"},null)])])])])};e("default",p({setup:function(){return K.dispatch("app/getCurrent"),function(){return K.state.app.loading?r("div",{class:"loading-page"},[r(y,null,null)]):r(S,{sider:K.state.app.sider,header:K.state.app.header},null)}}}))}}}))}();
!function(){function e(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function t(t){for(var n=1;n<arguments.length;n++){var c=null!=arguments[n]?arguments[n]:{};n%2?e(Object(c),!0).forEach((function(e){r(t,e,c[e])})):Object.getOwnPropertyDescriptors?Object.defineProperties(t,Object.getOwnPropertyDescriptors(c)):e(Object(c)).forEach((function(e){Object.defineProperty(t,e,Object.getOwnPropertyDescriptor(c,e))}))}return t}function r(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}System.register(["./index-legacy.90883ee8.js","./vendor-legacy.bbc7855f.js"],(function(e){"use strict";var r,n,c,o,i;return{setters:[function(e){r=e.s},function(e){n=e.d,c=e.c,o=e.b,i=e.H}],execute:function(){e("default",n({name:"app",setup:function(){return r.commit("app/set_sider",t(t({},r.state.app.sider),{},{hidden:!0})),function(){return c(i,null,[c("h1",null,[o("这是About页面")])])}}}))}}}))}();
import{k as e,u as s,n as t,c as l,S as n,j as a,D as d,U as o,p as i,M as r,q as c,s as u,b as p,d as h,r as m,t as y,v as f,w as g,x as v,y as K,R as b}from"./vendor.98dba853.js";import{s as w}from"./index.f608f11e.js";const k=h=>{const m=e(),y=s(),f=t((()=>m.path)),g=t((()=>"string"==typeof h.logo?l("img",{src:h.logo,alt:"logo",class:"header-logo-img"},null):h.logo));return l("div",{class:"header"},[l("div",{class:"header-logo"},[g.value||l(n,{class:"header-logo-img"},null),l("span",{class:"header-logo-title"},[h.title])]),l("div",{class:"header-menu"},[h.menus&&h.menus.map((e=>l(a,{to:e.path,class:f.value.startsWith(e.path)?"menuItem active":"menuItem"},{default:()=>[e.title]})))]),h.hiddenUser?h.rightNode:l("div",{class:"header-user"},[l(d,{trigger:["click","hover"]},{default:()=>[l("a",{class:"ant-dropdown-link"},[l(o,{class:"circle"},null),h.user&&h.user.info?h.user.info.username:""," ",l(i,null,null)])],overlay:()=>l(r,{class:"header-user-menu"},{default:()=>[h.userLinks&&h.userLinks.map((e=>l(c,{onClick:()=>y.push(e.path)},{default:()=>[e.title]}))),l(u,null,null),l(c,{onClick:()=>h.user&&h.user.logout&&h.user.logout()},{default:()=>[p("退出")]})]})})])])};const j=h({name:"SiderBar",props:{menus:{type:Object,require:!1},classNames:{type:Array,required:!1},hidden:{type:Boolean,required:!1},width:{type:Number,required:!1},theme:{type:String,required:!1},collapsed:{type:Boolean,required:!1},title:{type:String,required:!1}},setup:(t,{slots:n})=>{var a;console.log("props:: ",t);const d=s(),o=e(),i=m({rootSubmenuKeys:(null==(a=t.menus)?void 0:a.filter((e=>e.children&&e.children.length)).map((e=>e.path)))||[],openKeys:[],selectedKeys:[]});y((()=>{if(i.selectedKeys.length>0||i.openKeys.length>0)return;const e=s=>{const t=s.find((s=>o.path==s.path||s.children&&s.children.length&&e(s.children)));if(t)return(null==t?void 0:t.path)==o.path?t:(i.openKeys=[t.path],e(null==t?void 0:t.children))},s=t.menus?e(t.menus):null,l=s&&s.path?[s.path]:[];l.toString()!=i.selectedKeys.toString()&&(i.selectedKeys=l)}));const u=e=>{const s=e.find((e=>-1===i.openKeys.indexOf(e)));-1===i.rootSubmenuKeys.indexOf(s)?i.openKeys=e:i.openKeys=s?[s]:[]};console.log("selectedKeys",o.path,i.selectedKeys,t.collapsed);const p=e=>{if(e&&e.children&&e.children.length){let t;return l(g,{title:e.title,icon:e.icon,key:e.path},"function"==typeof(s=t=e.children.map((e=>p(e))))||"[object Object]"===Object.prototype.toString.call(s)&&!f(s)?t:{default:()=>[t]})}{const s=e;return l(c,v({key:s.path},s,{onClick:()=>{s.path&&(i.selectedKeys=[s.path],d.push(s.path))}}),{default:()=>[s.title]})}var s};return()=>l("div",{class:"siderbar-menu"+(t.classNames?t.classNames.join(" "):""),style:{width:t.width||200,display:t.hidden?"none":"block"}},[l("h3",{style:{color:"dark"==t.theme?"#ffffff":"#212121"}},[t.title]),t.menus?l(r,{style:{width:t.width},mode:"inline",selectedKeys:i.selectedKeys,"onUpdate:selectedKeys":e=>i.selectedKeys=e,openKeys:i.openKeys,"onUpdate:openKeys":e=>i.openKeys=e,onOpenChange:u,selectable:!0,theme:t.theme,inlineCollapsed:t.collapsed},{default:()=>[t.menus&&t.menus.map((e=>p(e)))]}):null,n.default?n.default():null])}});const S=e=>{const s=t((()=>{const s=e.sider||{};return s.hidden?{content:{width:"100%",marginLeft:0},sider:{display:"none",width:0}}:{content:{width:`calc( 100% - ${s.width||200}px )`,marginLeft:`${s.width||200}px`},sider:{display:"block",width:`${s.width||200}px`}}}));return l("div",{class:e.classNames?e.classNames.join(" 
"):""},[l(k,e.header,null),l("div",{class:"home"},[l("div",{class:"home-siderbar",style:s.value.sider},[l(j,e.sider,null)]),l("div",{class:"home-content",style:s.value.content},[l("div",{class:"home-content-div"},[l(b,{name:"default"},null)])])])])};var q=h({setup:()=>(w.dispatch("app/getCurrent"),()=>w.state.app.loading?l("div",{class:"loading-page"},[l(K,null,null)]):l(S,{sider:w.state.app.sider,header:w.state.app.header},null))});export{q as default};
var t=Object.defineProperty,e=Object.defineProperties,r=Object.getOwnPropertyDescriptors,a=Object.getOwnPropertySymbols,l=Object.prototype.hasOwnProperty,p=Object.prototype.propertyIsEnumerable,o=(e,r,a)=>r in e?t(e,r,{enumerable:!0,configurable:!0,writable:!0,value:a}):e[r]=a;import{c as s,E as n,G as i,d as c,o as d,R as u,H as m}from"./vendor.98dba853.js";import{s as b}from"./index.f608f11e.js";const f=[{title:"模板管理",path:"/product-template",icon:s(n,null,null),children:[{title:"模板管理",path:"/product/template"}]},{title:"数据源配置",path:"/product-dataset",icon:s(i,null,null),children:[{title:"API数据集配置",path:"/product/urldataset"},{title:"SQL数据集配置",path:"/product/sqldataset"}]}],h=c({name:"app",setup:()=>(d((()=>{var t;b.commit("app/set_sider",(t=((t,e)=>{for(var r in e||(e={}))l.call(e,r)&&o(t,r,e[r]);if(a)for(var r of a(e))p.call(e,r)&&o(t,r,e[r]);return t})({},b.state.app.sider),e(t,r({hidden:!1,menus:f,title:"报告配置管理"}))))})),()=>s(m,null,[s(u,null,null)]))});export{h as default};
.header{width:100%;height:64px;background:white;box-shadow:0 0 5px 2px #ddd;line-height:64px;display:flex;padding:0 25px;position:fixed;z-index:1;top:0}.header-logo{margin-right:40px}.header-logo-img{width:56px;height:56px;position:relative;margin-right:6px;font-size:45px}.header-logo-title{font-size:24px}.header-menu{display:flex;height:64px;line-height:64px;font-size:18px;margin:0 20px}.header-menu .menuItem{color:#2086ff;margin:0 16px;position:relative;transition:all .4s}.header-menu .menuItem:before{content:" ";position:absolute;width:0;border-radius:4px;height:3px;background-color:#2086ff;box-sizing:border-box;left:0;right:0;top:52px;transition:width .4s;margin:auto}.header-menu .menuItem.active,.header-menu .menuItem:hover{color:#0046ef}.header-menu .menuItem.active:before,.header-menu .menuItem:hover:before{background-color:#0046ef;display:block;width:50%}.header-user{margin-left:auto;font-size:18px;padding:0 15px;min-width:136px;text-align:center}.header-user>a{color:#2086ff;transition:all .3s}.header-user>a:hover{color:#0046ef}.header-user>a:hover>.circle{border-color:#0046ef}.header-user>a>.circle{padding:2px;border-radius:50%;border:2px solid #2086ff;position:relative;margin-right:6px}.header-user-menu>.ant-dropdown-menu-item{padding:6px 40px;color:#2086ff}.header-user-menu>.ant-dropdown-menu-item:hover{color:#0046ef}.siderbar-menu{padding:20px 0}.siderbar-menu .ant-menu{border-right:none;text-align:center;font-weight:400;font-size:15px;transition:all .4s}.siderbar-menu .ant-menu .ant-menu-submenu{margin-bottom:5px}.siderbar-menu .ant-menu .ant-menu-submenu .ant-menu-submenu-title{padding-right:12px}.siderbar-menu .ant-menu .ant-menu-submenu .ant-menu-title-content,.siderbar-menu .ant-menu .ant-menu-item .ant-menu-title-content{margin-right:8px}.siderbar-menu .ant-menu .ant-menu-submenu.ant-menu-item-selected,.siderbar-menu .ant-menu .ant-menu-item.ant-menu-item-selected,.siderbar-menu .ant-menu .ant-menu-submenu:hover,.siderbar-menu .ant-menu .ant-menu-item:hover{font-weight:400;background-color:#def}.siderbar-menu .ant-menu .ant-menu-submenu.ant-menu-item-selected .ant-menu-item-icon,.siderbar-menu .ant-menu .ant-menu-item.ant-menu-item-selected .ant-menu-item-icon,.siderbar-menu .ant-menu .ant-menu-submenu:hover .ant-menu-item-icon,.siderbar-menu .ant-menu .ant-menu-item:hover .ant-menu-item-icon{border:1px solid #2186ff}.siderbar-menu .ant-menu .ant-menu-submenu .ant-menu-item-icon,.siderbar-menu .ant-menu .ant-menu-item .ant-menu-item-icon{transition:all .3s;border:1px solid gray;font-weight:600;width:30px;height:30px;line-height:30px;border-radius:50%}.siderbar-menu .ant-menu .ant-menu-sub.ant-menu-inline>.ant-menu-item,.siderbar-menu .ant-menu .ant-menu-sub.ant-menu-inline>.ant-menu-submenu>.ant-menu-submenu-title{margin:0}.siderbar-menu-collapsed{position:absolute;bottom:0;right:0}.home{display:flex;padding-top:64px;width:100%;box-sizing:border-box}.home-content{width:80%;overflow-y:auto;overflow-x:hidden;margin-left:20%;padding:8px;min-height:calc(100vh - 68px)}.home-content-div{width:100%;height:100%;background:white;padding:16px}.home-siderbar{width:20%;background:white;text-align:center;height:calc(100vh - 64px);box-sizing:border-box;position:fixed}.home-info{width:20%;margin:12px 12px 12px 0;background:white;padding:24px}.loading-page{width:100%;height:100%;display:flex;flex-direction:column;justify-content:center;align-items:center;padding:20%}
var e=Object.defineProperty,r=Object.defineProperties,t=Object.getOwnPropertyDescriptors,a=Object.getOwnPropertySymbols,o=Object.prototype.hasOwnProperty,p=Object.prototype.propertyIsEnumerable,s=(r,t,a)=>t in r?e(r,t,{enumerable:!0,configurable:!0,writable:!0,value:a}):r[t]=a;import{s as n}from"./index.f608f11e.js";import{d as i,c as l,b,H as c}from"./vendor.98dba853.js";const f=i({name:"app",setup(){var e;return n.commit("app/set_sider",(e=((e,r)=>{for(var t in r||(r={}))o.call(r,t)&&s(e,t,r[t]);if(a)for(var t of a(r))p.call(r,t)&&s(e,t,r[t]);return e})({},n.state.app.sider),r(e,t({hidden:!0})))),()=>l(c,null,[l("h1",null,[b("这是About页面")])])}});export{f as default};
.login{width:380px;height:280px;background-color:#fff;border:1px solid #ddd;border-radius:4px;padding:30px 35px;box-shadow:0 0 4px 1px #777;box-sizing:border-box;position:absolute;top:0;bottom:0;right:80px;margin:auto}.login-forget{font-size:14px;float:right;margin-top:15px}.login>h1{font-size:22px;color:#717171;font-weight:700}.login>h1>span{font-weight:500}
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<link rel="icon" href="/favicon.ico" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Vite App</title>
<script type="module" crossorigin src="/assets/index.f608f11e.js"></script>
<link rel="modulepreload" href="/assets/vendor.98dba853.js">
<link rel="stylesheet" href="/assets/index.4d155483.css">
<script type="module">!function(){try{new Function("m","return import(m)")}catch(o){console.warn("vite: loading legacy build because dynamic import is unsupported, syntax error above should be ignored");var e=document.getElementById("vite-legacy-polyfill"),n=document.createElement("script");n.src=e.src,n.onload=function(){System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))},document.body.appendChild(n)}}();</script>
</head>
<body>
<div id="app"></div>
<script nomodule>!function(){var e=document,t=e.createElement("script");if(!("noModule"in t)&&"onbeforeload"in t){var n=!1;e.addEventListener("beforeload",(function(e){if(e.target===t)n=!0;else if(!e.target.hasAttribute("nomodule")||!n)return;e.preventDefault()}),!0),t.type="module",t.src=".",e.head.appendChild(t),t.remove()}}();</script>
<script nomodule id="vite-legacy-polyfill" src="/assets/polyfills-legacy.34d9b402.js"></script>
<script nomodule id="vite-legacy-entry" data-src="/assets/index-legacy.90883ee8.js">System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))</script>
</body>
</html>
# -*- coding: utf-8 -*-
# @Time : 2023/2/14 15:26
# @Author : ctt
# @File : transform_doc_to_docx
# @Project : 解析docx
import os
import pythoncom
import win32com.client
def closesoft():
    # Shut down any resident WPS / Word instance that may still be holding documents open.
    print('Closing any running WPS/Word instance ...')
    wc = win32com.client.constants
    try:
        # Prefer WPS (kwps), fall back to the older WPS ProgID, then to Word.
        wps = win32com.client.gencache.EnsureDispatch('kwps.application')
    except Exception:
        try:
            wps = win32com.client.gencache.EnsureDispatch('wps.application')
        except Exception:
            wps = win32com.client.gencache.EnsureDispatch('word.application')
    try:
        # Close all open documents without saving, then quit the application.
        wps.Documents.Close(wc.wdDoNotSaveChanges)
        wps.Quit()
    except Exception:
        pass
def doc2docx(path):
    # Convert a .doc file to .docx through the Word COM interface and return the new path.
    pythoncom.CoInitialize()  # required when COM is used outside the main thread
    w = win32com.client.Dispatch('Word.Application')
    w.Visible = 0
    w.DisplayAlerts = 0
    doc = w.Documents.Open(path)
    newpath = os.path.splitext(path)[0] + '.docx'
    # 12 = wdFormatXMLDocument (.docx)
    doc.SaveAs(newpath, 12, False, "", True, "", False, False, False, False)
    doc.Close()
    w.Quit()
    return newpath
if __name__ == '__main__':
closesoft()
doc2docx(r'D:\四川报告\相关代码\从word中提取指定表格\data\特殊教育学校(1).doc')
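# --- Illustrative batch-conversion sketch (not part of the original commit) ---
# Assuming a directory of .doc files (the directory name below is a placeholder),
# each file could be converted with the doc2docx() helper defined above:
# for name in os.listdir(r'D:\doc_dir'):
#     if name.lower().endswith('.doc'):
#         doc2docx(os.path.join(r'D:\doc_dir', name))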
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author : LiuYan
# @Time : 2021/7/31 17:36
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : database
# @Author : LiuYan
# @Time : 2021/9/14 17:51
import pymysql
class DatabaseMySQL(object):
def __init__(self, database_config: dict):
super(DatabaseMySQL, self).__init__()
self._conn = None
self._cursor = None
self._database_config = database_config
self._connect()
def _connect(self) -> None:
self._database_config['port'] = int(self._database_config['port'])
self._database_config['charset'] = 'utf8'
self._database_config['cursorclass'] = pymysql.cursors.DictCursor
self._conn = pymysql.connect(**self._database_config)
self._cursor = self._conn.cursor()
def query(self, sql: str) -> list:
# execute the SQL and fetch all rows (returned as dicts via DictCursor)
print('SQL: {}'.format(sql))
self._cursor.execute(sql)
list_result = self._cursor.fetchall()
return list_result
def close(self) -> None:
self._cursor.close()
self._conn.close()
def is_connected(self):
    """Check whether the MySQL server is still reachable and reconnect if not."""
    try:
        # the connection is stored as self._conn, not self.conn
        self._conn.ping(reconnect=True)
        print("db is connected")
    except Exception:
        self._connect()
        print("db reconnected")
if __name__ == '__main__':
database_config = {
'host': '114.115.159.144',
'port': 3306,
'user': 'root',
'password': 'zzsn9988',
'database': 'clb_project'
}
dbm = DatabaseMySQL(database_config=database_config)
# sql = """select ds.data_source_name,ds.url,ds.params,ds.type,ds.data_name from clb_report_data_source ds inner join clb_report_data_set_source_map m on ds.id = m.data_source_id
# where m.data_set_id = '1641045122365317122'
# """
task_id = '1641261934625521666'
#
sql = '''SELECT ds.id,ds.param_value,te.file_path FROM clb_report_task t inner join clb_report_template te on t.template_id = te.id
inner join clb_report_data_set ds on te.data_set_id = ds.id
where t.id = {};'''.format(task_id)
list_result = dbm.query(sql=sql)
print(list_result)
dbm.close()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : database_oracle
# @Author : LiuYan
# @Time : 2021/10/13 16:10
import os
import cx_Oracle
os.environ['NLS_LANG'] = 'SIMPLIFIED CHINESE_CHINA.UTF8'
class DatabaseOracle(object):
def __init__(self, database_config: dict):
super(DatabaseOracle, self).__init__()
self._conn = None
self._cursor = None
self._database_config = database_config
self._connect()
def _connect(self) -> None:
self._conn = cx_Oracle.connect(
self._database_config['user'],
self._database_config['password'],
self._database_config['host'] + ':' + str(self._database_config['port']) + '/' + self._database_config['database']
)
self._cursor = self._conn.cursor()
def query(self, sql: str) -> list:
# execute the SQL and fetch all rows
print('SQL: {}'.format(sql))
self._cursor.execute(sql)
list_result = self.rows_as_dicts()
return list_result
def rows_as_dicts(self) -> list:
"""
将查询结果转为dict
:return:
"""
col_names = [i[0] for i in self._cursor.description]
return [dict(zip(col_names, row)) for row in self._cursor]
def close(self) -> None:
self._cursor.close()
self._conn.close()
if __name__ == '__main__':
database_config = {
'host': '114.116.91.1',
'port': 1521,
'user': 'cis',
'password': 'cis_zzsn9988',
'database': 'ORCL'
}
dbo = DatabaseOracle(database_config=database_config)
sql = '''
select
TITLE as 标题,
SUMMARY as 摘要,
ORIGIN as 来源
from CIS_ANS_BASEDATA where ROWNUM < 10'''
list_result = dbo.query(sql=sql)
print(list_result)
dbo.close()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : log
# @Author : LiuYan
# @Time : 2020/6/21 21:08
import os
import logging
import logging.handlers
from pathlib import Path
__all__ = ['logger']
# user-configurable section ↓
import tqdm
LEVEL_COLOR = {
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red,bg_white',
}
STDOUT_LOG_FMT = '%(log_color)s[%(asctime)s] [%(levelname)s] [%(threadName)s] [%(filename)s:%(lineno)d] %(message)s'
STDOUT_DATE_FMT = '%Y-%m-%d %H:%M:%S'
FILE_LOG_FMT = '[%(asctime)s] [%(levelname)s] [%(threadName)s] [%(filename)s:%(lineno)d] %(message)s'
FILE_DATE_FMT = '%Y-%m-%d %H:%M:%S'
# user-configurable section ↑
class ColoredFormatter(logging.Formatter):
COLOR_MAP = {
'black': '30',
'red': '31',
'green': '32',
'yellow': '33',
'blue': '34',
'magenta': '35',
'cyan': '36',
'white': '37',
'bg_black': '40',
'bg_red': '41',
'bg_green': '42',
'bg_yellow': '43',
'bg_blue': '44',
'bg_magenta': '45',
'bg_cyan': '46',
'bg_white': '47',
'light_black': '1;30',
'light_red': '1;31',
'light_green': '1;32',
'light_yellow': '1;33',
'light_blue': '1;34',
'light_magenta': '1;35',
'light_cyan': '1;36',
'light_white': '1;37',
'light_bg_black': '100',
'light_bg_red': '101',
'light_bg_green': '102',
'light_bg_yellow': '103',
'light_bg_blue': '104',
'light_bg_magenta': '105',
'light_bg_cyan': '106',
'light_bg_white': '107',
}
def __init__(self, fmt, datefmt):
super(ColoredFormatter, self).__init__(fmt, datefmt)
def parse_color(self, level_name):
color_name = LEVEL_COLOR.get(level_name, '')
if not color_name:
return ""
color_value = []
color_name = color_name.split(',')
for _cn in color_name:
color_code = self.COLOR_MAP.get(_cn, '')
if color_code:
color_value.append(color_code)
return '\033[' + ';'.join(color_value) + 'm'
def format(self, record):
record.log_color = self.parse_color(record.levelname)
message = super(ColoredFormatter, self).format(record) + '\033[0m'
return message
class TqdmLoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET):
super().__init__(level)
def emit(self, record):
try:
msg = self.format(record)
tqdm.tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def _get_logger(log_to_file=True, log_filename='default.log', log_level='DEBUG'):
_logger = logging.getLogger(__name__)
stdout_handler = logging.StreamHandler()
stdout_handler.setFormatter(
ColoredFormatter(
fmt=STDOUT_LOG_FMT,
datefmt=STDOUT_DATE_FMT,
)
)
_logger.addHandler(stdout_handler)
# _logger.setLevel(logging.INFO)
# _logger.addHandler(TqdmLoggingHandler())
if log_to_file:
# _tmp_path = os.path.dirname(os.path.abspath(__file__))
# _tmp_path = os.path.join(_tmp_path, '../logs/{}'.format(log_filename))
_project_path = os.path.dirname(os.getcwd())
_tmp_path = os.path.join(_project_path, 'logs')
Path(_tmp_path).mkdir(parents=True, exist_ok=True)
_tmp_path = os.path.join(_tmp_path, log_filename)
file_handler = logging.handlers.TimedRotatingFileHandler(_tmp_path, when='midnight', backupCount=30)
file_formatter = logging.Formatter(
fmt=FILE_LOG_FMT,
datefmt=FILE_DATE_FMT,
)
file_handler.setFormatter(file_formatter)
_logger.addHandler(file_handler)
_logger.setLevel(log_level)
return _logger
logger = _get_logger(log_to_file=False)
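# --- Usage sketch (not part of the original commit) ---
# Running this module directly shows the colored console output produced by the
# handler configured above; the messages themselves are placeholders.
if __name__ == '__main__':
    logger.debug('debug message (cyan)')
    logger.info('info message (green)')
    logger.warning('warning message (yellow)')
    logger.error('error message (red)')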
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : tool
# @Author : LiuYan
# @Time : 2021/6/21 11:22
from __future__ import unicode_literals, print_function, division
import os
import re
import json
import time
import zipfile
import datetime
import xlsxwriter
def read_json(path: str) -> list:
f = open(path, 'r', encoding='utf-8')
examples = []
for line in f.readlines():
examples.append(json.loads(line))
f.close()
return examples
def clean_text(text: str) -> str:
return re.sub('\n+', '\n', text.strip().replace(' ', '').replace('\t', '').replace('\r', ''))
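# Usage sketch (illustrative, not part of the original commit): clean_text strips spaces,
# tabs and carriage returns and collapses repeated newlines, e.g.
# clean_text(' 报告 正文\r\n\n\t第二段 ')  ->  '报告正文\n第二段'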
# pack a directory into a zip archive
def make_zip(file_dir: str, zip_path: str) -> None:
zip_f = zipfile.ZipFile(zip_path, 'w')
pre_len = len(os.path.dirname(file_dir))
for parent, dir_names, filenames in os.walk(file_dir):
for filename in filenames:
path_file = os.path.join(parent, filename)
arc_name = path_file[pre_len:].strip(os.path.sep)
zip_f.write(path_file, arc_name)
zip_f.close()
# delete the zip archive
def delete_zip(zip_path: str) -> None:
os.remove(zip_path)
def timeit(f):
def timed(*args, **kw):
ts = time.time()
print('......begin {0:8s}......'.format(f.__name__))
result = f(*args, **kw)
te = time.time()
print('......finish {0:8s}, took:{1:.4f} sec......'.format(f.__name__, te - ts))
return result
return timed
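# Usage sketch (illustrative, not part of the original commit; parse_report is a hypothetical name):
# @timeit
# def parse_report():
#     ...          # any work to be timed
# parse_report()   # prints "......begin parse_report ......" / "......finish parse_report, took:x.xxxx sec......"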
def list2xlsx(result_list: list, xlsx_path: str):
"""
:param result_list: [
{
'id': 1,
'title': 't',
...
}
...
]
:param xlsx_path: '/home/zzsn/liuyan/result/result.xlsx'
:return:
"""
workbook = xlsxwriter.Workbook(xlsx_path)
worksheet = workbook.add_worksheet('sheet1')
worksheet.write_row(row=0, col=0, data=list(result_list[0].keys()))
for row_index, result_dict in enumerate(result_list):
worksheet.write_row(row=row_index + 1, col=0, data=list(
';'.join(result) if type(result) in [list, set] else result for result in result_dict.values()
))
workbook.close()
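# Usage sketch (illustrative, not part of the original commit; the output path is a placeholder):
# list2xlsx([{'id': 1, 'title': 't', 'tags': ['a', 'b']}], 'result.xlsx')
# -> writes a header row (id, title, tags) and one data row; list/set values are joined with ';'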
def return_json(handle_msg: str, is_handle_success: bool, logs: str or None, result_data: object) -> dict or json:
"""
:param handle_msg: str 处理信息:是否成功 'success' / 'failure'
:param is_handle_success: bool 是否处理成功
:param logs: str or None 处理过程以及结果信息
:param result_data: object 处理结果 数据
:return:
"""
dict_result = {
'handleMsg': handle_msg,
'isHandleSuccess': is_handle_success,
'logs': logs,
'resultData': result_data
}
return dict_result
# return json.dumps(dict_result, ensure_ascii=False)
def return_json_(handle_msg: str, is_handle_success: bool, logs: str or None, result_data: object) -> dict or json:
"""
:param handle_msg: str 处理信息:是否成功 'success' / 'failure'
:param is_handle_success: bool 是否处理成功
:param logs: str or None 处理过程以及结果信息
:param result_data: object 处理结果 数据
:return:
"""
dict_result = {
'handleMsg': handle_msg,
'ynHandleSuccess': is_handle_success,
'logs': logs,
'resultData': result_data
}
return json.dumps(dict_result, ensure_ascii=False)
def return_json_str(handle_msg: str, is_handle_success: bool, logs: str or None, result_data: object) -> dict or json:
"""
:param handle_msg: str 处理信息:是否成功 'success' / 'failure'
:param is_handle_success: bool 是否处理成功
:param logs: str or None 处理过程以及结果信息
:param result_data: object 处理结果 数据
:return:
"""
dict_result = {
'handleMsg': handle_msg,
'isHandleSuccess': is_handle_success,
'logs': logs,
'resultData': result_data
}
return json.dumps(dict_result, ensure_ascii=False)
def class_to_dict(obj):
is_list = obj.__class__ == [].__class__
is_set = obj.__class__ == set().__class__
if is_list or is_set:
obj_arr = []
for o in obj:
dict = {}
a = o.__dict__
if "_sa_instance_state" in a:
del a['_sa_instance_state']
dict.update(a)
obj_arr.append(dict)
return obj_arr
else:
dict = {}
a = obj.__dict__
if "_sa_instance_state" in a:
del a['_sa_instance_state']
dict.update(a)
return dict
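# Usage sketch (illustrative, not part of the original commit; Row is a hypothetical model class):
# class Row:
#     def __init__(self):
#         self.id = 1
#         self.name = 'demo'
# class_to_dict(Row())    -> {'id': 1, 'name': 'demo'}
# class_to_dict([Row()])  -> [{'id': 1, 'name': 'demo'}]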
def class_to_dict_field(obj):
"""
字段集合转化字典 如 db.session.query(User.created_time).all()
:param obj:
:return:
"""
is_list = obj.__class__ == [].__class__
is_set = obj.__class__ == set().__class__
if is_list or is_set:
obj_arr = []
for o in obj:
for v in dir(o):
if isinstance(o.__getattribute__(v), datetime.datetime) and not v.startswith("__"):
dict = {str(v): o.__getattribute__(v).strftime('%Y-%m-%d %H:%M:%S')}
obj_arr.append(dict)
return obj_arr
def class_to_dict_all(obj):
"""
例如 GlobalRegion.query.all()
:param obj:
:return:
"""
is_list = obj.__class__ == [].__class__
is_set = obj.__class__ == set().__class__
if is_list or is_set:
obj_arr = []
for o in obj:
for k, v in vars(o).items():
if isinstance(v, datetime.datetime):
o.__dict__[k] = v.strftime('%Y-%m-%d %H:%M:%S')
dict = {}
a = o.__dict__
if '_sa_instance_state' in a:
del a['_sa_instance_state']
dict.update(a)
obj_arr.append(dict)
return obj_arr
else:
dict = {}
for k, v in vars(obj).items():
if isinstance(v, datetime.datetime):
obj.__dict__[k] = v.strftime('%Y-%m-%d %H:%M:%S')
a = obj.__dict__
if '_sa_instance_state' in a:
del a['_sa_instance_state']
dict.update(a)
return dict
def formatGMTime(timestamp):
"""
格式化 GMT 时间
:param timestamp:
:return:
"""
GMT_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
return datetime.datetime.strptime(timestamp, GMT_FORMAT) + datetime.timedelta(hours=8)
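# --- Usage sketch (not part of the original commit) ---
if __name__ == '__main__':
    # Parse an RFC 1123 / GMT timestamp and shift it to Beijing time (UTC+8).
    print(formatGMTime('Fri, 24 Mar 2023 03:30:00 GMT'))  # -> 2023-03-24 11:30:00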
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import traceback
from fdfs_client.client import *
class FDFS(object):
    def __init__(self, client_file):
        self.client_file = client_file
        self.client = self.create_client()
    def create_client(self):
        try:
            client = Fdfs_client(self.client_file)
            return client
        except Exception as e:
            # traceback.format_exc() returns the traceback text (print_exc() would only return None)
            print("FastDFS create client fail, {0}, {1}".format(e, traceback.format_exc()))
            return None
    def download(self, file_name, file_id):
        try:
            ret_download = self.client.download_to_file(file_name, file_id)
            print(ret_download)
            return True
        except Exception as e:
            print("FastDFS download file fail, {0}, {1}".format(e, traceback.format_exc()))
            return False
    def upload(self, file_name):
        try:
            ret_upload = self.client.upload_by_filename(file_name)
            print(ret_upload)
            return True
        except Exception as e:
            print("FastDFS upload file fail, {0}, {1}".format(e, traceback.format_exc()))
            return False
    def delete(self, file_id):
        try:
            ret_delete = self.client.delete_file(file_id)
            print(ret_delete)
            return True
        except Exception as e:
            print("FastDFS delete file fail, {0}, {1}".format(e, traceback.format_exc()))
            return False
if __name__ == "__main__":
client_file = "fdfs_client.conf"
upload_file = "/opt/abc.py"
download_file = "/opt/abc.docx"
file_id = "group1/M00/00/02/rBAAQWCt48iAUX7DAA-s5fjCGo402.docx"
fdfs = FDFS(client_file)
fdfs.download(download_file, file_id)
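# The remaining operations could be exercised the same way (illustrative, not part of the original commit):
# fdfs.upload(upload_file)   # upload /opt/abc.py and print the returned file_id mapping
# fdfs.delete(file_id)       # delete the stored file identified by file_id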