灾害数据输出:综合明细表.xlsx

This commit is contained in:
liyubo 2025-11-18 10:42:17 +08:00
parent d3d2d23f18
commit 333238c7b1
3 changed files with 204 additions and 9 deletions

View File

@ -899,7 +899,8 @@ def predict_images_share_dir(task_id, pt_name, zip_url, user_name, pwd, output_d
found_paths = scanner.find_folders_by_name( found_paths = scanner.find_folders_by_name(
share_path=config['share'], share_path=config['share'],
folder_name='图像类', folder_name='图像类',
start_dir=config['dir'] start_dir=config['dir'],
max_depth=2
) )
target_path = "" # 识别图片目录 target_path = "" # 识别图片目录
@ -911,7 +912,8 @@ def predict_images_share_dir(task_id, pt_name, zip_url, user_name, pwd, output_d
found_paths = scanner.find_folders_by_name( found_paths = scanner.find_folders_by_name(
share_path=config['share'], share_path=config['share'],
folder_name='Images', folder_name='Images',
start_dir=tmpConfig['dir'] start_dir=tmpConfig['dir'],
max_depth=3
) )
if len(found_paths) > 0: if len(found_paths) > 0:
target_path = found_paths[0] target_path = found_paths[0]

View File

@ -758,7 +758,8 @@ def get_road_dict(dir,user_name,pwd) :
found_paths = scanner.find_files_by_name( found_paths = scanner.find_files_by_name(
share_path=config['share'], share_path=config['share'],
file_name='每公里指标明细表*.xls', file_name='每公里指标明细表*.xls',
start_dir=config['dir'] start_dir=config['dir'],
max_depth=1
) )
print(f"\n找到 {len(found_paths)}'fileindex.txt' 文件:") print(f"\n找到 {len(found_paths)}'fileindex.txt' 文件:")
for i, path in enumerate(found_paths, 1): for i, path in enumerate(found_paths, 1):
@ -789,7 +790,8 @@ def get_pile_dict(dir,user_name,pwd) :
found_paths = scanner.find_files_by_name( found_paths = scanner.find_files_by_name(
share_path=config['share'], share_path=config['share'],
file_name='fileindex.txt', file_name='fileindex.txt',
start_dir=config['dir'] start_dir=config['dir'],
max_depth=8
) )
print(f"\n找到 {len(found_paths)}'fileindex.txt' 文件:") print(f"\n找到 {len(found_paths)}'fileindex.txt' 文件:")
for i, path in enumerate(found_paths, 1): for i, path in enumerate(found_paths, 1):

View File

@ -6,6 +6,9 @@ import numpy as np
from collections import defaultdict from collections import defaultdict
import smb import smb
import pandas as pd import pandas as pd
from openpyxl import Workbook
from openpyxl.styles import Font, Alignment
from openpyxl.utils import get_column_letter
import glob import glob
from datetime import datetime from datetime import datetime
@ -169,7 +172,8 @@ def yoloseg_to_grid_share_dir(road_dict,pile_dict,image_path,label_file,cover_ra
cname = class_names[cls_id] if cls_id<len(class_names) else str(cls_id) cname = class_names[cls_id] if cls_id<len(class_names) else str(cls_id)
ids_str = '-'.join(map(str,sorted(covered_cells)))+'-' ids_str = '-'.join(map(str,sorted(covered_cells)))+'-'
result_lines.append(f"{cname} {f"桩号:K000{pile_no}"} {ROAD_TYPE_EN_TO_CN.get(road_type, 'xx')} {ids_str}") pile_no_tmp = f"桩号:K000{pile_no}"
result_lines.append(f"{cname} {pile_no_tmp} {ROAD_TYPE_EN_TO_CN.get(road_type, 'xx')} {ids_str}")
if cname not in all_class_cells: all_class_cells[cname]=set() if cname not in all_class_cells: all_class_cells[cname]=set()
cell_info.append(covered_cells) cell_info.append(covered_cells)
cell_info.append([max_x - min_x, max_y - min_y]) cell_info.append([max_x - min_x, max_y - min_y])
@ -273,6 +277,163 @@ def in_interval(increment, cur_pile_no, tmp_start, tmp_end) :
else : else :
return False return False
# For a given interval size, emit one row per interval: basic road info + distress data.
def process_info(road_dict,pile_dict,summary_data,image_path,current_time,interval=10,dir="output",cell_area=CELL_AREA,grid_width=GRID_WIDTH,grid_height=GRID_HEIGHT) :
    """Aggregate per-image distress summaries into fixed-length pile-number intervals.

    Walks ``summary_data`` (ordered tuples of ``(pile_no, DR, counts, road_type)``)
    and produces one spreadsheet row per ``interval``-metre interval of pile
    numbers. Intervals that contain no summary entry get a "no distress" row
    (DR column fixed to 0.00); intervals that do contain entries get the
    column-wise averaged DR of the entries that fall inside them.

    Args:
        road_dict: road metadata lookup, consumed by ``get_road_info``.
        pile_dict: maps image file name -> sequence whose first element is the
            road code (also consumed by ``get_road_info``).
        summary_data: iterable of ``(pile_no, DR, counts, road_type)`` tuples,
            assumed sorted by pile number in the driving direction.
        image_path: path of one image of the run; only its basename is used to
            look up road metadata.
        current_time: unused in this function (kept for call-site compatibility).
        interval: interval length in metres (10 / 100 / 1000 at the call sites).
        dir, cell_area, grid_width, grid_height: unused here (kept for
            signature consistency with the other process_* functions).

    Returns:
        list[list[str]]: 25-column rows ready to be written to the
        "综合明细表" workbook. Empty list when ``summary_data`` is falsy.
    """
    process_info_data = []
    if summary_data:
        img_file_name = os.path.basename(image_path)
        road_data = get_road_info(road_dict, pile_dict, img_file_name)
        identify_width = road_data.get('识别宽度(米)', '3.6')
        up_or_down = road_data.get('方向(上行/下行)', '上行')
        road_code = pile_dict.get(img_file_name)[0]
        road_level = road_data.get('技术等级', '')
        road_type_cn = road_data.get('路面类型(沥青/水泥/砂石)', '沥青')
        group_list = summary_data
        # Pile numbers are in kilometres, so an N-metre interval is N/1000.
        increment = float(0.001 * interval)
        pile_no_start = float(0.000)
        pile_no_end = float(0.000) + increment
        # Downbound runs ('下行') are walked in reverse with a negative step,
        # starting from the last (largest) pile number, snapped to the grid.
        if up_or_down == '下行' :
            group_list = list(summary_data)[::-1]
            increment = float(-0.001 * interval)
            tmp_pile_no = convert_special_format(group_list[len(group_list)-1][1])
            pile_no_start = tmp_pile_no
            if tmp_pile_no % increment != 0 :
                # Not on an interval boundary: first interval is the partial one.
                pile_no_end = tmp_pile_no + (tmp_pile_no % increment)
            else :
                pile_no_end = tmp_pile_no + increment
        index = 0
        tmp_start = pile_no_start
        tmp_end = pile_no_end
        while True :
            # One interval per loop iteration; distress is aggregated within it.
            pile_no, DR, counts, road_type = group_list[index]
            cur_pile_no = convert_special_format(pile_no)
            pile_no_s = f"{tmp_start:0.3f}"
            pile_no_e = f"{tmp_end:0.3f}"
            up_or_down_code = "B" if up_or_down == '下行' else "A"
            if not in_interval(increment, cur_pile_no, tmp_start, tmp_end) :
                # No summary entry falls in this interval: emit a no-distress
                # row (DR fixed at 0.00, remaining metric columns blank).
                row = [road_code, pile_no_s,pile_no_e,up_or_down_code,up_or_down,road_level,f"{road_type_cn}路面",'','',f"{0:.2f}",'','','','','','','','','','','','','','','']
                # NOTE(review): `keys` is computed but unused — the per-class
                # append loop below is commented out; dead code to confirm.
                if road_type=="asphalt":
                    keys = list(CLASS_MAP_ASPHALT.keys())
                elif road_type=="cement":
                    keys = list(CLASS_MAP_CEMENT.keys())
                else:
                    keys = list(CLASS_MAP_GRAVEL.keys())
                # for k in keys:
                #     row.append(f"{0:.2f}")
                process_info_data.append(row)
                # f.write(','.join(row)+'\n')
            else :
                row = [road_code, pile_no_s,pile_no_e,up_or_down_code,up_or_down,road_level,f"{road_type_cn}路面",'','']
                subRows = []
                # Consume every consecutive entry that falls in this interval;
                # `index` advances only here, so the outer loop resumes where
                # this inner scan stopped.
                while index < len(group_list):
                    pile_no, DR, counts, road_type = group_list[index]
                    cur_pile_no = convert_special_format(pile_no)
                    tmp_row = []
                    if in_interval(increment, cur_pile_no, tmp_start, tmp_end) :
                        pile_no = f"{tmp_start:0.3f}"
                        tmp_row = [DR]
                        if road_type=="asphalt":
                            keys = list(CLASS_MAP_ASPHALT.keys())
                        elif road_type=="cement":
                            keys = list(CLASS_MAP_CEMENT.keys())
                        else:
                            keys = list(CLASS_MAP_GRAVEL.keys())
                        # for k in keys:
                        #     tmp_row.append(counts.get(k, [0,0,0])[0])
                        subRows.append(tmp_row)
                        index = index + 1
                    else :
                        break
                # Column-wise aggregation over the interval; one 10 m interval
                # corresponds to 5 images (hence the interval/2 divisor —
                # presumably images are 2 m apart; confirm against capture setup).
                column_sums = [f"{(sum(column)/(interval / 2)):0.2f}" for column in zip(*subRows)]
                row += column_sums
                # f.write(','.join(row)+'\n')
                # Pad the remaining metric columns so every row has 25 cells.
                row += ['','','','','','','','','','','','','','','']
                process_info_data.append(row)
            # Advance to the next interval (increment is negative for downbound).
            tmp_start = tmp_end
            tmp_end = tmp_start + increment
            print(f"tmp_start={tmp_start}, tmp_end={tmp_end}, index={index}, len(group_list)={len(group_list)}")
            if index >= len(group_list) :
                break
    return process_info_data
def adjust_column_width(worksheet):
    """Auto-fit every column of *worksheet* to its longest cell value.

    Cells that belong to a merged range are skipped (a long merged title
    would otherwise inflate a single column). Columns with no non-empty,
    non-merged cell are left at their default width.

    Args:
        worksheet: an openpyxl worksheet; its column dimensions are
            modified in place.
    """
    # Hoist the merged-range collection out of the per-cell loop: the
    # original re-read `worksheet.merged_cells.ranges` for every cell.
    merged_ranges = list(worksheet.merged_cells.ranges)
    for col_idx in range(1, worksheet.max_column + 1):
        max_length = 0
        column_letter = get_column_letter(col_idx)
        for row_idx in range(1, worksheet.max_row + 1):
            cell = worksheet.cell(row=row_idx, column=col_idx)
            # Skip cells inside any merged range.
            if any(cell.coordinate in merged_range for merged_range in merged_ranges):
                continue
            if cell.value is not None:
                # Bug fix: the original wrapped this in a bare `except:`,
                # which silently swallowed *every* exception (including
                # KeyboardInterrupt/SystemExit). len(str(...)) cannot fail
                # for ordinary cell values, so no try/except is needed.
                max_length = max(max_length, len(str(cell.value)))
        if max_length > 0:
            # +2 characters of padding for readability.
            worksheet.column_dimensions[column_letter].width = max_length + 2
def create_multiple_sheets_with_multiple_headers(file_path, excel_data):
    """Write an .xlsx workbook with one sheet per entry of *excel_data*.

    Each entry is a dict with keys:
        name    -- sheet title
        headers -- list of header levels; each level is a list of
                   ``(title, first_col_letter, last_col_letter)`` triples,
                   merged across that column span, bold 14pt, centred
        columns -- list of bold column titles
        data    -- list of data rows (written as-is)

    Column widths are auto-fitted and the workbook is saved to *file_path*.
    """
    workbook = Workbook()
    # Drop the sheet openpyxl creates by default so only ours remain.
    workbook.remove(workbook.active)
    for sheet_spec in excel_data:
        sheet = workbook.create_sheet(title=sheet_spec['name'])
        row_no = 1
        # Multi-level merged header rows.
        for header_level in sheet_spec['headers']:
            for title, first_col, last_col in header_level:
                anchor = f'{first_col}{row_no}'
                sheet.merge_cells(f'{first_col}{row_no}:{last_col}{row_no}')
                sheet[anchor] = title
                sheet[anchor].font = Font(bold=True, size=14)
                sheet[anchor].alignment = Alignment(horizontal='center')
            row_no += 1
        # Bold column-title row.
        for col_no, column_name in enumerate(sheet_spec['columns'], 1):
            sheet.cell(row=row_no, column=col_no, value=column_name).font = Font(bold=True)
        row_no += 1
        # Data rows, one spreadsheet row each.
        for record in sheet_spec['data']:
            for col_no, value in enumerate(record, 1):
                sheet.cell(row=row_no, column=col_no, value=value)
            row_no += 1
        # Auto-fit column widths for this sheet.
        adjust_column_width(sheet)
    wb_path = file_path
    workbook.save(wb_path)
    print(f"文件已保存为 {wb_path}")
# ---------------- 主函数-共享目录 ---------------- # ---------------- 主函数-共享目录 ----------------
def process_dir(road_dict,pile_dict,dir="output",cell_area=CELL_AREA,grid_width=GRID_WIDTH,grid_height=GRID_HEIGHT): def process_dir(road_dict,pile_dict,dir="output",cell_area=CELL_AREA,grid_width=GRID_WIDTH,grid_height=GRID_HEIGHT):
@ -367,7 +528,7 @@ def process_dir(road_dict,pile_dict,dir="output",cell_area=CELL_AREA,grid_width=
if up_or_down == '下行' : if up_or_down == '下行' :
group_list = list(group)[::-1] group_list = list(group)[::-1]
increment = float(-0.010) increment = float(-0.010)
tmp_pile_no = convert_special_format(group[len(group)-1][1]) tmp_pile_no = convert_special_format(group_list[len(group_list)-1][1])
pile_no_start = tmp_pile_no pile_no_start = tmp_pile_no
if tmp_pile_no % increment != 0 : if tmp_pile_no % increment != 0 :
pile_no_end = tmp_pile_no + (tmp_pile_no % increment) pile_no_end = tmp_pile_no + (tmp_pile_no % increment)
@ -381,7 +542,7 @@ def process_dir(road_dict,pile_dict,dir="output",cell_area=CELL_AREA,grid_width=
tmp_end = pile_no_end tmp_end = pile_no_end
while True : while True :
# 每10m一个区间在区间内进行灾害计算 # 每10m一个区间在区间内进行灾害计算
pile_no, DR, counts, road_type = summary_data[index] pile_no, DR, counts, road_type = group_list[index]
cur_pile_no = convert_special_format(pile_no) cur_pile_no = convert_special_format(pile_no)
if not in_interval(increment, cur_pile_no, tmp_start, tmp_end) : if not in_interval(increment, cur_pile_no, tmp_start, tmp_end) :
# 没在刻度内直接输出无病害数据 # 没在刻度内直接输出无病害数据
@ -400,7 +561,7 @@ def process_dir(road_dict,pile_dict,dir="output",cell_area=CELL_AREA,grid_width=
row = [f"{tmp_start:0.3f}", identify_width] row = [f"{tmp_start:0.3f}", identify_width]
subRows = [] subRows = []
while index < len(group_list): while index < len(group_list):
pile_no, DR, counts, road_type = summary_data[index] pile_no, DR, counts, road_type = group_list[index]
cur_pile_no = convert_special_format(pile_no) cur_pile_no = convert_special_format(pile_no)
tmp_row = [] tmp_row = []
@ -452,7 +613,37 @@ def process_dir(road_dict,pile_dict,dir="output",cell_area=CELL_AREA,grid_width=
data_list.append(excel_data) data_list.append(excel_data)
all_data = [headers] + data_list all_data = [headers] + data_list
smb.write_to_excel_pandas(all_data, img_file_path + '/病害明显列表.xlsx') smb.write_to_excel_pandas(all_data, img_file_path + '/病害明细列表.xlsx')
# 综合明细表.xlsx
heads = ['路线编码','起点','终点','车道编码','上下行','公路等级','路面类型','PQI','PQI等级','DR(%)','PCI','PCI等级','IRI','RQI','RQI等级','RD','RDI','RDI等级','SMTD','PBI','PBI等级','WR','PWI','PWI等级','备注']
data1 = process_info(road_dict,pile_dict,summary_data,image_path,current_time,10)
data2 = process_info(road_dict,pile_dict,summary_data,image_path,current_time,100)
data3 = process_info(road_dict,pile_dict,summary_data,image_path,current_time,1000)
excel_data = [
{
'name': '综合明细十米',
'headers': [[['综合明细十米','A','Y']]],
'columns': heads,
'data': data1
},
{
'name': '综合明细百米',
'headers': [[['综合明细百米','A','Y']]],
'columns': heads,
'data': data2
},
{
'name': '综合明细千米',
'headers': [[['综合明细千米','A','Y']]],
'columns': heads,
'data': data3
}
]
create_multiple_sheets_with_multiple_headers(img_file_path + '/综合明细表.xlsx', excel_data)
# ---------------- 主函数 ---------------- # ---------------- 主函数 ----------------
def process_zip(zip_path,pile_map_file,output_dir="output",cell_area=CELL_AREA,grid_width=GRID_WIDTH,grid_height=GRID_HEIGHT): def process_zip(zip_path,pile_map_file,output_dir="output",cell_area=CELL_AREA,grid_width=GRID_WIDTH,grid_height=GRID_HEIGHT):