生成汇总目录sum,输出文件名以路线编码打头

This commit is contained in:
liyubo 2026-04-09 09:35:39 +08:00
parent 5b18b592d0
commit 8dff981583
3 changed files with 282 additions and 23 deletions

View File

@ -703,7 +703,7 @@ class YOLOSegmentationInference:
推理结果列表 推理结果列表
""" """
tmp_output_dir = output_dir + "\\" + datetime.now().strftime("%Y%m%d%H%M%S") tmp_output_dir = output_dir + "\\" + current_time
print(f"正在处理共享目录: {input_dir} - {tmp_output_dir}") print(f"正在处理共享目录: {input_dir} - {tmp_output_dir}")
try: try:
@ -770,14 +770,22 @@ class YOLOSegmentationInference:
print(f"清理后,内存使用: {mem_usage:.2f} MB") print(f"清理后,内存使用: {mem_usage:.2f} MB")
# 处理输出目录的文件格式 # 处理输出目录的文件格式
process_dir(road_dict, pile_dict, tmp_output_dir) process_dir(road_dict, pile_dict, current_time, tmp_output_dir)
# 识别生成的文件压缩成zip # 识别生成的文件压缩成zip
zip_folder_shutil(tmp_output_dir) # zip_folder_shutil(tmp_output_dir)
move_file_shutil(tmp_output_dir+".zip", tmp_output_dir + f"/{task_id}.zip") # move_file_shutil(tmp_output_dir+".zip", tmp_output_dir + f"/{task_id}.zip")
# 生成汇总目录:图像类/sum/,包含灾害数据.txt+病害明细列表.xlsx+综合明细表.xlsx
image_class_dir = smb_tool.extract_upto_image_class(tmpConfig['dir'])
remote_image_class_dir = f"{image_class_dir}/sum/{task_id}/{current_time}"
scanner.upload_directory(f"{tmp_output_dir}/sum", config['share'], remote_dir=remote_image_class_dir)
del_file_shutil(f"{tmp_output_dir}/sum")
# 上传识别完整结果
remote_dir = f"{tmpConfig['dir']}_识别/{task_id}/{current_time}" remote_dir = f"{tmpConfig['dir']}_识别/{task_id}/{current_time}"
scanner.upload_directory(tmp_output_dir, config['share'], remote_dir=remote_dir) scanner.upload_directory(tmp_output_dir, config['share'], remote_dir=remote_dir)
# del_file_shutil(tmp_output_dir) del_file_shutil(tmp_output_dir)
# 更新识别任务状态为已结束 # 更新识别任务状态为已结束
DB_CONFIG = { DB_CONFIG = {

View File

@ -13,6 +13,7 @@ import cv2
import pandas as pd import pandas as pd
import io import io
import shutil import shutil
from pathlib import Path
class SMBScanner: class SMBScanner:
@ -500,6 +501,158 @@ class SMBScanner:
except Exception as e: except Exception as e:
print(f"上传文件失败: {e}") print(f"上传文件失败: {e}")
return False return False
def download_directory(self, share_name, remote_dir, local_dir, overwrite=True, exclude_patterns=None):
    """Download a remote shared directory tree into a local directory.

    Args:
        share_name: name of the SMB share.
        remote_dir: directory path on the share to download from.
        local_dir: local destination directory.
        overwrite: whether existing local files are overwritten.
        exclude_patterns: filename-suffix patterns to skip; defaults to
            ['_grid.jpg', '_grid.txt'] when None.

    Returns:
        bool: True when the whole tree downloaded without errors.
    """
    if not self.connect():
        return False

    # Fall back to the default suffix filters when none are supplied.
    exclude_patterns = ['_grid.jpg', '_grid.txt'] if exclude_patterns is None else exclude_patterns

    print(f"开始下载目录: {share_name}/{remote_dir} -> {local_dir}")
    print(f"排除文件模式: {exclude_patterns}")

    # Make sure the local destination exists before anything is written.
    if not os.path.exists(local_dir):
        try:
            os.makedirs(local_dir, exist_ok=True)
            print(f"创建本地目录: {local_dir}")
        except Exception as e:
            print(f"创建本地目录失败: {e}")
            return False

    try:
        ok = self._download_directory_recursive(share_name, remote_dir, local_dir, overwrite, exclude_patterns)
        print("目录下载完成" if ok else "目录下载过程中出现错误")
        return ok
    except Exception as e:
        print(f"下载目录失败: {e}")
        return False
def _download_directory_recursive(self, share_name, remote_path, local_path, overwrite, exclude_patterns):
    """Recursively mirror one remote directory level into *local_path*.

    Args:
        share_name: name of the SMB share.
        remote_path: remote directory path relative to the share.
        local_path: local directory receiving the content.
        overwrite: whether existing local files are overwritten.
        exclude_patterns: filename-suffix patterns to skip.

    Returns:
        bool: True when every entry below this level downloaded cleanly.
    """
    try:
        all_ok = True
        remote_root = self.build_full_path(share_name, remote_path)

        for entry in scandir(remote_root):
            child_remote = f"{remote_path}/{entry.name}" if remote_path else entry.name
            child_local = os.path.join(local_path, entry.name)

            if not entry.is_dir():
                # Plain file: download it, remembering any failure.
                if not self._download_single_file(share_name, child_remote, child_local, overwrite, exclude_patterns):
                    all_ok = False
                continue

            # Sub-directory: create the local counterpart, then recurse.
            print(f"下载子目录: {entry.name}")
            if not os.path.exists(child_local):
                try:
                    os.makedirs(child_local, exist_ok=True)
                except Exception as e:
                    print(f"创建本地子目录失败 {child_local}: {e}")
                    all_ok = False
                    continue  # cannot descend without the local directory

            if not self._download_directory_recursive(share_name, child_remote, child_local, overwrite, exclude_patterns):
                all_ok = False

        return all_ok
    except Exception as e:
        print(f"下载目录内容失败 {remote_path}: {e}")
        return False
def _download_single_file(self, share_name, remote_file_path, local_file_path, overwrite, exclude_patterns):
    """Download one remote file to a local path.

    Args:
        share_name: name of the SMB share.
        remote_file_path: remote file path relative to the share.
        local_file_path: local destination file path.
        overwrite: whether an existing local file is replaced.
        exclude_patterns: filename-suffix patterns to skip.

    Returns:
        bool: True when the file was downloaded (or legitimately skipped).
    """
    try:
        # Respect an existing local copy unless overwriting was requested.
        if not overwrite and os.path.exists(local_file_path):
            print(f"文件已存在,跳过: {local_file_path}")
            return True

        # Suffix match against the exclusion list; skipped files count as success.
        file_name = os.path.basename(remote_file_path)
        for pattern in exclude_patterns:
            if file_name.endswith(pattern):
                print(f"文件被过滤,跳过: {file_name}")
                return True

        full_remote_path = self.build_full_path(share_name, remote_file_path)
        print(f"下载文件: {file_name}")

        # Stream in fixed-size chunks instead of reading the whole remote
        # file into memory first, so large files do not spike memory usage.
        file_size = 0
        with open_file(full_remote_path, mode='rb') as remote_file:
            with open(local_file_path, 'wb') as local_file:
                while True:
                    chunk = remote_file.read(1024 * 1024)
                    if not chunk:
                        break
                    local_file.write(chunk)
                    file_size += len(chunk)

        print(f"文件下载成功: {local_file_path} ({file_size} 字节)")
        return True
    except Exception as e:
        print(f"下载文件失败 {remote_file_path}: {e}")
        return False
def find_folders_by_name(self, share_path, folder_name, start_dir="", max_depth=10): def find_folders_by_name(self, share_path, folder_name, start_dir="", max_depth=10):
"""专门查找文件夹""" """专门查找文件夹"""
@ -754,6 +907,23 @@ def standardized_path(path) :
return path return path
def extract_upto_image_class(path_str):
    """Return the prefix of *path_str* up to and including the '图像类' directory.

    Args:
        path_str: a path string that may contain a '图像类' component.

    Returns:
        str: the path truncated right after '图像类', or the original string
        unchanged when no such component exists.
    """
    parts = Path(path_str).parts
    try:
        idx = parts.index('图像类')
    except ValueError:
        # No '图像类' component: hand back the input untouched.
        return path_str
    # Always return a str so callers get one consistent type (the previous
    # version returned a Path on this branch but a str on the branch above).
    return str(Path(*parts[:idx + 1]))
# 从传入的路径中提取ip共享目录目标访问目录 # 从传入的路径中提取ip共享目录目标访问目录
def get_conf(zip_url, user_name, pwd) : def get_conf(zip_url, user_name, pwd) :
zip_url = standardized_path(zip_url) zip_url = standardized_path(zip_url)
@ -769,12 +939,16 @@ def get_conf(zip_url, user_name, pwd) :
dir = '/'.join(new_parts) dir = '/'.join(new_parts)
excel_dir = '' excel_dir = ''
fileindex_dir = ''
excel_dir = '/'.join(parts[2:]) excel_dir = '/'.join(parts[2:])
fileindex_dir = '/'.join(parts[2:])
for index, part in enumerate(parts, 0) : for index, part in enumerate(parts, 0) :
if part == 'Images' and index - 2 > 2 : if part == 'Images' and index - 2 > 2 :
excel_dir = '/'.join(parts[2:index-2]) excel_dir = '/'.join(parts[2:index-2])
fileindex_dir = '/'.join(parts[2:index])
if part == '图像类' and index > 2 : if part == '图像类' and index > 2 :
excel_dir = '/'.join(parts[2:index]) excel_dir = '/'.join(parts[2:index])
fileindex_dir = '/'.join(parts[2:index+2])
# 配置信息 # 配置信息
@ -785,7 +959,8 @@ def get_conf(zip_url, user_name, pwd) :
'domain': '', # 工作组留空 'domain': '', # 工作组留空
'share': parts[1], 'share': parts[1],
'dir': dir, 'dir': dir,
'excel_dir': excel_dir 'excel_dir': excel_dir,
'fileindex_dir': fileindex_dir,
} }
return config return config
@ -813,8 +988,8 @@ def get_road_dict(dir,user_name,pwd) :
found_paths = scanner.find_files_by_name( found_paths = scanner.find_files_by_name(
share_path=config['share'], share_path=config['share'],
file_name='每公里指标明细表*.xls*', file_name='每公里指标明细表*.xls*',
start_dir=config['dir'], start_dir=config['excel_dir'],
max_depth=4 max_depth=2
) )
print(f"\n找到 {len(found_paths)}'每公里指标明细表*.xls*' 文件:") print(f"\n找到 {len(found_paths)}'每公里指标明细表*.xls*' 文件:")
for i, path in enumerate(found_paths, 1): for i, path in enumerate(found_paths, 1):
@ -845,8 +1020,8 @@ def get_road_dict_for_width(config, scanner):
found_paths_for_width = scanner.find_files_by_name( found_paths_for_width = scanner.find_files_by_name(
share_path=config['share'], share_path=config['share'],
file_name='nl_yy_glzb*.xls', file_name='nl_yy_glzb*.xls',
start_dir=config['dir'], start_dir=config['excel_dir'],
max_depth=4 max_depth=2
) )
road_dict_for_width = {} road_dict_for_width = {}
if len(found_paths_for_width) > 0 : if len(found_paths_for_width) > 0 :
@ -871,8 +1046,8 @@ def get_pile_dict(dir,user_name,pwd) :
found_paths = scanner.find_files_by_name( found_paths = scanner.find_files_by_name(
share_path=config['share'], share_path=config['share'],
file_name='fileindex.txt', file_name='fileindex.txt',
start_dir=config['dir'], start_dir=config['fileindex_dir'],
max_depth=2 max_depth=4
) )
print(f"\n找到 {len(found_paths)}'fileindex.txt' 文件:") print(f"\n找到 {len(found_paths)}'fileindex.txt' 文件:")
for i, path in enumerate(found_paths, 1): for i, path in enumerate(found_paths, 1):

View File

@ -7,6 +7,7 @@ from openpyxl import Workbook
from openpyxl.styles import Font, Alignment from openpyxl.styles import Font, Alignment
from openpyxl.utils import get_column_letter from openpyxl.utils import get_column_letter
import glob import glob
import shutil
from datetime import datetime from datetime import datetime
try: try:
# 相对导入 # 相对导入
@ -15,6 +16,33 @@ except ImportError:
# 直接运行时回退到绝对导入 # 直接运行时回退到绝对导入
import smb_tool import smb_tool
# ---------------- 图片操作 ----------------
def read_image(image_path):
    """Read an image from disk, tolerating non-ASCII (e.g. Chinese) paths.

    Returns the decoded BGR image, or None when reading/decoding fails.
    """
    try:
        normalized = smb_tool.standardized_path(image_path)
        # np.fromfile + imdecode handles paths where cv2.imread fails on
        # non-ASCII characters.
        raw = np.fromfile(normalized, dtype=np.uint8)
        return cv2.imdecode(raw, cv2.IMREAD_COLOR)
    except Exception as e:
        print(f"读取图片失败: {image_path}, 错误: {e}")
        return None
def save_image(image, save_path):
    """Write *image* as a JPEG, tolerating non-ASCII (e.g. Chinese) paths.

    Returns True on success, False when encoding or writing fails.
    """
    try:
        target = smb_tool.standardized_path(save_path)
        ok, encoded = cv2.imencode('.jpg', image)
        if not ok:
            print(f"编码图片失败: {save_path}")
            return False
        # ndarray.tofile sidesteps cv2.imwrite's trouble with CJK paths.
        encoded.tofile(target)
        return True
    except Exception as e:
        print(f"保存图片失败: {save_path}, 错误: {e}")
        return False
# ---------------- 常量 ---------------- # ---------------- 常量 ----------------
CELL_WIDTH = 0.1 # 每格宽 (米) CELL_WIDTH = 0.1 # 每格宽 (米)
CELL_HEIGHT = 0.1 # 每格高 (米) CELL_HEIGHT = 0.1 # 每格高 (米)
@ -63,8 +91,10 @@ def calc_grid_param(pic_width, pic_height, actual_width, actual_height, grid_x_c
def draw_grid_on_image(image_path, grid_cells, cell_size=(GRID_WIDTH, GRID_HEIGHT), save_path=None): def draw_grid_on_image(image_path, grid_cells, cell_size=(GRID_WIDTH, GRID_HEIGHT), save_path=None):
image = cv2.imread(image_path) # 使用公共方法读取图片
if image is None: return image = read_image(image_path)
if image is None: return None
h, w = image.shape[:2] h, w = image.shape[:2]
cell_w, cell_h = cell_size cell_w, cell_h = cell_size
cols = w // cell_w cols = w // cell_w
@ -84,7 +114,9 @@ def draw_grid_on_image(image_path, grid_cells, cell_size=(GRID_WIDTH, GRID_HEIGH
cv2.line(image,(i,0),(i,h),(100,100,100),1) cv2.line(image,(i,0),(i,h),(100,100,100),1)
for j in range(0, h, cell_h): for j in range(0, h, cell_h):
cv2.line(image,(0,j),(w,j),(100,100,100),1) cv2.line(image,(0,j),(w,j),(100,100,100),1)
if save_path: cv2.imwrite(save_path,image) if save_path:
# 使用公共方法保存图片
save_image(image, save_path)
return image return image
def detect_road_type_from_content(label_file): def detect_road_type_from_content(label_file):
@ -102,6 +134,14 @@ def detect_road_type_from_content(label_file):
if kw in content: return "gravel" if kw in content: return "gravel"
return "gravel" return "gravel"
def get_road_code(pile_dict):
    """Return the road code (first field) of the first pile-dict entry.

    Args:
        pile_dict: mapping of image file name -> list of fields whose first
            element is the road code.

    Returns:
        The road code, or None when the dict is empty or the first entry
        has no fields.
    """
    # next(iter(...)) takes the first key without materializing them all;
    # the default also guards an empty dict (the old list(...)[0] raised
    # IndexError there).
    img_file_name = next(iter(pile_dict), None)
    if img_file_name is None:
        return None
    parts = pile_dict.get(img_file_name)
    return parts[0] if parts else None
def get_road_info(road_dict, pile_dict, img_file_name): def get_road_info(road_dict, pile_dict, img_file_name):
"""获取路线信息""" """获取路线信息"""
parts = pile_dict.get(img_file_name) parts = pile_dict.get(img_file_name)
@ -172,8 +212,8 @@ def yoloseg_to_grid_share_dir(road_dict,pile_dict,image_path,label_file,cell_ind
else: class_map = CLASS_MAP_GRAVEL else: class_map = CLASS_MAP_GRAVEL
class_names = list(class_map.keys()) class_names = list(class_map.keys())
img = cv2.imread(image_path) img = read_image(image_path)
if img is None: return "", {} if img is None: return "", {}, road_type, 20 * RESERVED_CELL_NUM
h, w = img.shape[:2] h, w = img.shape[:2]
cols = max(1, w//grid_width) cols = max(1, w//grid_width)
rows = max(1, h//grid_height) rows = max(1, h//grid_height)
@ -286,6 +326,12 @@ def yoloseg_to_grid(image_path,label_file,cover_ratio=COVER_RATIO):
all_class_cells[cname].update(covered_cells) all_class_cells[cname].update(covered_cells)
return '\n'.join(result_lines), all_class_cells, road_type return '\n'.join(result_lines), all_class_cells, road_type
def generate_classes_txt_content(road_type):
    """Return the classes.txt body (one disease class per line) for a road type.

    Unknown road types fall back to the asphalt class list.
    """
    catalogs = {
        "asphalt": "龟裂\n块状裂缝\n纵向裂缝\n横向裂缝\n沉陷\n车辙\n波浪拥包\n坑槽\n松散\n泛油\n修补",
        "cement": "破碎板\n裂缝\n板角断裂\n错台\n拱起\n边角剥落\n接缝料损坏\n坑洞\n唧泥\n露骨\n修补",
        "gravel": "坑槽\n沉陷\n车辙\n波浪搓板",
    }
    return catalogs.get(road_type, catalogs["asphalt"])
def generate_header(road_type): def generate_header(road_type):
if road_type=="asphalt": return "起点桩号(km),识别宽度(m),破损率DR(%),龟裂,块状裂缝,纵向裂缝,横向裂缝,沉陷,车辙,波浪拥包,坑槽,松散,泛油,修补" if road_type=="asphalt": return "起点桩号(km),识别宽度(m),破损率DR(%),龟裂,块状裂缝,纵向裂缝,横向裂缝,沉陷,车辙,波浪拥包,坑槽,松散,泛油,修补"
if road_type=="cement": return "起点桩号(km),识别宽度(m),破损率DR(%),破碎板,裂缝,板角断裂,错台,拱起,边角剥落,接缝料损坏,坑洞,唧泥,露骨,修补" if road_type=="cement": return "起点桩号(km),识别宽度(m),破损率DR(%),破碎板,裂缝,板角断裂,错台,拱起,边角剥落,接缝料损坏,坑洞,唧泥,露骨,修补"
@ -557,8 +603,11 @@ def format_number_to_k_code(number):
return f"K{integer_part:04d}+{decimal_part}" return f"K{integer_part:04d}+{decimal_part}"
# ---------------- 主函数-共享目录 ---------------- # ---------------- 主函数-共享目录 ----------------
def process_dir(road_dict,pile_dict,dir="output",cell_area=CELL_AREA,cell_width=CELL_WIDTH,cell_height=CELL_HEIGHT,grid_width=GRID_WIDTH,grid_height=GRID_HEIGHT): def process_dir(road_dict,pile_dict,current_time,dir="output",cell_area=CELL_AREA,cell_width=CELL_WIDTH,cell_height=CELL_HEIGHT,grid_width=GRID_WIDTH,grid_height=GRID_HEIGHT):
os.makedirs(dir,exist_ok=True) os.makedirs(dir,exist_ok=True)
os.makedirs(f"{dir}/DR",exist_ok=True)
os.makedirs(f"{dir}/excel",exist_ok=True)
os.makedirs(f"{dir}/sum",exist_ok=True)
# 识别宽度(米) # 识别宽度(米)
road_recognize_width = road_dict.get('识别宽度(米)', 3.6) road_recognize_width = road_dict.get('识别宽度(米)', 3.6)
road_recognize_height = ROAD_RECOGNIZE_HEIGHT road_recognize_height = ROAD_RECOGNIZE_HEIGHT
@ -583,7 +632,9 @@ def process_dir(road_dict,pile_dict,dir="output",cell_area=CELL_AREA,cell_width=
# 读取图片并计算宽高像素点 # 读取图片并计算宽高像素点
if grid_width is None or grid_height is None : if grid_width is None or grid_height is None :
img = cv2.imread(image_path) # 使用公共方法读取图片
img = read_image(image_path)
if img is not None : if img is not None :
h, w = img.shape[:2] h, w = img.shape[:2]
# 网格像素宽高 # 网格像素宽高
@ -625,7 +676,8 @@ def process_dir(road_dict,pile_dict,dir="output",cell_area=CELL_AREA,cell_width=
DR= total_area / all_cell_num * 100 DR= total_area / all_cell_num * 100
summary_data.append((pile_no, DR, counts, road_type)) summary_data.append((pile_no, DR, counts, road_type))
current_time = datetime.now().strftime("%Y%m%d%H%M%S") # classes.txt
process_classes_txt(road_dict, pile_dict, dir, image_path)
# 写桩号问题列表.txt # 写桩号问题列表.txt
process_damage_detail_txt(road_dict, pile_dict, dir, summary_data, current_time) process_damage_detail_txt(road_dict, pile_dict, dir, summary_data, current_time)
@ -639,6 +691,13 @@ def process_dir(road_dict,pile_dict,dir="output",cell_area=CELL_AREA,cell_width=
# 综合明细表.xlsx # 综合明细表.xlsx
process_damage_composite_excel(road_dict, pile_dict, dir, summary_data, current_time) process_damage_composite_excel(road_dict, pile_dict, dir, summary_data, current_time)
def process_classes_txt(road_dict, pile_dict, dir, image_path):
    """Write <dir>/classes.txt with the disease classes for the detected road type.

    Args:
        road_dict: road metadata mapping.
        pile_dict: image-file -> pile fields mapping.
        dir: output directory (assumed to already exist).
        image_path: path of an image used to detect the road type.
    """
    img_file_name = os.path.basename(image_path)
    _, _, road_type = detect_road_type_from_road_dict(road_dict, pile_dict, img_file_name)
    classes_txt_content = generate_classes_txt_content(road_type)
    # Explicit UTF-8: the content is Chinese text, and the platform default
    # encoding (e.g. a non-CJK locale) could raise UnicodeEncodeError.
    with open(f"{dir}/classes.txt", "w", encoding="utf-8") as file:
        file.write(classes_txt_content)
def process_damage_composite_excel(road_dict, pile_dict, dir, summary_data, current_time): def process_damage_composite_excel(road_dict, pile_dict, dir, summary_data, current_time):
print("输出:综合明细表.xlsx") print("输出:综合明细表.xlsx")
@ -646,6 +705,7 @@ def process_damage_composite_excel(road_dict, pile_dict, dir, summary_data, curr
data1 = process_info(road_dict,pile_dict,summary_data,current_time,10) data1 = process_info(road_dict,pile_dict,summary_data,current_time,10)
data2 = process_info(road_dict,pile_dict,summary_data,current_time,100) data2 = process_info(road_dict,pile_dict,summary_data,current_time,100)
data3 = process_info(road_dict,pile_dict,summary_data,current_time,1000) data3 = process_info(road_dict,pile_dict,summary_data,current_time,1000)
road_code = get_road_code(pile_dict)
excel_data = [ excel_data = [
{ {
@ -667,7 +727,12 @@ def process_damage_composite_excel(road_dict, pile_dict, dir, summary_data, curr
'data': data3 'data': data3
} }
] ]
create_multiple_sheets_with_multiple_headers(f"{dir}/excel/综合明细表.xlsx", excel_data) out_file = f"{dir}/excel/{road_code}-综合明细表.xlsx"
out_file_sum = f"{dir}/sum/{road_code}-综合明细表.xlsx"
create_multiple_sheets_with_multiple_headers(out_file, excel_data)
# 将文件复制到汇总目录
shutil.copy(out_file, out_file_sum)
def process_damage_detail_txt(road_dict, pile_dict, dir, summary_data, current_time): def process_damage_detail_txt(road_dict, pile_dict, dir, summary_data, current_time):
@ -721,7 +786,14 @@ def process_damage_detail_excel(road_dict, pile_dict, dir, cell_area, cell_width
data_list.append(excel_data) data_list.append(excel_data)
all_data = [headers] + data_list all_data = [headers] + data_list
smb_tool.write_to_excel_pandas(all_data, f"{dir}/excel/病害明细列表.xlsx")
road_code = get_road_code(pile_dict)
out_file = f"{dir}/excel/{road_code}-病害明细列表.xlsx"
out_file_sum = f"{dir}/sum/{road_code}-病害明细列表.xlsx"
smb_tool.write_to_excel_pandas(all_data, out_file)
# 将文件复制到汇总目录
shutil.copy(out_file, out_file_sum)
def process_damage_txt(road_dict, pile_dict, dir, summary_data, current_time): def process_damage_txt(road_dict, pile_dict, dir, summary_data, current_time):
@ -742,6 +814,7 @@ def process_damage_txt(road_dict, pile_dict, dir, summary_data, current_time):
min_pile, max_pile = get_min_max_pile(group) min_pile, max_pile = get_min_max_pile(group)
print(f"{road_code}-DR-{min_pile:0.3f}-{max_pile:0.3f}-{current_time}.txt") print(f"{road_code}-DR-{min_pile:0.3f}-{max_pile:0.3f}-{current_time}.txt")
out_file = os.path.join(f"{dir}/DR",f"{road_code}-DR-{min_pile:0.3f}-{max_pile:0.3f}-{current_time}.txt") out_file = os.path.join(f"{dir}/DR",f"{road_code}-DR-{min_pile:0.3f}-{max_pile:0.3f}-{current_time}.txt")
out_file_sum = os.path.join(f"{dir}/sum",f"{road_code}-DR-{min_pile:0.3f}-{max_pile:0.3f}-{current_time}.txt")
header = generate_header(road_type) header = generate_header(road_type)
group_list = [] group_list = []
@ -849,6 +922,8 @@ def process_damage_txt(road_dict, pile_dict, dir, summary_data, current_time):
tmp_end = tmp_start + increment tmp_end = tmp_start + increment
tmp_end = round(tmp_end, 3) tmp_end = round(tmp_end, 3)
# 将文件复制到汇总目录
shutil.copy(out_file, out_file_sum)
print(f"输出完成: {out_file}") print(f"输出完成: {out_file}")
@ -1037,7 +1112,8 @@ if __name__=="__main__":
output_dir = "D:/devForBdzlWork/ai-train_platform/predictions/jlp/C006500107A" output_dir = "D:/devForBdzlWork/ai-train_platform/predictions/jlp/C006500107A"
pile_dict = get_pile_dict(output_dir) pile_dict = get_pile_dict(output_dir)
road_dict = get_road_dict(output_dir) road_dict = get_road_dict(output_dir)
process_dir(road_dict, pile_dict, output_dir) current_time = datetime.now().strftime("%Y%m%d%H%M%S")
process_dir(road_dict, pile_dict, current_time, output_dir)
# arr = [44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 68, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179] # arr = [44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 68, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179]
# for a in arr : # for a in arr :