project : swcb

This commit is contained in:
yooooger 2025-11-11 09:46:49 +08:00
parent 04c0b96f33
commit 3e63dc6f8e
4 changed files with 839 additions and 0 deletions


@@ -0,0 +1,249 @@
import os
import re
import cv2
import numpy as np
import shutil
from concurrent.futures import ThreadPoolExecutor
# ------------------ Utility functions ------------------
def clean_filename(name):
"""去掉空格/换行/制表符,小写化"""
name = name.strip()
name = re.sub(r'[\s\r\n\t]+', '', name)
return name.lower()
def num_to_coord(num, cols, cell_width, cell_height, offset=1):
n = num - 1 + offset
r = n // cols
c = n % cols
x1 = c * cell_width
y1 = r * cell_height
x2 = x1 + cell_width
y2 = y1 + cell_height
return x1, y1, x2, y2
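# Illustrative example (cols=18 assumed): with cell_width=108 and cell_height=102,
# the default offset=1 makes n equal to num, so num_to_coord(1, 18, 108, 102)
# gives (108, 0, 216, 102) -- row 0, column 1 -- and num_to_coord(19, 18, 108, 102)
# gives (108, 102, 216, 204) -- row 1, column 1.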
def polygon_to_yolo(poly, img_width, img_height):
flat = [coord for point in poly for coord in point]
return [flat[i] / (img_width if i % 2 == 0 else img_height) for i in range(len(flat))]
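# Illustrative example (image size assumed): for a 1920x1080 image,
# polygon_to_yolo([(0, 0), (960, 540), (1920, 1080)], 1920, 1080)
# returns [0.0, 0.0, 0.5, 0.5, 1.0, 1.0] -- x values normalized by width,
# y values by height, in the flat "x1 y1 x2 y2 ..." order used by YOLO-Seg labels.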
def convex_hull_poly(points):
if not points:
return []
pts = np.array(points, dtype=np.int32)
hull = cv2.convexHull(pts)
return hull.reshape(-1, 2).tolist()
color_map = {
0: (0, 255, 255),
1: (255, 0, 255),
2: (0, 255, 0),
3: (255, 0, 0),
4: (0, 0, 255),
5: (255, 255, 0),
6: (128, 128, 0),
7: (128, 0, 128),
8: (0, 128, 128),
9: (128, 128, 128),
10: (0, 0, 128),
11: (0, 128, 0)
}
# ------------------ Image matching ------------------
def find_matching_image(txt_path, input_root):
"""
Aggressive matching:
- strip _PartClass
- strip .txt
- if a .jpg is left in the TXT name, strip that too
- compare the cleaned core name with each image's base name
"""
txt_name = os.path.basename(txt_path).lower()
# Strip _partclass and .txt
base_name = re.sub(r'(_partclass)?\.txt$', '', txt_name)
# Also strip any leftover .jpg
base_name = re.sub(r'\.jpg$', '', base_name)
for root, _, files in os.walk(input_root):
for f in files:
if f.lower().endswith((".jpg", ".jpeg", ".png")):
img_base = os.path.splitext(f)[0].lower()
if base_name == img_base:
return os.path.join(root, f)
return None
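# Illustrative example (file names assumed): "IMG_0001_PartClass.txt" and
# "IMG_0001.jpg.txt" both normalize to "img_0001", so either would be paired
# with an image whose base name is IMG_0001 (case-insensitive), e.g. IMG_0001.jpg.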
# ------------------ Processing functions ------------------
def process_pixel_txt(img_path, txt_path, class_map, output_root):
image = cv2.imread(img_path)
if image is None:
return False
h, w = image.shape[:2]
vis_img = image.copy()
yolo_labels = []
unknown_labels = set()
with open(txt_path, "r", encoding="utf-8") as f:
for line in f:
parts = line.strip().split()
if len(parts) < 5:
continue
try:
x, y, w_box, h_box = map(int, parts[:4])
except ValueError:
continue
label = parts[4]
cls_id = class_map.get(label, -1)
if cls_id == -1:
unknown_labels.add(label)
continue
poly = [(x, y), (x+w_box, y), (x+w_box, y+h_box), (x, y+h_box)]
hull = convex_hull_poly(poly)
yolo_labels.append(f"{cls_id} " + " ".join(map(str, polygon_to_yolo(hull, w, h))))
cv2.polylines(vis_img, [np.array(hull, np.int32)], True,
color=color_map.get(cls_id,(255,255,255)), thickness=2)
if unknown_labels:
print(f"⚠️ Unknown classes {unknown_labels} in file: {txt_path}")
if not yolo_labels:
return False
base = os.path.splitext(os.path.basename(img_path))[0]
os.makedirs(os.path.join(output_root,"images"), exist_ok=True)
os.makedirs(os.path.join(output_root,"labels"), exist_ok=True)
os.makedirs(os.path.join(output_root,"visual"), exist_ok=True)
shutil.copy2(img_path, os.path.join(output_root,"images", os.path.basename(img_path)))
with open(os.path.join(output_root,"labels", base+".txt"), "w", encoding="utf-8") as f:
f.write("\n".join(yolo_labels))
cv2.imwrite(os.path.join(output_root,"visual", base+"-visual.jpg"), vis_img)
print(f"✅ Processed pixel TXT: {base}")
return True
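# Illustrative pixel-TXT line (values assumed): "120 80 64 48 裂缝" describes a
# box at x=120, y=80 with width 64 and height 48 labelled 裂缝; it is written out
# as one YOLO-Seg polygon line "<cls_id> x1 y1 x2 y2 x3 y3 x4 y4" in normalized coordinates.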
def process_grid_txt(img_path, txt_path, class_map, output_root):
image = cv2.imread(img_path)
if image is None:
return False
h, w = image.shape[:2]
cell_width, cell_height = 108, 102
cols = max(1, w // cell_width)
vis_img = image.copy()
overlay = image.copy()
alpha = 0.5
yolo_labels = []
with open(txt_path,"r",encoding="utf-8") as f:
for line in f:
line = line.strip()
if not line:
continue
numbers = re.findall(r"(\d+)(?=-|$)", line.split()[-1])
numbers = [int(n) for n in numbers]
cname = None
for key in class_map.keys():
if line.startswith(key):
cname = key
break
if cname is None or not numbers:
continue
for num in numbers:
x1, y1, x2, y2 = num_to_coord(num, cols, cell_width, cell_height)
cv2.rectangle(overlay, (x1,y1), (x2,y2), color_map.get(class_map[cname],(128,128,128)),-1)
cv2.addWeighted(overlay, alpha, image, 1-alpha, 0, image)
points = []
for num in numbers:
x1, y1, x2, y2 = num_to_coord(num, cols, cell_width, cell_height)
points.extend([(x1,y1),(x2,y1),(x2,y2),(x1,y2)])
hull = convex_hull_poly(points)
cls_id = class_map[cname]
pts = np.array(hull, np.int32).reshape((-1,1,2))
cv2.polylines(vis_img, [pts], True, color_map.get(cls_id,(128,128,128)), 2)
yolo_labels.append(f"{cls_id} " + " ".join(map(str, polygon_to_yolo(hull, w, h))))
if not yolo_labels:
return False
base = os.path.splitext(os.path.basename(img_path))[0]
shutil.copy2(img_path, os.path.join(output_root,"images", os.path.basename(img_path)))
with open(os.path.join(output_root,"labels", base+".txt"), "w", encoding="utf-8") as f:
f.write("\n".join(yolo_labels))
cv2.imwrite(os.path.join(output_root,"visual", base+"-visual.jpg"), vis_img)
cv2.imwrite(os.path.join(output_root,"highlighted", base+"-highlighted.jpg"), image)
return True
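# Illustrative grid-TXT (_PartClass) line (values assumed): "裂缝 12-13-30-" marks
# cells 12, 13 and 30 for class 裂缝; the cell corners are merged into a convex
# hull and written as a single polygon label.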
# ------------------ Batch processing ------------------
def batch_process_txt_first(input_root, class_map, output_root="output", max_workers=4):
os.makedirs(os.path.join(output_root,"images"), exist_ok=True)
os.makedirs(os.path.join(output_root,"labels"), exist_ok=True)
os.makedirs(os.path.join(output_root,"visual"), exist_ok=True)
os.makedirs(os.path.join(output_root,"highlighted"), exist_ok=True)
# Collect all TXT files
txt_files = []
for root, _, files in os.walk(input_root):
for file in files:
if file.lower().endswith(".txt"):
txt_files.append(os.path.join(root, file))
success_count, fail_count = 0, 0
log_lines = []
fail_logs = []
def process_single(txt_path):
nonlocal success_count, fail_count
img_path = find_matching_image(txt_path, input_root)
if img_path:
try:
if "_partclass" in txt_path.lower():
status = process_grid_txt(img_path, txt_path, class_map, output_root)
log_lines.append(f"{os.path.basename(txt_path)} -> Grid TXT processed with {os.path.basename(img_path)}")
else:
status = process_pixel_txt(img_path, txt_path, class_map, output_root)
log_lines.append(f"{os.path.basename(txt_path)} -> Pixel TXT processed with {os.path.basename(img_path)}")
if status:
success_count += 1
else:
fail_count += 1
fail_logs.append(f"{os.path.basename(txt_path)} -> Processed but no valid labels generated")
except Exception as e:
fail_count += 1
fail_logs.append(f"{os.path.basename(txt_path)} -> Processing error: {e}")
else:
fail_count += 1
fail_logs.append(f"{os.path.basename(txt_path)} -> No matching image found")
with ThreadPoolExecutor(max_workers=max_workers) as executor:
executor.map(process_single, txt_files)
# Write the log
log_file = os.path.join(output_root, "process_log.txt")
with open(log_file, "w", encoding="utf-8") as f:
f.write("\n".join(log_lines + ["\nFailed files:"] + fail_logs))
print(f"\n✅ Batch processing finished: {success_count} succeeded, {fail_count} failed")
if fail_logs:
print("⚠️ Failed files and reasons:")
for line in fail_logs:
print(line)
print(f"📄 Processing log saved: {log_file}")
# ------------------ Main ------------------
if __name__ == "__main__":
input_root = r"D:\work\develop\LF-where\01"
output_root = r"D:\work\develop\LF-where\out"
class_map = {
"裂缝": 0,
"横向裂缝": 1,
"纵向裂缝": 2,
"修补": 3,
"坑洞": 4,
"网裂": 5,
"破碎板":6,
}
batch_process_txt_first(input_root, class_map, output_root, max_workers=8)


@@ -0,0 +1,258 @@
import os
import re
import cv2
import numpy as np
import shutil
from concurrent.futures import ThreadPoolExecutor
# ------------------ Utility functions ------------------
def clean_filename(name):
name = name.strip()
name = re.sub(r'[\s\r\n\t]+', '', name)
return name.lower()
def num_to_coord(num, cols, cell_width, cell_height, offset=1):
n = num - 1 + offset
r = n // cols
c = n % cols
x1 = c * cell_width
y1 = r * cell_height
x2 = x1 + cell_width
y2 = y1 + cell_height
return x1, y1, x2, y2
def polygon_to_yolo(poly, img_width, img_height):
flat = [coord for point in poly for coord in point]
return [flat[i] / (img_width if i % 2 == 0 else img_height) for i in range(len(flat))]
def convex_hull_poly(points):
if not points:
return []
pts = np.array(points, dtype=np.int32)
hull = cv2.convexHull(pts)
return hull.reshape(-1, 2).tolist()
def expand_polygon(poly, expand_px=3):
"""多边形顶点膨胀 expand_px 像素"""
if not poly:
return []
pts = np.array(poly, dtype=np.int32)
cx = np.mean(pts[:, 0])
cy = np.mean(pts[:, 1])
vec = pts - np.array([[cx, cy]])
norm = np.linalg.norm(vec, axis=1, keepdims=True)
norm[norm == 0] = 1
vec_unit = vec / norm
pts_expanded = pts + (vec_unit * expand_px).astype(int)
return pts_expanded.tolist()
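# Illustrative example (square assumed): for poly [(0,0),(10,0),(10,10),(0,10)]
# the centroid is (5,5); each vertex is pushed 3 px further from the centroid
# along its unit direction, so (0,0) moves to roughly (-2,-2) after the int cast.
# Note the dilated polygon can extend slightly beyond the image bounds.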
color_map = {
0: (0, 255, 255), 1: (255, 0, 255), 2: (0, 255, 0), 3: (255, 0, 0),
4: (0, 0, 255), 5: (255, 255, 0), 6: (128, 128, 0), 7: (128, 0, 128),
8: (0, 128, 128), 9: (128, 128, 128), 10: (0, 0, 128), 11: (0, 128, 0)
}
# ------------------ Image matching ------------------
def find_matching_image(txt_path, input_root):
txt_name = os.path.basename(txt_path).lower()
base_name = re.sub(r'(_partclass)?\.txt$', '', txt_name)
base_name = re.sub(r'\.jpg$', '', base_name)
for root, _, files in os.walk(input_root):
for f in files:
if f.lower().endswith((".jpg", ".jpeg", ".png")):
img_base = os.path.splitext(f)[0].lower()
if base_name == img_base:
return os.path.join(root, f)
return None
# ------------------ Processing functions (with polygon dilation) ------------------
def process_pixel_txt(img_path, txt_path, class_map, output_root):
image = cv2.imread(img_path)
if image is None:
return False
h, w = image.shape[:2]
vis_img = image.copy()
yolo_labels = []
unknown_labels = set()
with open(txt_path, "r", encoding="utf-8") as f:
for line in f:
parts = line.strip().split()
if len(parts) < 5:
continue
try:
x, y, w_box, h_box = map(int, parts[:4])
except ValueError:
continue
label = parts[4]
cls_id = class_map.get(label, -1)
if cls_id == -1:
unknown_labels.add(label)
continue
poly = [(x, y), (x + w_box, y), (x + w_box, y + h_box), (x, y + h_box)]
hull = convex_hull_poly(poly)
hull = expand_polygon(hull, expand_px=3)
yolo_labels.append(f"{cls_id} " + " ".join(map(str, polygon_to_yolo(hull, w, h))))
cv2.polylines(vis_img, [np.array(hull, np.int32)], True,
color=color_map.get(cls_id, (255, 255, 255)), thickness=2)
if unknown_labels:
print(f"⚠️ Unknown classes {unknown_labels} in file: {txt_path}")
if not yolo_labels:
return False
base = os.path.splitext(os.path.basename(img_path))[0]
folder_img = os.path.join(output_root, "images")
os.makedirs(folder_img, exist_ok=True)
shutil.copy2(img_path, os.path.join(folder_img, os.path.basename(img_path)))
folder_labels = os.path.join(output_root, "labels")
os.makedirs(folder_labels, exist_ok=True)
with open(os.path.join(folder_labels, base + ".txt"), "w", encoding="utf-8") as f:
f.write("\n".join(yolo_labels))
folder_vis = os.path.join(output_root, "visual")
os.makedirs(folder_vis, exist_ok=True)
cv2.imwrite(os.path.join(folder_vis, base + "-visual.jpg"), vis_img)
print(f"✅ Processed Pixel TXT: {base}")
return True
def process_grid_txt(img_path, txt_path, class_map, output_root):
image = cv2.imread(img_path)
if image is None:
return False
h, w = image.shape[:2]
cell_width, cell_height = 108, 102
cols = max(1, w // cell_width)
vis_img = image.copy()
overlay = image.copy()
alpha = 0.5
yolo_labels = []
with open(txt_path, "r", encoding="utf-8") as f:
for line in f:
line = line.strip()
if not line:
continue
numbers = re.findall(r"(\d+)(?=-|$)", line.split()[-1])
numbers = [int(n) for n in numbers]
cname = None
for key in class_map.keys():
if line.startswith(key):
cname = key
break
if cname is None or not numbers:
continue
for num in numbers:
x1, y1, x2, y2 = num_to_coord(num, cols, cell_width, cell_height)
cv2.rectangle(overlay, (x1, y1), (x2, y2), color_map.get(class_map[cname], (128, 128, 128)), -1)
cv2.addWeighted(overlay, alpha, image, 1 - alpha, 0, image)
points = []
for num in numbers:
x1, y1, x2, y2 = num_to_coord(num, cols, cell_width, cell_height)
points.extend([(x1, y1), (x2, y1), (x2, y2), (x1, y2)])
hull = convex_hull_poly(points)
hull = expand_polygon(hull, expand_px=3)
cls_id = class_map[cname]
pts = np.array(hull, np.int32).reshape((-1, 1, 2))
cv2.polylines(vis_img, [pts], True, color_map.get(cls_id, (128, 128, 128)), 2)
yolo_labels.append(f"{cls_id} " + " ".join(map(str, polygon_to_yolo(hull, w, h))))
if not yolo_labels:
return False
base = os.path.splitext(os.path.basename(img_path))[0]
for folder_name in ["images", "labels", "visual", "highlighted"]:
folder = os.path.join(output_root, folder_name)
os.makedirs(folder, exist_ok=True)
shutil.copy2(img_path, os.path.join(output_root, "images", os.path.basename(img_path)))
with open(os.path.join(output_root, "labels", base + ".txt"), "w", encoding="utf-8") as f:
f.write("\n".join(yolo_labels))
cv2.imwrite(os.path.join(output_root, "visual", base + "-visual.jpg"), vis_img)
cv2.imwrite(os.path.join(output_root, "highlighted", base + "-highlighted.jpg"), image)
print(f"✅ Processed Grid TXT: {base}")
return True
# ------------------ Batch processing ------------------
def batch_process_txt_first(input_root, output_root, mode_type="5211", max_workers=4):
if mode_type == "5211":
class_maps = {
"asphalt": class_map_asphalt,
"cream": class_map_cream,
"gravel": class_map_gravel,
}
elif mode_type == "5210":
class_maps = {
"asphalt": class_map_asphalt_road,
"cream": class_map_cream_road,
"gravel": class_map_gravel,
}
else:
raise ValueError("mode_type must be '5210' or '5211'")
txt_files = []
for root, _, files in os.walk(input_root):
for file in files:
if file.lower().endswith(".txt"):
txt_files.append(os.path.join(root, file))
def process_single(txt_path):
img_path = find_matching_image(txt_path, input_root)
if not img_path:
print(f"⚠️ No matching image found: {txt_path}")
return
with open(txt_path, "r", encoding="utf-8") as f:
content = f.read().lower()
if any(k in content for k in ["水泥", "cement", "cream"]):
road_type = "cream"
elif any(k in content for k in ["沥青", "asphalt"]):
road_type = "asphalt"
else:
road_type = "gravel"
selected_map = class_maps[road_type]
output_folder = os.path.join(output_root, road_type)
os.makedirs(output_folder, exist_ok=True)
if "_partclass" in txt_path.lower():
process_grid_txt(img_path, txt_path, selected_map, output_folder)
else:
process_pixel_txt(img_path, txt_path, selected_map, output_folder)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
executor.map(process_single, txt_files)
# ------------------ Class maps ------------------
class_map_asphalt = {
# "纵向裂缝": 0, "横向裂缝": 1, "网裂": 2, "坑槽": 3, "松散": 4,
"龟裂": 0, "块状裂缝": 1, "纵向裂缝": 2, "横向裂缝": 3, "沉陷": 4, "车辙": 5,
"波浪拥包": 6, "坑槽": 7, "松散": 8, "泛油": 9, "修补": 10,
}
class_map_cream = {
"破碎板": 0, "裂缝": 1, "坑洞": 2, "露骨": 3, "错台": 4, "拱起": 5,
}
class_map_gravel = {
"坑槽": 0, "沉陷": 1, "车辙": 2, "波浪搓板": 3,
}
class_map_asphalt_road = {
"龟裂": 0, "块状裂缝": 1, "纵向裂缝": 2, "横向裂缝": 3, "沉陷": 4, "车辙": 5,
"波浪拥包": 6, "坑槽": 7, "松散": 8, "泛油": 9, "修补": 10,
}
class_map_cream_road = {
"破碎板": 0, "裂缝": 1, "板角断裂": 2, "错台": 3, "拱起": 4, "边角剥落": 5,
"接缝料损坏": 6, "坑洞": 7, "唧泥": 8, "露骨": 9, "修补": 10,
}
# ------------------ Main ------------------
if __name__ == "__main__":
input_root = r"D:\work\develop\LF-where\01"
output_root = r"D:\work\develop\LF-where\out"
mode_type = "5211" # 5211 or 5210
batch_process_txt_first(input_root, output_root, mode_type, max_workers=8)


@@ -0,0 +1,146 @@
import os
import re
import cv2
import zipfile
import shutil
import numpy as np
from concurrent.futures import ThreadPoolExecutor
# ------------------ Road-surface class map sets ------------------
CLASS_MAPS_5010 = {
"asphalt": {
"龟裂": 0, "块状裂缝": 1, "纵向裂缝": 2, "横向裂缝": 3,
"沉陷": 4, "车辙": 5, "波浪拥包": 6, "坑槽": 7,
"松散": 8, "泛油": 9, "修补": 10,
},
"cream": {"破碎板": 0, "裂缝": 1, "坑洞": 2, "露骨": 3, "错台": 4, "拱起": 5},
"gravel": {"坑槽": 0, "沉陷": 1, "车辙": 2, "波浪搓板": 3},
}
CLASS_MAPS_5011 = {
"asphalt": {
"龟裂": 0, "块状裂缝": 1, "纵向裂缝": 2, "横向裂缝": 3,
"沉陷": 4, "车辙": 5, "波浪拥包": 6, "坑槽": 7,
"松散": 8, "泛油": 9, "修补": 10,
},
"cream": {
"破碎板": 0, "裂缝": 1, "板角断裂": 2, "错台": 3,
"拱起": 4, "边角剥落": 5, "接缝料损坏": 6,
"坑洞": 7, "唧泥": 8, "露骨": 9, "修补": 10,
},
"gravel": {}, # 可扩展
}
# ------------------ Utility functions ------------------
def detect_road_type(text: str) -> str:
"""Detect the road surface type from label text"""
low = text.lower()
if "沥青" in low:
return "asphalt"
if "水泥" in low:
return "cream"
if "石子" in low or "gravel" in low:
return "gravel"
return "asphalt"
def detect_section_type(path: str) -> str:
"""判断属于5010还是5011"""
name = os.path.basename(path)
if "5011" in name:
return "5011"
if "5010" in name:
return "5010"
# Default to 5010 when not specified
return "5010"
def clean_name(name):
return re.sub(r'[\s\r\n\t]+', '', name)
# ------------------ YOLO-Seg main function ------------------
def yoloseg_to_grid_cells_fixed_v6(image_path, label_path, output_dir, class_map, extra_info=None):
img = cv2.imread(image_path)
if img is None:
return
h, w = img.shape[:2]
txt_name = os.path.splitext(os.path.basename(label_path))[0]
output_txt = os.path.join(output_dir, f"{txt_name}.txt")
with open(label_path, "r", encoding="utf-8") as f:
lines = f.readlines()
with open(output_txt, "w", encoding="utf-8") as out:
for line in lines:
parts = line.strip().split()
if len(parts) < 2:
continue
cls_name = parts[-1]
cls_id = class_map.get(cls_name, -1)
if cls_id == -1:
print(f"Unrecognized class: {cls_name}")
continue
coords = list(map(float, parts[:-1]))
coords = [round(c, 4) for c in coords]
out.write(f"{cls_id} {' '.join(map(str, coords))} {extra_info or ''}\n")
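# Illustrative label line (values assumed): "0.12 0.34 0.56 0.78 裂缝" processed with
# the 5011 cream map and extra_info="5011_cream" is rewritten as
# "1 0.12 0.34 0.56 0.78 5011_cream" -- class name replaced by its id, coordinates
# rounded to 4 decimals, section/road tag appended.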
# ------------------ Batch processing ------------------
def process_zip_yoloseg_with_draw(zip_path, output_root):
temp_dir = "temp_extract"
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
os.makedirs(temp_dir, exist_ok=True)
# Extract the archive
with zipfile.ZipFile(zip_path, 'r') as zf:
zf.extractall(temp_dir)
image_files = []
for root, _, files in os.walk(temp_dir):
for f in files:
if f.lower().endswith((".jpg", ".png", ".jpeg")):
image_files.append(os.path.join(root, f))
print(f"Detected {len(image_files)} images")
def process_one(img_path):
name = os.path.splitext(os.path.basename(img_path))[0]
label_path = os.path.join(os.path.dirname(img_path), f"{name}.txt")
if not os.path.exists(label_path):
print(f"Missing label: {img_path}")
return
# Determine 5010 vs 5011
section_type = detect_section_type(img_path)
# Read the label file to determine the road type
with open(label_path, "r", encoding="utf-8") as f:
txt = f.read()
road_type = detect_road_type(txt)
# Pick the matching class map
if section_type == "5011":
class_map = CLASS_MAPS_5011.get(road_type, {})
else:
class_map = CLASS_MAPS_5010.get(road_type, {})
if not class_map:
print(f"No class map for: {road_type} ({section_type})")
return
out_dir = os.path.join(output_root, road_type, name)
os.makedirs(out_dir, exist_ok=True)
extra_info = f"{section_type}_{road_type}"
yoloseg_to_grid_cells_fixed_v6(img_path, label_path, out_dir, class_map, extra_info=extra_info)
with ThreadPoolExecutor(max_workers=6) as ex:
list(ex.map(process_one, image_files))
shutil.rmtree(temp_dir)
print("Processing finished")
# ------------------ Entry point ------------------
if __name__ == "__main__":
zip_path = r"D:\work\data\road_seg.zip" # input zip path
output_root = r"D:\work\data\output" # output root directory
process_zip_yoloseg_with_draw(zip_path, output_root)


@@ -0,0 +1,186 @@
import os
import zipfile
import shutil
import cv2
import numpy as np
from collections import defaultdict
# ---------------- Constants ----------------
CELL_AREA = 0.0036 # area per grid cell (square meters)
GRID_WIDTH = 108 # grid cell width in pixels
GRID_HEIGHT = 102 # grid cell height in pixels
COVER_RATIO = 0.01 # mask coverage ratio threshold
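# Rough arithmetic behind COVER_RATIO (illustrative): one cell is
# GRID_WIDTH * GRID_HEIGHT = 108 * 102 = 11016 px, so a cell counts as covered
# once just over 1% of it (about 111 px) falls inside the defect mask, and each
# counted cell contributes CELL_AREA = 0.0036 m² to the damaged area.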
# ---------------- Road-surface class maps ----------------
CLASS_MAP_ASPHALT = {
"龟裂":0,"块状裂缝":1,"纵向裂缝":2,"横向裂缝":3,"沉陷":4,"车辙":5,"波浪拥包":6,"坑槽":7,"松散":8,"泛油":9,"修补":10
}
CLASS_MAP_CEMENT = {
"破碎板":0,"裂缝":1,"板角断裂":2,"错台":3,"拱起":4,"边角剥落":5,"接缝料损坏":6,"坑洞":7,"唧泥":8,"露骨":9,"修补":10
}
CLASS_MAP_GRAVEL = {
"坑槽":0,"沉陷":1,"车辙":2,"波浪搓板":3
}
# ---------------- Utility functions ----------------
def num_to_coord(num, cols, cell_w, cell_h):
n = num - 1
r, c = divmod(n, cols)
x1, y1 = c * cell_w, r * cell_h
x2, y2 = x1 + cell_w, y1 + cell_h
return x1, y1, x2, y2
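# Illustrative example (cols=18 assumed): unlike the earlier scripts there is no
# offset here, so num_to_coord(1, 18, 108, 102) gives (0, 0, 108, 102) -- the
# top-left cell -- and num_to_coord(20, 18, 108, 102) gives (108, 102, 216, 204).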
def draw_grid_on_image(image_path, grid_cells, cell_size=(GRID_WIDTH, GRID_HEIGHT), save_path=None):
image = cv2.imread(image_path)
if image is None: return
h, w = image.shape[:2]
cell_w, cell_h = cell_size
cols = w // cell_w
overlay = image.copy()
for cname, nums in grid_cells.items():
color = (np.random.randint(64,255),np.random.randint(64,255),np.random.randint(64,255))
for num in nums:
x1,y1,x2,y2 = num_to_coord(num, cols, cell_w, cell_h)
cv2.rectangle(overlay,(x1,y1),(x2,y2),color,-1)
cv2.addWeighted(overlay,0.4,image,0.6,0,image)
for i in range(0, w, cell_w):
cv2.line(image,(i,0),(i,h),(100,100,100),1)
for j in range(0, h, cell_h):
cv2.line(image,(0,j),(w,j),(100,100,100),1)
if save_path: cv2.imwrite(save_path,image)
return image
def detect_road_type_from_content(label_file):
"""Infer the road surface type from the label file content"""
try:
with open(label_file,'r',encoding='utf-8') as f:
content = f.read()
except (OSError, UnicodeDecodeError):
return "gravel"
for kw in CLASS_MAP_ASPHALT.keys():
if kw in content: return "asphalt"
for kw in CLASS_MAP_CEMENT.keys():
if kw in content: return "cement"
for kw in CLASS_MAP_GRAVEL.keys():
if kw in content: return "gravel"
return "gravel"
def yoloseg_to_grid(image_path,label_file,cover_ratio=COVER_RATIO):
"""将YOLO-Seg标签转换成格子编号和类别"""
road_type = detect_road_type_from_content(label_file)
if road_type=="asphalt": class_map = CLASS_MAP_ASPHALT
elif road_type=="cement": class_map = CLASS_MAP_CEMENT
else: class_map = CLASS_MAP_GRAVEL
class_names = list(class_map.keys())
img = cv2.imread(image_path)
if img is None: return "", {}, road_type # return a 3-tuple so callers can unpack consistently
h, w = img.shape[:2]
cols = max(1, w//GRID_WIDTH)
rows = max(1, h//GRID_HEIGHT)
result_lines = []
all_class_cells = {}
with open(label_file,'r',encoding='utf-8') as f:
for line in f:
parts = line.strip().split()
if len(parts)<5: continue
cls_id = int(parts[0])
coords = [float(x) for x in parts[1:]]
if len(coords)%2!=0: coords=coords[:-1]
if len(coords)<6: continue
poly = np.array(coords,dtype=np.float32).reshape(-1,2)
poly[:,0]*=w
poly[:,1]*=h
mask = np.zeros((h,w),dtype=np.uint8)
cv2.fillPoly(mask,[poly.astype(np.int32)],255)
covered_cells=[]
for r in range(rows):
for c in range(cols):
x1,y1 = c*GRID_WIDTH, r*GRID_HEIGHT
x2,y2 = min(w,x1+GRID_WIDTH), min(h,y1+GRID_HEIGHT)
region = mask[y1:y2, x1:x2]
if np.count_nonzero(region)/region.size>cover_ratio:
covered_cells.append(r*cols+c+1)
if not covered_cells: continue
cname = class_names[cls_id] if cls_id<len(class_names) else str(cls_id)
ids_str = '-'.join(map(str,sorted(covered_cells)))+'-'
result_lines.append(f"{cname} {ids_str}")
if cname not in all_class_cells: all_class_cells[cname]=set()
all_class_cells[cname].update(covered_cells)
return '\n'.join(result_lines), all_class_cells, road_type
def generate_header(road_type):
if road_type=="asphalt": return "起点桩号(km),识别宽度(m),破损率DR(%),龟裂,块状裂缝,纵向裂缝,横向裂缝,沉陷,车辙,波浪拥包,坑槽,松散,泛油,修补"
if road_type=="cement": return "起点桩号(km),识别宽度(m),破损率DR(%),破碎板,裂缝,板角断裂,错台,拱起,边角剥落,接缝料损坏,坑洞,唧泥,露骨,修补"
if road_type=="gravel": return "起点桩号(km),识别宽度(m),破损率DR(%),坑槽,沉陷,车辙,波浪搓板"
return ""
# ---------------- Main function ----------------
def process_zip(zip_path,pile_map_file,output_dir="output",cell_area=CELL_AREA,grid_width=GRID_WIDTH,grid_height=GRID_HEIGHT):
if not os.path.exists(zip_path):
raise FileNotFoundError(f"{zip_path} does not exist")
os.makedirs(output_dir,exist_ok=True)
# Extract the archive
with zipfile.ZipFile(zip_path,'r') as zip_ref:
zip_ref.extractall(output_dir)
# Read the stake-number (pile) mapping
pile_dict = {}
with open(pile_map_file,'r',encoding='utf-8') as f:
for line in f:
parts = line.strip().split("->")
if len(parts)>=4:
pile_dict[parts[3]]=parts[1] # filename -> stake number
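# Illustrative pile-map line (format assumed): "1->K12+300->lane1->IMG_0001.jpg"
# splits on "->" into four parts, producing pile_dict["IMG_0001.jpg"] = "K12+300"
# (image file name -> stake number).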
# Iterate over images
summary_data = []
for root,_,files in os.walk(output_dir):
for file in files:
if file.lower().endswith((".jpg",".png",".jpeg",".bmp")):
image_path = os.path.join(root,file)
label_file = os.path.splitext(image_path)[0]+".txt"
if not os.path.exists(label_file):
print(f"⚠️ Label not found: {label_file}")
continue
out_txt, class_cells, road_type = yoloseg_to_grid(image_path,label_file)
# Write a per-image _grid.txt
grid_txt_path = os.path.splitext(image_path)[0]+"_grid.txt"
with open(grid_txt_path,'w',encoding='utf-8') as f:
f.write(out_txt)
# Render the grid visualization
draw_grid_on_image(image_path,class_cells,save_path=os.path.splitext(image_path)[0]+"_grid.jpg")
# Per-class area totals
counts = {k:len(v)*cell_area for k,v in class_cells.items()}
total_area = sum(counts.values())
# Stake number
pile_no = pile_dict.get(file,"未知")
# Damage ratio DR (%) = damaged area / surveyed area; simplified here to 100% if any damage was found, else 0
DR = total_area/ (total_area if total_area>0 else 1) *100
summary_data.append((pile_no, DR, counts, road_type))
# Write 桩号问题列表.txt (stake-number issue list)
if summary_data:
road_type = summary_data[0][3]
out_file = os.path.join(output_dir,"桩号问题列表.txt")
header = generate_header(road_type)
with open(out_file,'w',encoding='utf-8') as f:
f.write(header+'\n')
for pile_no,DR,counts,rt in summary_data:
row = [pile_no,"3.6",f"{DR:.2f}"]
if road_type=="asphalt":
keys = list(CLASS_MAP_ASPHALT.keys())
elif road_type=="cement":
keys = list(CLASS_MAP_CEMENT.keys())
else:
keys = list(CLASS_MAP_GRAVEL.keys())
for k in keys:
row.append(f"{counts.get(k,0):.2f}")
f.write(','.join(row)+'\n')
print(f"✅ Output written: {out_file}")
# ---------------- Example invocation ----------------
if __name__=="__main__":
zip_path = "dataset.zip" # input ZIP file
pile_map_file = "pile_map.txt" # maps image file name -> stake number
process_zip(zip_path,pile_map_file,output_dir="output")