yoooooger
This commit is contained in:
parent
faa2c610da
commit
688624aaad
5
.vscode/settings.json
vendored
Normal file
@ -0,0 +1,5 @@
{
    "python-envs.defaultEnvManager": "ms-python.python:conda",
    "python-envs.defaultPackageManager": "ms-python.python:conda",
    "python-envs.pythonProjects": []
}
370
Ai_tottle/ai_tottle_api copy.py
Normal file
@ -0,0 +1,370 @@
from sanic import Sanic, json, Blueprint, response
from sanic.exceptions import Unauthorized
from sanic.response import json as json_response
from sanic_cors import CORS
import numpy as np
import logging
import uuid
import os, traceback
import asyncio
from ai_image import process_images  # image-processing function you implement yourself
from queue import Queue
from map_find import map_process_images
from yolo_train import auto_train
import torch
from yolo_photo import map_process_images_with_progress  # import your processing function
from tiles import TilesetProcessor

# Logging configuration
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


################################################################################ Authentication middleware and management ################################################################################

async def token_and_resource_check(request):
|
||||
# --- Token validation ---
|
||||
token = request.headers.get('X-API-Token')
|
||||
expected_token = request.app.config.get("VALID_TOKEN")
|
||||
if not token or token != expected_token:
|
||||
logger.warning(f"Unauthorized request with token: {token}")
|
||||
raise Unauthorized("Invalid token")
|
||||
|
||||
# --- GPU usage check ---
|
||||
try:
|
||||
if torch.cuda.is_available():
|
||||
num_gpus = torch.cuda.device_count()
|
||||
max_usage_ratio = request.app.config.get("MAX_GPU_USAGE", 0.9)  # default 90%
|
||||
|
||||
for i in range(num_gpus):
|
||||
used = torch.cuda.memory_reserved(i)
|
||||
total = torch.cuda.max_memory_reserved(i)
|
||||
ratio = used / total if total else 0
|
||||
|
||||
logger.info(f"GPU {i} Usage: {ratio:.2%}")
|
||||
|
||||
if ratio > max_usage_ratio:
|
||||
logger.warning(f"GPU {i} usage too high: {ratio:.2%}")
|
||||
return json_response({
|
||||
"status": "error",
|
||||
"message": f"GPU resource busy (GPU {i} at {ratio:.2%}). Try later."
|
||||
}, status=503)
|
||||
except Exception as e:
|
||||
logger.error(f"GPU check failed: {e}")
|
||||
|
||||
return None  # allow the request to continue
|
||||
|
||||
##################################################################################################################################################################################################
|
||||
# Create the Sanic application
|
||||
app = Sanic("ai_Service_v2")
|
||||
CORS(app)  # allow cross-origin requests
|
||||
task_progress = {}
|
||||
|
||||
@app.middleware("request")
|
||||
async def global_middleware(request):
|
||||
result = await token_and_resource_check(request)
|
||||
if result:
|
||||
return result
|
||||
|
||||
# Configure the token and the maximum GPU usage
|
||||
app.config.update({
|
||||
"VALID_TOKEN": "Beidou_b8609e96-bfec-4485-8c64-6d4f662ee44a",
|
||||
"MAX_GPU_USAGE": 0.9
|
||||
})
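# Usage sketch: given the middleware and config above, every request to this
# service must carry an X-API-Token header equal to VALID_TOKEN, and may be
# rejected with HTTP 503 when GPU memory usage exceeds MAX_GPU_USAGE. Assuming
# the service runs at http://localhost:12366 (taken from app.run at the bottom
# of this file), a minimal authenticated call could look like:
#
#   import requests
#   headers = {"X-API-Token": "Beidou_b8609e96-bfec-4485-8c64-6d4f662ee44a"}
#   resp = requests.post("http://localhost:12366/map/uav", headers=headers, json={...})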
|
||||
|
||||
###################################################################### Map tiling APIs ########################################################################################################
|
||||
# Create the map blueprint
|
||||
map_tile_blueprint = Blueprint('map', url_prefix='/map/')
|
||||
app.blueprint(map_tile_blueprint)
|
||||
|
||||
# @map_tile_blueprint.post("/compare_tilesets")
|
||||
# async def compare_tilesets(request):
|
||||
# '''
|
||||
# Endpoint: /map/compare_tilesets
|
||||
# Input JSON:
|
||||
# {
|
||||
# "tileset1": "path/to/tileset1/tileset.json",
|
||||
# "tileset2": "path/to/tileset2/tileset.json",
|
||||
# "bounds": [500000, 3000000, 500100, 3000100],
|
||||
# "resolution": 1.0,
|
||||
# "output": "results"
|
||||
# }
|
||||
# Output JSON:
|
||||
# {
|
||||
# "success": true,
|
||||
# "message": "分析完成",
|
||||
# "data": {
|
||||
# "csv_path": "results/height_differences.csv",
|
||||
# "heatmap_path": "results/height_difference_heatmap.png",
|
||||
# "summary": {
|
||||
# "mean": 0.28,
|
||||
# "max": 1.10,
|
||||
# "min": -0.85,
|
||||
# "std": 0.23
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# '''
|
||||
# try:
|
||||
# body = request.json
|
||||
|
||||
# # Parameter extraction and validation
|
||||
# tileset1 = body.get("tileset1")
|
||||
# tileset2 = body.get("tileset2")
|
||||
# bounds = body.get("bounds")
|
||||
# resolution = body.get("resolution", 1.0)
|
||||
# output = body.get("output", "results")
|
||||
|
||||
# if not all([tileset1, tileset2, bounds]):
|
||||
# return response.json({"success": False, "message": "参数不完整"}, status=400)
|
||||
|
||||
# processor = TilesetProcessor(tileset1, tileset2, resolution)
|
||||
|
||||
# if not processor.set_analysis_area(bounds):
|
||||
# return response.json({"success": False, "message": "设置分析区域失败"}, status=400)
|
||||
|
||||
# if not processor.sample_heights():
|
||||
# return response.json({"success": False, "message": "高度采样失败"}, status=500)
|
||||
|
||||
# processor.export_results(output)
|
||||
|
||||
# # Aggregate statistics
|
||||
# valid_differences = processor.height_difference_grid[~np.isnan(processor.height_difference_grid)]
|
||||
# summary = {
|
||||
# "mean": float(np.mean(valid_differences)),
|
||||
# "max": float(np.max(valid_differences)),
|
||||
# "min": float(np.min(valid_differences)),
|
||||
# "std": float(np.std(valid_differences))
|
||||
# }
|
||||
|
||||
# return response.json({
|
||||
# "success": True,
|
||||
# "message": "分析完成",
|
||||
# "data": {
|
||||
# "csv_path": os.path.join(output, "height_differences.csv"),
|
||||
# "heatmap_path": os.path.join(output, "height_difference_heatmap.png"),
|
||||
# "summary": summary
|
||||
# }
|
||||
# })
|
||||
|
||||
# except Exception as e:
|
||||
# traceback.print_exc()
|
||||
# return response.json({"success": False, "message": str(e)}, status=500)
|
||||
|
||||
# Semantic recognition
|
||||
@map_tile_blueprint.post("/uav")
|
||||
async def process_handler(request):
|
||||
"""
|
||||
Endpoint: /map/uav
|
||||
Input JSON:
|
||||
{
|
||||
"urls": [
|
||||
"http://example.com/img1.jpg",
|
||||
"http://example.com/img2.jpg"
|
||||
],
|
||||
"yaml_name": "config",
|
||||
"bucket_name": "300bdf2b-a150-406e-be63-d28bd29b409f",
|
||||
"bucket_directory": "2025/seg"
|
||||
"model_path": "deeplabv3plus_best.pth"
|
||||
}
|
||||
Output JSON:
|
||||
{
|
||||
"code": 200,
|
||||
"msg": "success",
|
||||
"data": [
|
||||
"http://minio.example.com/uav-results/2025/seg/result1.png",
|
||||
"http://minio.example.com/uav-results/2025/seg/result2.png"
|
||||
]
|
||||
}
|
||||
|
||||
"""
|
||||
try:
|
||||
body = request.json
|
||||
urls = body.get("urls", [])
|
||||
yaml_name = body.get("yaml_name")
|
||||
bucket_name = body.get("bucket_name")
|
||||
bucket_directory = body.get("bucket_directory")
|
||||
model_path = os.path.join("map", "checkpoints", body.get("model_path"))
|
||||
|
||||
# Validate parameters
|
||||
if not urls or not isinstance(urls, list):
|
||||
return json({"code": 400, "msg": "Missing or invalid 'urls'"})
|
||||
if not all([yaml_name, bucket_name, bucket_directory]):
|
||||
return json({"code": 400, "msg": "Missing required parameters"})
|
||||
|
||||
# Call the image-processing function
|
||||
result = map_process_images(urls, yaml_name, bucket_name, bucket_directory,model_path)
|
||||
return json(result)
|
||||
|
||||
except Exception as e:
|
||||
return json({"code": 500, "msg": f"Server error: {str(e)}"})
|
||||
|
||||
###################################################################### YOLO APIs ########################################################################################################
|
||||
# Create the YOLO blueprint
|
||||
yolo_tile_blueprint = Blueprint('yolo', url_prefix='/yolo/')
|
||||
app.blueprint(yolo_tile_blueprint)
|
||||
|
||||
# YOLO URL API
|
||||
# Store task progress and results (in-memory example; could be persisted with Redis or a DB)
|
||||
|
||||
@yolo_tile_blueprint.post("/process_images")
|
||||
async def process_images(request):
|
||||
"""
|
||||
{
|
||||
"urls": [
|
||||
"http://example.com/image1.jpg",
|
||||
"http://example.com/image2.jpg",
|
||||
"http://example.com/image3.jpg"
|
||||
],
|
||||
"yaml_name": "your_minio_config",
|
||||
"bucket_name": "my-bucket",
|
||||
"bucket_directory": "2025/uav-results",
|
||||
"model_path": "deeplabv3plus_best.pth"
|
||||
}
|
||||
"""
|
||||
data = request.json
|
||||
urls = data.get("urls")
|
||||
yaml_name = data.get("yaml_name")
|
||||
bucket_name = data.get("bucket_name")
|
||||
bucket_directory = data.get("bucket_directory")
|
||||
uav_model_path = data.get("uav_model_path")
|
||||
|
||||
if not urls or not yaml_name or not bucket_name or not uav_model_path:
|
||||
return response.json({"code": 400, "msg": "Missing parameters"}, status=400)
|
||||
|
||||
task_id = str(uuid.uuid4())
|
||||
task_progress[task_id] = {"status": "pending", "progress": 0, "result": None}
|
||||
|
||||
# Start the background task
|
||||
asyncio.create_task(run_image_processing(task_id, urls, yaml_name, bucket_name, bucket_directory, uav_model_path))
|
||||
|
||||
return response.json({"code": 200, "msg": "Task started", "task_id": task_id})
|
||||
|
||||
@yolo_tile_blueprint.get("/task_status/<task_id>")
|
||||
async def task_status(request, task_id):
|
||||
progress = task_progress.get(task_id)
|
||||
if not progress:
|
||||
return response.json({"code": 404, "msg": "Task not found"}, status=404)
|
||||
return response.json({"code": 200, "msg": "Task status", "data": progress})
|
||||
|
||||
async def run_image_processing(task_id, urls, yaml_name, bucket_name, bucket_directory, uav_model_path):
|
||||
|
||||
|
||||
try:
|
||||
task_progress[task_id]["status"] = "running"
|
||||
task_progress[task_id]["progress"] = 10 # 开始进度
|
||||
|
||||
# Update progress separately for the download, inference, and upload stages
|
||||
def progress_callback(stage, percent):
|
||||
task_progress[task_id]["status"] = stage
|
||||
task_progress[task_id]["progress"] = percent
|
||||
|
||||
result = await asyncio.to_thread(
|
||||
map_process_images_with_progress,
|
||||
urls, yaml_name, bucket_name, bucket_directory, uav_model_path, progress_callback
|
||||
)
|
||||
|
||||
task_progress[task_id]["status"] = "completed"
|
||||
task_progress[task_id]["progress"] = 100
|
||||
task_progress[task_id]["result"] = result
|
||||
|
||||
except Exception as e:
|
||||
task_progress[task_id]["status"] = "failed"
|
||||
task_progress[task_id]["progress"] = 100
|
||||
task_progress[task_id]["result"] = str(e)
|
||||
|
||||
# YOLO detection API
|
||||
@yolo_tile_blueprint.post("/picture")
|
||||
async def yolo_detect_api(request):
|
||||
try:
|
||||
|
||||
detect_data = request.json
|
||||
|
||||
# Parse required fields
|
||||
image_list = detect_data.get("image_list")
|
||||
yolo_model = detect_data.get("yolo_model", "best.pt")
|
||||
class_filter = detect_data.get("class", None)
|
||||
minio_info = detect_data.get("minio", None)
|
||||
|
||||
if not image_list:
|
||||
return json_response({"status": "error", "message": "image_list is required"}, status=400)
|
||||
|
||||
if not minio_info:
|
||||
return json_response({"status": "error", "message": "MinIO information is required"}, status=400)
|
||||
|
||||
# Create temporary folders
|
||||
input_folder = f"./temp_input_{str(uuid.uuid4())}"
|
||||
output_folder = f"./temp_output_{str(uuid.uuid4())}"
|
||||
|
||||
# Run image processing
|
||||
result = await asyncio.to_thread(
|
||||
process_images,
|
||||
yolo_model=yolo_model,
|
||||
image_list=image_list,
|
||||
class_filter=class_filter,
|
||||
input_folder=input_folder,
|
||||
output_folder=output_folder,
|
||||
minio_info=minio_info
|
||||
)
|
||||
|
||||
# Return the processing result
|
||||
return json_response(result)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error occurred while processing request: {str(e)}", exc_info=True)
|
||||
return json_response({
|
||||
"status": "error",
|
||||
"message": f"Internal server error: {str(e)}"
|
||||
}, status=500)
|
||||
|
||||
# YOLO automatic training
|
||||
@yolo_tile_blueprint.post("/train")
|
||||
async def yolo_train_api(request):
|
||||
"""
|
||||
Automatically train a model
|
||||
Input JSON:
|
||||
{
|
||||
"db_host": str,
|
||||
"db_database": str,
|
||||
"db_user": str,
|
||||
"db_password": str,
|
||||
"db_port": int,
|
||||
"model_id": int,
|
||||
"img_path": str,
|
||||
"label_path": str,
|
||||
"new_path": str,
|
||||
"split_list": List[float],
|
||||
"class_names": Optional[List[str]],
|
||||
"project_name": str
|
||||
}
|
||||
Output JSON:
|
||||
{
|
||||
"base_metrics": Dict[str, float],
|
||||
"best_model_path": str,
|
||||
"final_metrics": Dict[str, float]
|
||||
}
|
||||
|
||||
"""
|
||||
try:
|
||||
# Access request.json directly instead of calling it
|
||||
data = request.json
|
||||
|
||||
|
||||
if not data:
|
||||
return json_response({"status": "error", "message": "data is required"}, status=400)
|
||||
|
||||
# Run the training job
|
||||
result = await asyncio.to_thread(
|
||||
auto_train,
|
||||
data
|
||||
)
|
||||
# Return the processing result
|
||||
return json_response(result)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error occurred while processing request: {str(e)}", exc_info=True)
|
||||
return json_response({
|
||||
"status": "error",
|
||||
"message": f"Internal server error: {str(e)}"
|
||||
}, status=500)
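# Usage sketch for /yolo/train, following the schema in the docstring above;
# all values below are illustrative placeholders, and the dict is forwarded
# unchanged to auto_train:
#
#   import requests
#   train_payload = {
#       "db_host": "127.0.0.1", "db_database": "yolo", "db_user": "user",
#       "db_password": "secret", "db_port": 5432,
#       "model_id": 1,
#       "img_path": "datasets/images", "label_path": "datasets/labels",
#       "new_path": "datasets/split",
#       "split_list": [0.8, 0.1, 0.1],
#       "class_names": ["car", "person"],
#       "project_name": "demo_run",
#   }
#   requests.post("http://localhost:12366/yolo/train",
#                 headers={"X-API-Token": "<VALID_TOKEN>"}, json=train_payload)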
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run(host="0.0.0.0", port=12366, debug=True,workers=1)
|
@ -75,80 +75,6 @@ app.config.update({
|
||||
map_tile_blueprint = Blueprint('map', url_prefix='/map/')
|
||||
app.blueprint(map_tile_blueprint)
|
||||
|
||||
@map_tile_blueprint.post("/compare_tilesets")
|
||||
async def compare_tilesets(request):
|
||||
'''
|
||||
接口:/map/compare_tilesets
|
||||
输入 JSON:
|
||||
{
|
||||
"tileset1": "path/to/tileset1/tileset.json",
|
||||
"tileset2": "path/to/tileset2/tileset.json",
|
||||
"bounds": [500000, 3000000, 500100, 3000100],
|
||||
"resolution": 1.0,
|
||||
"output": "results"
|
||||
}
|
||||
输出 JSON:
|
||||
{
|
||||
"success": true,
|
||||
"message": "分析完成",
|
||||
"data": {
|
||||
"csv_path": "results/height_differences.csv",
|
||||
"heatmap_path": "results/height_difference_heatmap.png",
|
||||
"summary": {
|
||||
"mean": 0.28,
|
||||
"max": 1.10,
|
||||
"min": -0.85,
|
||||
"std": 0.23
|
||||
}
|
||||
}
|
||||
}
|
||||
'''
|
||||
try:
|
||||
body = request.json
|
||||
|
||||
# 参数提取与验证
|
||||
tileset1 = body.get("tileset1")
|
||||
tileset2 = body.get("tileset2")
|
||||
bounds = body.get("bounds")
|
||||
resolution = body.get("resolution", 1.0)
|
||||
output = body.get("output", "results")
|
||||
|
||||
if not all([tileset1, tileset2, bounds]):
|
||||
return response.json({"success": False, "message": "参数不完整"}, status=400)
|
||||
|
||||
processor = TilesetProcessor(tileset1, tileset2, resolution)
|
||||
|
||||
if not processor.set_analysis_area(bounds):
|
||||
return response.json({"success": False, "message": "设置分析区域失败"}, status=400)
|
||||
|
||||
if not processor.sample_heights():
|
||||
return response.json({"success": False, "message": "高度采样失败"}, status=500)
|
||||
|
||||
processor.export_results(output)
|
||||
|
||||
# 汇总统计结果
|
||||
valid_differences = processor.height_difference_grid[~np.isnan(processor.height_difference_grid)]
|
||||
summary = {
|
||||
"mean": float(np.mean(valid_differences)),
|
||||
"max": float(np.max(valid_differences)),
|
||||
"min": float(np.min(valid_differences)),
|
||||
"std": float(np.std(valid_differences))
|
||||
}
|
||||
|
||||
return response.json({
|
||||
"success": True,
|
||||
"message": "分析完成",
|
||||
"data": {
|
||||
"csv_path": os.path.join(output, "height_differences.csv"),
|
||||
"heatmap_path": os.path.join(output, "height_difference_heatmap.png"),
|
||||
"summary": summary
|
||||
}
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
return response.json({"success": False, "message": str(e)}, status=500)
|
||||
|
||||
#语义识别
|
||||
@map_tile_blueprint.post("/uav")
|
||||
async def process_handler(request):
|
||||
|
BIN
Ai_tottle/temp.glb
Normal file
Binary file not shown.
234
Ai_tottle/tiles copy.py
Normal file
@ -0,0 +1,234 @@
|
||||
import os
|
||||
import logging
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
import matplotlib.pyplot as plt
|
||||
from matplotlib.colors import LinearSegmentedColormap
|
||||
from shapely.geometry import Polygon, Point
|
||||
from tqdm import tqdm
|
||||
from py3dtiles.tileset import TileSet
|
||||
import requests
|
||||
|
||||
# Logging configuration
|
||||
logger = logging.getLogger("TilesetProcessor")
|
||||
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
||||
|
||||
|
||||
class TilesetProcessor:
|
||||
"""3D Tiles数据集处理器,用于加载、分析和比较两个3D Tiles模型"""
|
||||
|
||||
def __init__(self, tileset_path1, tileset_path2, resolution=1.0, polygon_points=None):
|
||||
self.tileset1 = self._load_tileset(tileset_path1)
|
||||
self.tileset2 = self._load_tileset(tileset_path2)
|
||||
self.resolution = resolution
|
||||
self.analysis_area = None
|
||||
self.height_difference_grid = None
|
||||
self.grid_bounds = None
|
||||
|
||||
if polygon_points:
|
||||
self.set_analysis_area(polygon_points=polygon_points)
|
||||
|
||||
def _load_tileset(self, path_or_url):
|
||||
try:
|
||||
logger.info(f"加载3D Tiles数据集: {path_or_url}")
|
||||
if path_or_url.startswith("http://") or path_or_url.startswith("https://"):
|
||||
resp = requests.get(path_or_url)
|
||||
resp.raise_for_status()
|
||||
tileset_json = resp.json()
|
||||
tileset = TileSet.from_dict(tileset_json)
|
||||
else:
|
||||
tileset = TileSet.from_file(path_or_url)
|
||||
logger.info(f"成功加载,包含 {len(tileset.root.children)} 个根瓦片")
|
||||
return tileset
|
||||
except Exception as e:
|
||||
logger.error(f"加载数据集失败(路径: {path_or_url}): {e}")
|
||||
raise
|
||||
|
||||
def set_analysis_area(self, bounds=None, polygon_points=None):
|
||||
if polygon_points:
|
||||
self.analysis_area = Polygon(polygon_points)
|
||||
min_x = min(p[0] for p in polygon_points)
|
||||
min_y = min(p[1] for p in polygon_points)
|
||||
max_x = max(p[0] for p in polygon_points)
|
||||
max_y = max(p[1] for p in polygon_points)
|
||||
self.grid_bounds = (min_x, min_y, max_x, max_y)
|
||||
logger.info(f"设置多边形分析区域: {polygon_points}")
|
||||
elif bounds:
|
||||
min_x, min_y, max_x, max_y = bounds
|
||||
self.analysis_area = Polygon([
|
||||
(min_x, min_y),
|
||||
(max_x, min_y),
|
||||
(max_x, max_y),
|
||||
(min_x, max_y)
|
||||
])
|
||||
self.grid_bounds = bounds
|
||||
logger.info(f"设置矩形分析区域: {bounds}")
|
||||
else:
|
||||
logger.error("请提供 bounds 或 polygon_points")
|
||||
return False
|
||||
|
||||
logger.info(f"分析区域面积: {self.analysis_area.area:.2f} 平方米")
|
||||
return True
|
||||
|
||||
def sample_heights(self):
|
||||
if self.analysis_area is None:
|
||||
logger.error("请先设置分析区域")
|
||||
return False
|
||||
|
||||
logger.info("开始在分析区域内采样高度值...")
|
||||
|
||||
min_x, min_y, max_x, max_y = self.grid_bounds
|
||||
rows = int((max_y - min_y) / self.resolution) + 1
|
||||
cols = int((max_x - min_x) / self.resolution) + 1
|
||||
|
||||
self.height_difference_grid = np.full((rows, cols), np.nan, dtype=np.float32)
|
||||
|
||||
total_points = rows * cols
|
||||
logger.info(f"创建了 {rows}x{cols}={total_points} 个采样点")
|
||||
|
||||
with tqdm(total=total_points, desc="采样高度点") as pbar:
|
||||
for i in range(rows):
|
||||
for j in range(cols):
|
||||
x = min_x + j * self.resolution
|
||||
y = min_y + i * self.resolution
|
||||
point = Point(x, y)
|
||||
|
||||
if not self.analysis_area.contains(point):
|
||||
pbar.update(1)
|
||||
continue
|
||||
|
||||
height1 = self._sample_height_at_point(self.tileset1, x, y)
|
||||
height2 = self._sample_height_at_point(self.tileset2, x, y)
|
||||
|
||||
if height1 is not None and height2 is not None:
|
||||
self.height_difference_grid[i, j] = height2 - height1
|
||||
|
||||
pbar.update(1)
|
||||
|
||||
valid_differences = self.height_difference_grid[~np.isnan(self.height_difference_grid)]
|
||||
if len(valid_differences) > 0:
|
||||
logger.info("高度变化统计:")
|
||||
logger.info(f" 平均变化: {np.mean(valid_differences):.2f}m")
|
||||
logger.info(f" 最大上升: {np.max(valid_differences):.2f}m")
|
||||
logger.info(f" 最大下降: {np.min(valid_differences):.2f}m")
|
||||
logger.info(f" 变化标准差: {np.std(valid_differences):.2f}m")
|
||||
else:
|
||||
logger.warning("未找到有效的高度差异数据")
|
||||
|
||||
return True
|
||||
|
||||
def _sample_height_at_point(self, tileset, x, y, max_depth=3):
|
||||
def find_tile(tile, depth=0):
|
||||
bbox = tile.bounding_volume.box
|
||||
min_x_tile = bbox[0] - bbox[3]
|
||||
max_x_tile = bbox[0] + bbox[3]
|
||||
min_y_tile = bbox[1] - bbox[4]
|
||||
max_y_tile = bbox[1] + bbox[4]
|
||||
|
||||
if not (min_x_tile <= x <= max_x_tile and min_y_tile <= y <= max_y_tile):
|
||||
return None
|
||||
|
||||
if (tile.content is not None and depth >= max_depth) or not tile.children:
|
||||
return tile
|
||||
|
||||
for child in tile.children:
|
||||
result = find_tile(child, depth + 1)
|
||||
if result is not None:
|
||||
return result
|
||||
|
||||
return None
|
||||
|
||||
tile = find_tile(tileset.root)
|
||||
if tile is None or tile.content is None:
|
||||
return None
|
||||
|
||||
try:
|
||||
# Simplified simulation: return the tile center height plus a random offset
|
||||
return tile.bounding_volume.box[2] + np.random.uniform(-0.5, 0.5)
|
||||
except Exception as e:
|
||||
logger.warning(f"获取瓦片高度失败: {e}")
|
||||
return None
|
||||
|
||||
def export_results(self, output_dir="results"):
|
||||
if self.height_difference_grid is None:
|
||||
logger.error("请先采样高度值")
|
||||
return
|
||||
|
||||
os.makedirs(output_dir, exist_ok=True)
|
||||
|
||||
csv_path = os.path.join(output_dir, "height_differences.csv")
|
||||
logger.info(f"导出CSV文件: {csv_path}")
|
||||
|
||||
min_x, min_y, max_x, max_y = self.grid_bounds
|
||||
rows, cols = self.height_difference_grid.shape
|
||||
|
||||
data = []
|
||||
for i in range(rows):
|
||||
for j in range(cols):
|
||||
if not np.isnan(self.height_difference_grid[i, j]):
|
||||
x = min_x + j * self.resolution
|
||||
y = min_y + i * self.resolution
|
||||
data.append({
|
||||
'x': x,
|
||||
'y': y,
|
||||
'height_difference': self.height_difference_grid[i, j]
|
||||
})
|
||||
|
||||
df = pd.DataFrame(data)
|
||||
df.to_csv(csv_path, index=False)
|
||||
|
||||
self._generate_heatmap(output_dir)
|
||||
|
||||
logger.info(f"结果已导出到 {output_dir} 目录")
|
||||
|
||||
def _generate_heatmap(self, output_dir):
|
||||
colors = [(0.0, 0.0, 1.0), (1.0, 1.0, 1.0), (1.0, 0.0, 0.0)]
|
||||
cmap = LinearSegmentedColormap.from_list('height_diff_cmap', colors, N=256)
|
||||
|
||||
data = self.height_difference_grid.copy()
|
||||
valid_mask = ~np.isnan(data)
|
||||
|
||||
if not np.any(valid_mask):
|
||||
logger.warning("没有有效的高度差异数据,无法生成热图")
|
||||
return
|
||||
|
||||
data[~valid_mask] = 0
|
||||
|
||||
plt.figure(figsize=(12, 10))
|
||||
plt.imshow(data, cmap=cmap, origin='lower',
|
||||
extent=[self.grid_bounds[0], self.grid_bounds[2],
|
||||
self.grid_bounds[1], self.grid_bounds[3]],
|
||||
alpha=0.9)
|
||||
|
||||
cbar = plt.colorbar()
|
||||
cbar.set_label('高度变化 (米)', fontsize=12)
|
||||
|
||||
plt.title('两个3D Tiles模型的高度变化分布', fontsize=16)
|
||||
plt.xlabel('X坐标 (米)', fontsize=12)
|
||||
plt.ylabel('Y坐标 (米)', fontsize=12)
|
||||
|
||||
heatmap_path = os.path.join(output_dir, "height_difference_heatmap.png")
|
||||
plt.savefig(heatmap_path, dpi=300, bbox_inches='tight')
|
||||
plt.close()
|
||||
|
||||
logger.info(f"热图已保存到: {heatmap_path}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
tileset1_url = "http://8.137.54.85:9000/300bdf2b-a150-406e-be63-d28bd29b409f/dszh/1748398014403562192_OUT/B3DM/tileset.json"
|
||||
tileset2_url = "http://8.137.54.85:9000/300bdf2b-a150-406e-be63-d28bd29b409f/dszh/1748325943733189898_OUT/B3DM/tileset.json"
|
||||
polygon_coords = [
|
||||
(102.2232, 29.3841),
|
||||
(102.2261, 29.3845),
|
||||
(102.2263, 29.3821),
|
||||
(102.2231, 29.3818)
|
||||
]
|
||||
resolution = 0.5
|
||||
output_dir = "output_results"
|
||||
|
||||
processor = TilesetProcessor(tileset1_url, tileset2_url, resolution, polygon_coords)
|
||||
if processor.sample_heights():
|
||||
processor.export_results(output_dir)
|
||||
print("分析完成!结果已导出到指定目录。")
|
||||
else:
|
||||
print("高度采样失败,无法完成分析。")
|
@ -1,265 +1,418 @@
|
||||
import os
|
||||
import requests
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
import py3dtiles
|
||||
from py3dtiles import Tileset, BoundingVolumeBox
|
||||
from py3dtiles.tileset.content import B3dm
|
||||
from pyproj import Proj, Transformer
|
||||
from shapely.geometry import Polygon, Point
|
||||
import matplotlib.pyplot as plt
|
||||
from matplotlib.colors import LinearSegmentedColormap
|
||||
import argparse
|
||||
from tqdm import tqdm
|
||||
import logging
|
||||
from shapely.ops import unary_union
|
||||
import math
|
||||
from urllib.parse import urljoin
|
||||
from pygltflib import GLTF2
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
|
||||
logger = logging.getLogger(__name__)
|
||||
# Target region longitude/latitude coordinates (converted to a polygon)
|
||||
region_coords = [
|
||||
[102.22321717600258, 29.384100779345513],
|
||||
[102.22612442019208, 29.384506810595088],
|
||||
[102.22638603372953, 29.382061071072794],
|
||||
[102.22311237980807, 29.38186133280733],
|
||||
[102.22321717600258, 29.384100779345513] # 闭合多边形
|
||||
]
|
||||
|
||||
class TilesetProcessor:
|
||||
"""3D Tiles数据集处理器,用于加载、分析和比较两个3D Tiles模型"""
|
||||
# Create the polygon object
|
||||
region_polygon = Polygon(region_coords)
|
||||
|
||||
def __init__(self, tileset_path1, tileset_path2, resolution=1.0):
|
||||
"""
|
||||
初始化处理器
|
||||
# URLs of the two 3D Tiles models
|
||||
tileset_urls = [
|
||||
"http://8.137.54.85:9000/300bdf2b-a150-406e-be63-d28bd29b409f/dszh/1748398014403562192_OUT/B3DM/tileset.json",
|
||||
"http://8.137.54.85:9000/300bdf2b-a150-406e-be63-d28bd29b409f/dszh/1748325943733189898_OUT/B3DM/tileset.json"
|
||||
]
|
||||
|
||||
参数:
|
||||
tileset_path1: 第一个3D Tiles数据集路径
|
||||
tileset_path2: 第二个3D Tiles数据集路径
|
||||
resolution: 分析网格的分辨率(米)
|
||||
"""
|
||||
self.tileset1 = self._load_tileset(tileset_path1)
|
||||
self.tileset2 = self._load_tileset(tileset_path2)
|
||||
self.resolution = resolution
|
||||
self.analysis_area = None
|
||||
self.height_difference_grid = None
|
||||
self.grid_bounds = None
|
||||
# Coordinate system conversion
|
||||
wgs84 = Proj(init='epsg:4326') # WGS84经纬度
|
||||
web_mercator = Proj(init='epsg:3857') # Web墨卡托投影
|
||||
|
||||
def _load_tileset(self, path):
|
||||
"""加载3D Tiles数据集"""
|
||||
try:
|
||||
logger.info(f"加载3D Tiles数据集: {path}")
|
||||
tileset = Tileset.from_file(path)
|
||||
logger.info(f"成功加载,包含 {len(tileset.root.children)} 个根瓦片")
|
||||
return tileset
|
||||
except Exception as e:
|
||||
logger.error(f"加载数据集失败: {e}")
|
||||
raise
|
||||
def adjust_z_in_transform(tileset_path, output_path=None, delta_z=0):
|
||||
import json
|
||||
import numpy as np
|
||||
|
||||
def set_analysis_area(self, bounds):
|
||||
"""
|
||||
设置分析区域
|
||||
if not os.path.exists(tileset_path):
|
||||
print(f"❌ tileset.json 文件不存在: {tileset_path}")
|
||||
return
|
||||
|
||||
参数:
|
||||
bounds: 分析区域边界元组 (min_x, min_y, max_x, max_y)
|
||||
"""
|
||||
min_x, min_y, max_x, max_y = bounds
|
||||
self.analysis_area = Polygon([
|
||||
(min_x, min_y),
|
||||
(max_x, min_y),
|
||||
(max_x, max_y),
|
||||
(min_x, max_y)
|
||||
])
|
||||
self.grid_bounds = bounds
|
||||
logger.info(f"设置分析区域: {bounds}")
|
||||
logger.info(f"分析区域面积: {self.analysis_area.area:.2f} 平方米")
|
||||
return True
|
||||
with open(tileset_path, 'r', encoding='utf-8') as f:
|
||||
data = json.load(f)
|
||||
|
||||
def sample_heights(self):
|
||||
"""在分析区域内采样两个模型的高度值并计算差异"""
|
||||
if self.analysis_area is None:
|
||||
logger.error("请先设置分析区域")
|
||||
return False
|
||||
root = data.get('root', {})
|
||||
|
||||
logger.info("开始在分析区域内采样高度值...")
|
||||
# 插入默认 transform
|
||||
if 'transform' not in root:
|
||||
print("⚠️ 未找到 transform 字段,使用单位矩阵")
|
||||
root['transform'] = [
|
||||
1, 0, 0, 0,
|
||||
0, 1, 0, 0,
|
||||
0, 0, 1, 0,
|
||||
0, 0, 0, 1
|
||||
]
|
||||
|
||||
# 创建网格
|
||||
min_x, min_y, max_x, max_y = self.grid_bounds
|
||||
rows = int((max_y - min_y) / self.resolution) + 1
|
||||
cols = int((max_x - min_x) / self.resolution) + 1
|
||||
transform = np.array(root['transform']).reshape(4, 4)
|
||||
print(f"原始 Z 平移: {transform[3, 2]}")
|
||||
transform[3, 2] += delta_z
|
||||
print(f"修正后 Z 平移: {transform[3, 2]}")
|
||||
|
||||
# 初始化高度差异网格
|
||||
self.height_difference_grid = np.zeros((rows, cols), dtype=np.float32)
|
||||
self.height_difference_grid[:] = np.nan # 初始化为NaN,表示未采样
|
||||
root['transform'] = transform.flatten().tolist()
|
||||
data['root'] = root
|
||||
|
||||
# 对每个网格点进行采样
|
||||
total_points = rows * cols
|
||||
logger.info(f"创建了 {rows}x{cols}={total_points} 个采样点")
|
||||
if output_path is None:
|
||||
output_path = tileset_path.replace(".json", f"_adjusted_z{int(delta_z)}.json")
|
||||
|
||||
with tqdm(total=total_points, desc="采样高度点") as pbar:
|
||||
for i in range(rows):
|
||||
for j in range(cols):
|
||||
# 计算当前点的坐标
|
||||
x = min_x + j * self.resolution
|
||||
y = min_y + i * self.resolution
|
||||
point = Point(x, y)
|
||||
with open(output_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(data, f, indent=2)
|
||||
|
||||
# 检查点是否在分析区域内
|
||||
if not self.analysis_area.contains(point):
|
||||
pbar.update(1)
|
||||
continue
|
||||
print(f"✅ 高度调整完成,输出文件: {output_path}")
|
||||
|
||||
# 采样两个模型的高度
|
||||
height1 = self._sample_height_at_point(self.tileset1, x, y)
|
||||
height2 = self._sample_height_at_point(self.tileset2, x, y)
|
||||
def download_tileset(tileset_url):
|
||||
"""下载tileset.json数据"""
|
||||
try:
|
||||
response = requests.get(tileset_url)
|
||||
response.raise_for_status()
|
||||
return response.json()
|
||||
except Exception as e:
|
||||
print(f"下载tileset失败: {e}")
|
||||
return None
|
||||
|
||||
# 计算高度差异
|
||||
if height1 is not None and height2 is not None:
|
||||
self.height_difference_grid[i, j] = height2 - height1
|
||||
def extract_vertices(gltf_bytes):
|
||||
try:
|
||||
with open("temp.glb", "wb") as f:
|
||||
f.write(gltf_bytes)
|
||||
|
||||
pbar.update(1)
|
||||
gltf = GLTF2().load("temp.glb")
|
||||
|
||||
# 统计结果
|
||||
valid_differences = self.height_difference_grid[~np.isnan(self.height_difference_grid)]
|
||||
if len(valid_differences) > 0:
|
||||
logger.info(f"高度变化统计:")
|
||||
logger.info(f" 平均变化: {np.mean(valid_differences):.2f}m")
|
||||
logger.info(f" 最大上升: {np.max(valid_differences):.2f}m")
|
||||
logger.info(f" 最大下降: {np.min(valid_differences):.2f}m")
|
||||
logger.info(f" 变化标准差: {np.std(valid_differences):.2f}m")
|
||||
for mesh in gltf.meshes:
|
||||
for primitive in mesh.primitives:
|
||||
if not hasattr(primitive.attributes, "POSITION"):
|
||||
continue
|
||||
|
||||
accessor_idx = primitive.attributes.POSITION
|
||||
accessor = gltf.accessors[accessor_idx]
|
||||
buffer_view = gltf.bufferViews[accessor.bufferView]
|
||||
buffer = gltf.buffers[buffer_view.buffer]
|
||||
|
||||
byte_offset = (buffer_view.byteOffset or 0) + (accessor.byteOffset or 0)
|
||||
byte_length = accessor.count * 3 * 4 # 3 floats per vertex
|
||||
|
||||
data_bytes = gltf.binary_blob()[byte_offset: byte_offset + byte_length]
|
||||
vertices = np.frombuffer(data_bytes, dtype=np.float32).reshape((accessor.count, 3))
|
||||
|
||||
return vertices
|
||||
|
||||
except Exception as e:
|
||||
print(f"提取顶点数据失败: {e}")
|
||||
|
||||
return np.array([])
|
||||
|
||||
def find_closest_vertex(vertices, lon, lat):
|
||||
"""找到离目标点最近的顶点"""
|
||||
if not vertices:
|
||||
return None
|
||||
|
||||
# 计算距离并找到最近的顶点
|
||||
min_distance = float('inf')
|
||||
closest_vertex = None
|
||||
|
||||
for vertex in vertices:
|
||||
v_lon, v_lat, v_z = vertex
|
||||
# 计算经纬度距离(简化为平面距离)
|
||||
distance = math.hypot(v_lon - lon, v_lat - lat)
|
||||
if distance < min_distance:
|
||||
min_distance = distance
|
||||
closest_vertex = vertex
|
||||
|
||||
return closest_vertex
|
||||
|
||||
def compare_heights(heights1, heights2, tolerance=0.5):
|
||||
"""比较两个高度数据集,找出差异"""
|
||||
# 找到所有点的并集
|
||||
all_points = set(heights1.keys()).union(set(heights2.keys()))
|
||||
differences = []
|
||||
|
||||
for point in all_points:
|
||||
h1 = heights1.get(point, None)
|
||||
h2 = heights2.get(point, None)
|
||||
|
||||
# 检查是否有一个模型在该点没有数据
|
||||
if h1 is None or h2 is None:
|
||||
differences.append({
|
||||
'point': point,
|
||||
'height1': h1,
|
||||
'height2': h2,
|
||||
'difference': None,
|
||||
'type': 'missing_data'
|
||||
})
|
||||
else:
|
||||
logger.warning("未找到有效的高度差异数据")
|
||||
# 检查高度差异是否超过容忍度
|
||||
diff = abs(h1 - h2)
|
||||
if diff > tolerance:
|
||||
differences.append({
|
||||
'point': point,
|
||||
'height1': h1,
|
||||
'height2': h2,
|
||||
'difference': diff,
|
||||
'type': 'height_difference'
|
||||
})
|
||||
|
||||
return True
|
||||
return differences
|
||||
|
||||
def _sample_height_at_point(self, tileset, x, y, max_depth=3):
|
||||
"""在指定点采样3D Tiles模型的高度值"""
|
||||
# 找到包含该点的瓦片
|
||||
def find_tile(tile, depth=0):
|
||||
# 检查点是否在瓦片边界框内
|
||||
bbox = tile.bounding_volume.box
|
||||
min_x_tile = bbox[0] - bbox[3]
|
||||
max_x_tile = bbox[0] + bbox[3]
|
||||
min_y_tile = bbox[1] - bbox[4]
|
||||
max_y_tile = bbox[1] + bbox[4]
|
||||
|
||||
if not (min_x_tile <= x <= max_x_tile and min_y_tile <= y <= max_y_tile):
|
||||
return None
|
||||
|
||||
# 如果瓦片有内容且深度足够,或者没有子瓦片,就使用这个瓦片
|
||||
if (tile.content is not None and depth >= max_depth) or not tile.children:
|
||||
return tile
|
||||
|
||||
# 否则递归查找子瓦片
|
||||
for child in tile.children:
|
||||
result = find_tile(child, depth + 1)
|
||||
if result is not None:
|
||||
return result
|
||||
def get_b3dm_from_tile_json(json_url):
|
||||
try:
|
||||
response = requests.get(json_url)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
|
||||
# Recursively search for a b3dm URI
|
||||
def find_b3dm_uri(node):
|
||||
if 'content' in node and 'uri' in node['content']:
|
||||
uri = node['content']['uri']
|
||||
if uri.endswith('.b3dm'):
|
||||
return uri
|
||||
if 'children' in node:
|
||||
for child in node['children']:
|
||||
result = find_b3dm_uri(child)
|
||||
if result:
|
||||
return result
|
||||
return None
|
||||
|
||||
# 找到包含该点的最详细瓦片
|
||||
tile = find_tile(tileset.root)
|
||||
if tile is None or tile.content is None:
|
||||
root = data.get('root', {})
|
||||
b3dm_uri = find_b3dm_uri(root)
|
||||
if not b3dm_uri:
|
||||
print(f"{json_url} 中找不到 content.uri")
|
||||
return None
|
||||
|
||||
# 从瓦片内容中获取高度
|
||||
base_url = os.path.dirname(json_url)
|
||||
full_b3dm_url = urljoin(base_url + '/', b3dm_uri)
|
||||
return full_b3dm_url
|
||||
|
||||
except Exception as e:
|
||||
print(f"解析 JSON {json_url} 时出错: {e}")
|
||||
return None
|
||||
|
||||
|
||||
def get_heights_in_region(tileset_url, sample_density=10):
|
||||
"""获取区域内的高度数据"""
|
||||
tileset_json = download_tileset(tileset_url)
|
||||
if not tileset_json:
|
||||
return {}
|
||||
|
||||
tiles_in_region = get_tiles_in_region(tileset_json, tileset_url)
|
||||
if not tiles_in_region:
|
||||
print(f"在{tileset_url}中未找到区域内的瓦片")
|
||||
return {}
|
||||
|
||||
min_lon, min_lat = min(p[0] for p in region_coords), min(p[1] for p in region_coords)
|
||||
max_lon, max_lat = max(p[0] for p in region_coords), max(p[1] for p in region_coords)
|
||||
lon_steps = np.linspace(min_lon, max_lon, sample_density)
|
||||
lat_steps = np.linspace(min_lat, max_lat, sample_density)
|
||||
|
||||
heights = {}
|
||||
|
||||
for tile_info in tiles_in_region:
|
||||
try:
|
||||
# 这里是简化的模拟实现,实际应该解析瓦片内容
|
||||
# 例如,使用py3dtiles中的TileContent.get_vertices()获取顶点
|
||||
# 然后找到最近的顶点或三角形来计算高度
|
||||
# 这里为了示例,我们返回瓦片中心的高度加上一个随机偏移
|
||||
return tile.bounding_volume.box[2] + np.random.uniform(-0.5, 0.5)
|
||||
response = requests.get(tile_info['url'])
|
||||
response.raise_for_status()
|
||||
b3dm_data = response.content
|
||||
|
||||
# ✅ 尝试解析为 b3dm
|
||||
try:
|
||||
gltf_bytes = parse_b3dm(b3dm_data)
|
||||
except Exception:
|
||||
# 可能 tile_info['url'] 是 JSON,不是真 b3dm
|
||||
print(f"尝试从 {tile_info['url']} 获取真实 b3dm 地址...")
|
||||
actual_b3dm_url = get_b3dm_from_tile_json(tile_info['url'])
|
||||
if not actual_b3dm_url:
|
||||
print(f"跳过:无法从 {tile_info['url']} 获取有效 b3dm")
|
||||
continue
|
||||
response = requests.get(actual_b3dm_url)
|
||||
response.raise_for_status()
|
||||
b3dm_data = response.content
|
||||
gltf_bytes = parse_b3dm(b3dm_data)
|
||||
|
||||
# ✅ 模拟解析 glb
|
||||
vertices = extract_vertices(gltf_bytes)
|
||||
if not vertices.size:
|
||||
continue
|
||||
|
||||
# ✅ 应用变换
|
||||
transformed_vertices = [transform_point(v, tile_info['transform']) for v in vertices]
|
||||
transformer = Transformer.from_crs("EPSG:3857", "EPSG:4326", always_xy=True)
|
||||
wgs84_vertices = []
|
||||
for x, y, z in transformed_vertices:
|
||||
lon, lat = transformer.transform(x, y)
|
||||
wgs84_vertices.append((lon, lat, z))
|
||||
|
||||
for lon in lon_steps:
|
||||
for lat in lat_steps:
|
||||
if point_in_region(lon, lat):
|
||||
closest_vertex = find_closest_vertex(wgs84_vertices, lon, lat)
|
||||
if closest_vertex:
|
||||
key = (round(lon, 6), round(lat, 6))
|
||||
heights[key] = closest_vertex[2]
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(f"获取瓦片高度失败: {e}")
|
||||
return None
|
||||
print(f"处理瓦片 {tile_info['url']} 时出错: {e}")
|
||||
continue
|
||||
|
||||
def export_results(self, output_dir="results"):
|
||||
"""导出分析结果"""
|
||||
if self.height_difference_grid is None:
|
||||
logger.error("请先采样高度值")
|
||||
return
|
||||
return heights
|
||||
|
||||
os.makedirs(output_dir, exist_ok=True)
|
||||
def get_tiles_in_region(tileset_json, tileset_base_url):
|
||||
"""获取区域内的所有瓦片"""
|
||||
tiles_in_region = []
|
||||
|
||||
# 导出CSV文件
|
||||
csv_path = os.path.join(output_dir, "height_differences.csv")
|
||||
logger.info(f"导出CSV文件: {csv_path}")
|
||||
# 去除 tileset.json 得到根路径
|
||||
tileset_root_url = tileset_base_url.rsplit('/', 1)[0]
|
||||
|
||||
# 创建DataFrame
|
||||
min_x, min_y, max_x, max_y = self.grid_bounds
|
||||
rows, cols = self.height_difference_grid.shape
|
||||
def recursive_search(tile, parent_transform=None):
|
||||
tile_transform = tile.get('transform', [1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1])
|
||||
combined_transform = multiply_matrices(parent_transform, tile_transform) if parent_transform else tile_transform
|
||||
|
||||
data = []
|
||||
for i in range(rows):
|
||||
for j in range(cols):
|
||||
if not np.isnan(self.height_difference_grid[i, j]):
|
||||
x = min_x + j * self.resolution
|
||||
y = min_y + i * self.resolution
|
||||
data.append({
|
||||
'x': x,
|
||||
'y': y,
|
||||
'height_difference': self.height_difference_grid[i, j]
|
||||
})
|
||||
if 'boundingVolume' in tile and is_bounding_volume_intersects_region(tile['boundingVolume']):
|
||||
if 'content' in tile and 'uri' in tile['content']:
|
||||
# 修复URL拼接
|
||||
tile_url = urljoin(tileset_root_url + '/', tile['content']['uri'])
|
||||
tiles_in_region.append({
|
||||
'url': tile_url,
|
||||
'transform': combined_transform
|
||||
})
|
||||
|
||||
df = pd.DataFrame(data)
|
||||
df.to_csv(csv_path, index=False)
|
||||
if 'children' in tile:
|
||||
for child in tile['children']:
|
||||
recursive_search(child, combined_transform)
|
||||
|
||||
# 生成热图
|
||||
self._generate_heatmap(output_dir)
|
||||
if 'root' in tileset_json:
|
||||
recursive_search(tileset_json['root'])
|
||||
|
||||
logger.info(f"结果已导出到 {output_dir} 目录")
|
||||
return tiles_in_region
|
||||
|
||||
def _generate_heatmap(self, output_dir):
|
||||
"""生成高度变化热图"""
|
||||
# 创建自定义颜色映射
|
||||
colors = [(0.0, 0.0, 1.0), (1.0, 1.0, 1.0), (1.0, 0.0, 0.0)] # 蓝-白-红
|
||||
cmap = LinearSegmentedColormap.from_list('height_diff_cmap', colors, N=256)
|
||||
def is_bounding_volume_intersects_region(bounding_volume):
|
||||
"""检查边界体是否与区域相交"""
|
||||
# 简化实现,实际需要根据不同边界体类型实现
|
||||
if 'region' in bounding_volume:
|
||||
# region格式: [west, south, east, north, minHeight, maxHeight]
|
||||
region = bounding_volume['region']
|
||||
bv_polygon = Polygon([
|
||||
[region[0], region[1]],
|
||||
[region[2], region[1]],
|
||||
[region[2], region[3]],
|
||||
[region[0], region[3]],
|
||||
[region[0], region[1]]
|
||||
])
|
||||
return region_polygon.intersects(bv_polygon)
|
||||
elif 'box' in bounding_volume:
|
||||
# 对于box类型,需要转换到经纬度后再判断
|
||||
# 这里简化处理,返回True让更细致的检查在后续进行
|
||||
return True
|
||||
elif 'sphere' in bounding_volume:
|
||||
# 对于sphere类型,简化处理
|
||||
return True
|
||||
return False
|
||||
|
||||
# 准备数据
|
||||
data = self.height_difference_grid.copy()
|
||||
valid_mask = ~np.isnan(data)
|
||||
def multiply_matrices(a, b):
|
||||
"""计算两个4x4矩阵的乘积"""
|
||||
result = [0.0] * 16
|
||||
for i in range(4):
|
||||
for j in range(4):
|
||||
result[i*4 + j] = a[i*4 + 0] * b[0*4 + j] + \
|
||||
a[i*4 + 1] * b[1*4 + j] + \
|
||||
a[i*4 + 2] * b[2*4 + j] + \
|
||||
a[i*4 + 3] * b[3*4 + j]
|
||||
return result
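# Note: multiply_matrices above is a plain row-major 4x4 matrix product; with
# numpy the same result can be cross-checked as
#
#   import numpy as np
#   c = (np.array(a).reshape(4, 4) @ np.array(b).reshape(4, 4)).flatten().tolist()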
|
||||
|
||||
if not np.any(valid_mask):
|
||||
logger.warning("没有有效的高度差异数据,无法生成热图")
|
||||
return
|
||||
def parse_b3dm(b3dm_data: bytes):
|
||||
"""
|
||||
Parse a b3dm file and return the embedded glb binary data
|
||||
"""
|
||||
import struct
|
||||
|
||||
# 设置NaN值为0以便绘图,但在颜色映射中标记为透明
|
||||
data[~valid_mask] = 0
|
||||
if b3dm_data[:4] != b'b3dm':
|
||||
raise ValueError("不是有效的 b3dm 文件")
|
||||
|
||||
# 创建图形
|
||||
plt.figure(figsize=(12, 10))
|
||||
plt.imshow(data, cmap=cmap, origin='lower',
|
||||
extent=[self.grid_bounds[0], self.grid_bounds[2],
|
||||
self.grid_bounds[1], self.grid_bounds[3]],
|
||||
alpha=0.9)
|
||||
# Read the header (28 bytes)
|
||||
header = struct.unpack('<4sIIIIII', b3dm_data[:28])
|
||||
_, version, byte_length, ft_json_len, ft_bin_len, bt_json_len, bt_bin_len = header
|
||||
|
||||
# 添加颜色条
|
||||
cbar = plt.colorbar()
|
||||
cbar.set_label('高度变化 (米)', fontsize=12)
|
||||
glb_start = 28 + ft_json_len + ft_bin_len + bt_json_len + bt_bin_len
|
||||
glb_bytes = b3dm_data[glb_start:]
|
||||
|
||||
# 设置标题和坐标轴
|
||||
plt.title('两个3D Tiles模型的高度变化分布', fontsize=16)
|
||||
plt.xlabel('X坐标 (米)', fontsize=12)
|
||||
plt.ylabel('Y坐标 (米)', fontsize=12)
|
||||
return glb_bytes
|
||||
|
||||
# 保存图形
|
||||
heatmap_path = os.path.join(output_dir, "height_difference_heatmap.png")
|
||||
plt.savefig(heatmap_path, dpi=300, bbox_inches='tight')
|
||||
plt.close()
|
||||
def point_in_region(lon, lat):
|
||||
"""判断点是否在目标区域内"""
|
||||
return region_polygon.contains(Point(lon, lat))
|
||||
|
||||
logger.info(f"热图已保存到: {heatmap_path}")
|
||||
def transform_point(point, matrix):
|
||||
"""应用变换矩阵到点"""
|
||||
x, y, z = point
|
||||
x_out = x * matrix[0] + y * matrix[4] + z * matrix[8] + matrix[12]
|
||||
y_out = x * matrix[1] + y * matrix[5] + z * matrix[9] + matrix[13]
|
||||
z_out = x * matrix[2] + y * matrix[6] + z * matrix[10] + matrix[14]
|
||||
return (x_out, y_out, z_out)
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description='分析两个3D Tiles模型指定区域的高度变化')
|
||||
parser.add_argument('--tileset1', required=True, help='第一个3D Tiles数据集路径')
|
||||
parser.add_argument('--tileset2', required=True, help='第二个3D Tiles数据集路径')
|
||||
parser.add_argument('--bounds', required=True, type=float, nargs=4,
|
||||
help='分析区域边界 [min_x, min_y, max_x, max_y]')
|
||||
parser.add_argument('--resolution', type=float, default=1.0, help='采样分辨率(米)')
|
||||
parser.add_argument('--output', default='results', help='输出目录')
|
||||
sample_density = 20
|
||||
|
||||
args = parser.parse_args()
|
||||
print("正在从第一个3D Tiles模型提取区域高度数据...")
|
||||
heights1 = get_heights_in_region(tileset_urls[0], sample_density)
|
||||
|
||||
processor = TilesetProcessor(args.tileset1, args.tileset2, args.resolution)
|
||||
print("正在从第二个3D Tiles模型提取区域高度数据...")
|
||||
heights2 = get_heights_in_region(tileset_urls[1], sample_density)
|
||||
|
||||
# 设置分析区域
|
||||
if processor.set_analysis_area(args.bounds):
|
||||
if processor.sample_heights():
|
||||
processor.export_results(args.output)
|
||||
print("分析完成!结果已导出到指定目录。")
|
||||
else:
|
||||
print("高度采样失败,无法完成分析。")
|
||||
if not heights1 or not heights2:
|
||||
print("无法获取足够的高度数据进行比较")
|
||||
return
|
||||
|
||||
# 计算平均高度
|
||||
avg1 = np.mean(list(heights1.values()))
|
||||
avg2 = np.mean(list(heights2.values()))
|
||||
|
||||
print(f"\n模型1 平均高度: {avg1:.2f} 米")
|
||||
print(f"模型2 平均高度: {avg2:.2f} 米")
|
||||
|
||||
delta = avg1 - avg2
|
||||
print(f"高度差: {delta:.2f} 米")
|
||||
|
||||
# 🔧 自动统一高度
|
||||
if abs(delta) > 0.5:
|
||||
print("\n⚙️ 正在统一高度基准(修改模型2的 transform)...")
|
||||
# tileset_urls[1] 是远程 URL,下载后调整
|
||||
try:
|
||||
ts2_url = tileset_urls[1]
|
||||
response = requests.get(ts2_url)
|
||||
response.raise_for_status()
|
||||
with open("tileset_model2.json", "w", encoding="utf-8") as f:
|
||||
f.write(response.text)
|
||||
|
||||
adjust_z_in_transform("tileset_model2.json", "tileset_model2_adjusted.json", delta_z=delta)
|
||||
except Exception as e:
|
||||
print(f"❌ 调整高度失败: {e}")
|
||||
else:
|
||||
print("设置分析区域失败,无法进行分析。")
|
||||
print("\n✅ 高度差异在容忍范围内,无需调整")
|
||||
|
||||
# 🔍 差异分析
|
||||
print("\n正在分析详细差异点...")
|
||||
differences = compare_heights(heights1, heights2, 0.5)
|
||||
|
||||
if differences:
|
||||
print(f"共发现 {len(differences)} 处显著高度差异:")
|
||||
for i, diff in enumerate(differences[:10], 1): # 仅显示前10条
|
||||
lon, lat = diff['point']
|
||||
print(f"\n位置 {i}: 经度 {lon}, 纬度 {lat}")
|
||||
print(f"模型1高度: {diff['height1']:.2f}米")
|
||||
print(f"模型2高度: {diff['height2']:.2f}米")
|
||||
if diff['difference'] is not None:
|
||||
print(f"差异: {diff['difference']:.2f}米")
|
||||
else:
|
||||
print("差异: 一个模型在该位置没有数据")
|
||||
else:
|
||||
print("两个模型在指定区域高度基本一致 ✅")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
1
Ai_tottle/tileset_model2.json
Normal file
File diff suppressed because one or more lines are too long
2000
Ai_tottle/tileset_model2_adjusted.json
Normal file
File diff suppressed because it is too large