First commit: training and recognition (inference) interfaces completed
105
UAVid_Segmentation/v1.5_official/args.yaml
Normal file
@ -0,0 +1,105 @@
task: segment
mode: train
model: pt/yolo11s-seg.pt
data: dataset/dataset-1760926118282226200\data.yaml
epochs: 50
time: null
patience: 100
batch: 4
imgsz: 640
save: true
save_period: -1
cache: false
device: null
workers: 0
project: UAVid_Segmentation
name: v1.5_official
exist_ok: false
pretrained: true
optimizer: SGD
verbose: true
seed: 0
deterministic: true
single_cls: false
rect: false
cos_lr: false
close_mosaic: 10
resume: false
amp: true
fraction: 1.0
profile: false
freeze: null
multi_scale: false
overlap_mask: true
mask_ratio: 4
dropout: 0.0
val: true
split: val
save_json: false
conf: null
iou: 0.7
max_det: 300
half: false
dnn: false
plots: true
source: null
vid_stride: 1
stream_buffer: false
visualize: false
augment: true
agnostic_nms: false
classes: null
retina_masks: false
embed: null
show: false
save_frames: false
save_txt: false
save_conf: false
save_crop: false
show_labels: true
show_conf: true
show_boxes: true
line_width: null
format: torchscript
keras: false
optimize: false
int8: false
dynamic: false
simplify: true
opset: null
workspace: null
nms: false
lr0: 0.01
lrf: 0.01
momentum: 0.9
weight_decay: 0.0005
warmup_epochs: 3.0
warmup_momentum: 0.8
warmup_bias_lr: 0.1
box: 7.5
cls: 0.5
dfl: 1.5
pose: 12.0
kobj: 1.0
nbs: 64
hsv_h: 0.015
hsv_s: 0.7
hsv_v: 0.4
degrees: 0.0
translate: 0.1
scale: 0.5
shear: 0.0
perspective: 0.0
flipud: 0.0
fliplr: 0.5
bgr: 0.0
mosaic: 1.0
mixup: 0.0
cutmix: 0.0
copy_paste: 0.0
copy_paste_mode: flip
auto_augment: randaugment
erasing: 0.4
cfg: null
tracker: botsort.yaml
save_dir: UAVid_Segmentation\v1.5_official
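
Each args.yaml above is the full run configuration that Ultralytics snapshots next to its outputs. As a minimal sketch (not part of this commit), a run like this could be re-launched from the saved file; the path below is illustrative and only a few of the recorded keys are passed back:

import yaml
from ultralytics import YOLO

# Sketch: re-launch a run from a saved args.yaml (illustrative path, assumes ultralytics is installed).
with open("UAVid_Segmentation/v1.5_official/args.yaml", encoding="utf-8") as f:
    args = yaml.safe_load(f)

model = YOLO(args["model"])  # pt/yolo11s-seg.pt
model.train(
    data=args["data"],
    epochs=args["epochs"],
    imgsz=args["imgsz"],
    batch=args["batch"],
    workers=args["workers"],
    optimizer=args["optimizer"],
    project=args["project"],
    name=args["name"],
)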
3
UAVid_Segmentation/v1.5_official/results.csv
Normal file
@ -0,0 +1,3 @@
epoch,time,train/box_loss,train/seg_loss,train/cls_loss,train/dfl_loss,metrics/precision(B),metrics/recall(B),metrics/mAP50(B),metrics/mAP50-95(B),metrics/precision(M),metrics/recall(M),metrics/mAP50(M),metrics/mAP50-95(M),val/box_loss,val/seg_loss,val/cls_loss,val/dfl_loss,lr/pg0,lr/pg1,lr/pg2
1,1.79583,0,0,74.9982,0,0,0,0,0,0,0,0,0,0,0,11.5625,0,0.0955,0.0005,0.0005
2,2.92969,0,0,83.6285,0,0,0,0,0,0,0,0,0,0,0,14.2266,0,0.0900782,0.00107822,0.00107822
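
The results.csv files in this commit only contain two logged epochs. A quick sketch (pandas assumed available; path is illustrative) for inspecting them:

import pandas as pd

# Sketch: load a run's results.csv and look at a few loss/metric columns.
df = pd.read_csv("UAVid_Segmentation/v1.5_official/results.csv")
print(df[["epoch", "train/cls_loss", "val/cls_loss", "metrics/mAP50(M)"]])

Note that runs v1.5_official through v1.5_official3 log zero box/seg losses and zero metrics, which usually points to the label files not being picked up; v1.5_official4 and v1.5_official5 are the first runs with non-zero losses.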
BIN
UAVid_Segmentation/v1.5_official/train_batch0.jpg
Normal file
|
After Width: | Height: | Size: 174 KiB |
BIN
UAVid_Segmentation/v1.5_official/train_batch1.jpg
Normal file
|
After Width: | Height: | Size: 178 KiB |
BIN
UAVid_Segmentation/v1.5_official/train_batch2.jpg
Normal file
|
After Width: | Height: | Size: 171 KiB |
BIN
UAVid_Segmentation/v1.5_official/weights/best.pt
Normal file
BIN
UAVid_Segmentation/v1.5_official/weights/last.pt
Normal file
105
UAVid_Segmentation/v1.5_official2/args.yaml
Normal file
@ -0,0 +1,105 @@
task: segment
mode: train
model: pt/yolo11s-seg.pt
data: dataset/dataset-1760926301844370600\data.yaml
epochs: 50
time: null
patience: 100
batch: 4
imgsz: 640
save: true
save_period: -1
cache: false
device: null
workers: 0
project: UAVid_Segmentation
name: v1.5_official2
exist_ok: false
pretrained: true
optimizer: SGD
verbose: true
seed: 0
deterministic: true
single_cls: false
rect: false
cos_lr: false
close_mosaic: 10
resume: false
amp: true
fraction: 1.0
profile: false
freeze: null
multi_scale: false
overlap_mask: true
mask_ratio: 4
dropout: 0.0
val: true
split: val
save_json: false
conf: null
iou: 0.7
max_det: 300
half: false
dnn: false
plots: true
source: null
vid_stride: 1
stream_buffer: false
visualize: false
augment: true
agnostic_nms: false
classes: null
retina_masks: false
embed: null
show: false
save_frames: false
save_txt: false
save_conf: false
save_crop: false
show_labels: true
show_conf: true
show_boxes: true
line_width: null
format: torchscript
keras: false
optimize: false
int8: false
dynamic: false
simplify: true
opset: null
workspace: null
nms: false
lr0: 0.01
lrf: 0.01
momentum: 0.9
weight_decay: 0.0005
warmup_epochs: 3.0
warmup_momentum: 0.8
warmup_bias_lr: 0.1
box: 7.5
cls: 0.5
dfl: 1.5
pose: 12.0
kobj: 1.0
nbs: 64
hsv_h: 0.015
hsv_s: 0.7
hsv_v: 0.4
degrees: 0.0
translate: 0.1
scale: 0.5
shear: 0.0
perspective: 0.0
flipud: 0.0
fliplr: 0.5
bgr: 0.0
mosaic: 1.0
mixup: 0.0
cutmix: 0.0
copy_paste: 0.0
copy_paste_mode: flip
auto_augment: randaugment
erasing: 0.4
cfg: null
tracker: botsort.yaml
save_dir: UAVid_Segmentation\v1.5_official2
3
UAVid_Segmentation/v1.5_official2/results.csv
Normal file
@ -0,0 +1,3 @@
epoch,time,train/box_loss,train/seg_loss,train/cls_loss,train/dfl_loss,metrics/precision(B),metrics/recall(B),metrics/mAP50(B),metrics/mAP50-95(B),metrics/precision(M),metrics/recall(M),metrics/mAP50(M),metrics/mAP50-95(M),val/box_loss,val/seg_loss,val/cls_loss,val/dfl_loss,lr/pg0,lr/pg1,lr/pg2
1,1.93797,0,0,74.9982,0,0,0,0,0,0,0,0,0,0,0,11.5625,0,0.0955,0.0005,0.0005
2,3.18176,0,0,83.6285,0,0,0,0,0,0,0,0,0,0,0,14.2266,0,0.0900782,0.00107822,0.00107822
BIN
UAVid_Segmentation/v1.5_official2/train_batch0.jpg
Normal file
|
After Width: | Height: | Size: 174 KiB |
BIN
UAVid_Segmentation/v1.5_official2/train_batch1.jpg
Normal file
|
After Width: | Height: | Size: 178 KiB |
BIN
UAVid_Segmentation/v1.5_official2/train_batch2.jpg
Normal file
|
After Width: | Height: | Size: 171 KiB |
BIN
UAVid_Segmentation/v1.5_official2/weights/best.pt
Normal file
BIN
UAVid_Segmentation/v1.5_official2/weights/last.pt
Normal file
105
UAVid_Segmentation/v1.5_official3/args.yaml
Normal file
@ -0,0 +1,105 @@
task: segment
mode: train
model: pt/yolo11s-seg.pt
data: dataset/dataset-1760926476398531600\data.yaml
epochs: 50
time: null
patience: 100
batch: 4
imgsz: 640
save: true
save_period: -1
cache: false
device: null
workers: 0
project: UAVid_Segmentation
name: v1.5_official3
exist_ok: false
pretrained: true
optimizer: SGD
verbose: true
seed: 0
deterministic: true
single_cls: false
rect: false
cos_lr: false
close_mosaic: 10
resume: false
amp: true
fraction: 1.0
profile: false
freeze: null
multi_scale: false
overlap_mask: true
mask_ratio: 4
dropout: 0.0
val: true
split: val
save_json: false
conf: null
iou: 0.7
max_det: 300
half: false
dnn: false
plots: true
source: null
vid_stride: 1
stream_buffer: false
visualize: false
augment: true
agnostic_nms: false
classes: null
retina_masks: false
embed: null
show: false
save_frames: false
save_txt: false
save_conf: false
save_crop: false
show_labels: true
show_conf: true
show_boxes: true
line_width: null
format: torchscript
keras: false
optimize: false
int8: false
dynamic: false
simplify: true
opset: null
workspace: null
nms: false
lr0: 0.01
lrf: 0.01
momentum: 0.9
weight_decay: 0.0005
warmup_epochs: 3.0
warmup_momentum: 0.8
warmup_bias_lr: 0.1
box: 7.5
cls: 0.5
dfl: 1.5
pose: 12.0
kobj: 1.0
nbs: 64
hsv_h: 0.015
hsv_s: 0.7
hsv_v: 0.4
degrees: 0.0
translate: 0.1
scale: 0.5
shear: 0.0
perspective: 0.0
flipud: 0.0
fliplr: 0.5
bgr: 0.0
mosaic: 1.0
mixup: 0.0
cutmix: 0.0
copy_paste: 0.0
copy_paste_mode: flip
auto_augment: randaugment
erasing: 0.4
cfg: null
tracker: botsort.yaml
save_dir: UAVid_Segmentation\v1.5_official3
3
UAVid_Segmentation/v1.5_official3/results.csv
Normal file
@ -0,0 +1,3 @@
epoch,time,train/box_loss,train/seg_loss,train/cls_loss,train/dfl_loss,metrics/precision(B),metrics/recall(B),metrics/mAP50(B),metrics/mAP50-95(B),metrics/precision(M),metrics/recall(M),metrics/mAP50(M),metrics/mAP50-95(M),val/box_loss,val/seg_loss,val/cls_loss,val/dfl_loss,lr/pg0,lr/pg1,lr/pg2
1,1.89814,0,0,74.9982,0,0,0,0,0,0,0,0,0,0,0,11.5625,0,0.0955,0.0005,0.0005
2,3.0706,0,0,83.6285,0,0,0,0,0,0,0,0,0,0,0,14.2266,0,0.0900782,0.00107822,0.00107822
BIN
UAVid_Segmentation/v1.5_official3/train_batch0.jpg
Normal file
|
After Width: | Height: | Size: 174 KiB |
BIN
UAVid_Segmentation/v1.5_official3/train_batch1.jpg
Normal file
|
After Width: | Height: | Size: 178 KiB |
BIN
UAVid_Segmentation/v1.5_official3/train_batch2.jpg
Normal file
|
After Width: | Height: | Size: 171 KiB |
BIN
UAVid_Segmentation/v1.5_official3/weights/best.pt
Normal file
BIN
UAVid_Segmentation/v1.5_official3/weights/last.pt
Normal file
105
UAVid_Segmentation/v1.5_official4/args.yaml
Normal file
@ -0,0 +1,105 @@
task: segment
mode: train
model: pt/yolo11s-seg.pt
data: dataset/dataset-1760932424891014400\data.yaml
epochs: 50
time: null
patience: 100
batch: 4
imgsz: 640
save: true
save_period: -1
cache: false
device: null
workers: 0
project: UAVid_Segmentation
name: v1.5_official4
exist_ok: false
pretrained: true
optimizer: SGD
verbose: true
seed: 0
deterministic: true
single_cls: false
rect: false
cos_lr: false
close_mosaic: 10
resume: false
amp: true
fraction: 1.0
profile: false
freeze: null
multi_scale: false
overlap_mask: true
mask_ratio: 4
dropout: 0.0
val: true
split: val
save_json: false
conf: null
iou: 0.7
max_det: 300
half: false
dnn: false
plots: true
source: null
vid_stride: 1
stream_buffer: false
visualize: false
augment: true
agnostic_nms: false
classes: null
retina_masks: false
embed: null
show: false
save_frames: false
save_txt: false
save_conf: false
save_crop: false
show_labels: true
show_conf: true
show_boxes: true
line_width: null
format: torchscript
keras: false
optimize: false
int8: false
dynamic: false
simplify: true
opset: null
workspace: null
nms: false
lr0: 0.01
lrf: 0.01
momentum: 0.9
weight_decay: 0.0005
warmup_epochs: 3.0
warmup_momentum: 0.8
warmup_bias_lr: 0.1
box: 7.5
cls: 0.5
dfl: 1.5
pose: 12.0
kobj: 1.0
nbs: 64
hsv_h: 0.015
hsv_s: 0.7
hsv_v: 0.4
degrees: 0.0
translate: 0.1
scale: 0.5
shear: 0.0
perspective: 0.0
flipud: 0.0
fliplr: 0.5
bgr: 0.0
mosaic: 1.0
mixup: 0.0
cutmix: 0.0
copy_paste: 0.0
copy_paste_mode: flip
auto_augment: randaugment
erasing: 0.4
cfg: null
tracker: botsort.yaml
save_dir: UAVid_Segmentation\v1.5_official4
BIN
UAVid_Segmentation/v1.5_official4/labels.jpg
Normal file
|
After Width: | Height: | Size: 106 KiB |
3
UAVid_Segmentation/v1.5_official4/results.csv
Normal file
@ -0,0 +1,3 @@
epoch,time,train/box_loss,train/seg_loss,train/cls_loss,train/dfl_loss,metrics/precision(B),metrics/recall(B),metrics/mAP50(B),metrics/mAP50-95(B),metrics/precision(M),metrics/recall(M),metrics/mAP50(M),metrics/mAP50-95(M),val/box_loss,val/seg_loss,val/cls_loss,val/dfl_loss,lr/pg0,lr/pg1,lr/pg2
1,3.14625,4.19941,6.41601,8.47523,3.35484,0.01042,0.5,0.24875,0.07462,0,0,0,0,3.51347,4.84009,5.55821,3.24894,0.0955,0.0005,0.0005
2,4.97753,3.71605,7.05627,8.7431,2.98561,0.02083,0.5,0.16583,0.03317,0,0,0,0,3.61686,4.8416,6.10662,3.2123,0.0900782,0.00107822,0.00107822
BIN
UAVid_Segmentation/v1.5_official4/train_batch0.jpg
Normal file
|
After Width: | Height: | Size: 179 KiB |
BIN
UAVid_Segmentation/v1.5_official4/train_batch1.jpg
Normal file
|
After Width: | Height: | Size: 184 KiB |
BIN
UAVid_Segmentation/v1.5_official4/train_batch2.jpg
Normal file
|
After Width: | Height: | Size: 176 KiB |
BIN
UAVid_Segmentation/v1.5_official4/weights/best.pt
Normal file
BIN
UAVid_Segmentation/v1.5_official4/weights/last.pt
Normal file
105
UAVid_Segmentation/v1.5_official5/args.yaml
Normal file
@ -0,0 +1,105 @@
task: segment
mode: train
model: pt/yolo11s-seg.pt
data: dataset/dataset-1760955532913358800\data.yaml
epochs: 50
time: null
patience: 100
batch: 4
imgsz: 640
save: true
save_period: -1
cache: false
device: null
workers: 0
project: UAVid_Segmentation
name: v1.5_official5
exist_ok: false
pretrained: true
optimizer: SGD
verbose: true
seed: 0
deterministic: true
single_cls: false
rect: false
cos_lr: false
close_mosaic: 10
resume: false
amp: true
fraction: 1.0
profile: false
freeze: null
multi_scale: false
overlap_mask: true
mask_ratio: 4
dropout: 0.0
val: true
split: val
save_json: false
conf: null
iou: 0.7
max_det: 300
half: false
dnn: false
plots: true
source: null
vid_stride: 1
stream_buffer: false
visualize: false
augment: true
agnostic_nms: false
classes: null
retina_masks: false
embed: null
show: false
save_frames: false
save_txt: false
save_conf: false
save_crop: false
show_labels: true
show_conf: true
show_boxes: true
line_width: null
format: torchscript
keras: false
optimize: false
int8: false
dynamic: false
simplify: true
opset: null
workspace: null
nms: false
lr0: 0.01
lrf: 0.01
momentum: 0.9
weight_decay: 0.0005
warmup_epochs: 3.0
warmup_momentum: 0.8
warmup_bias_lr: 0.1
box: 7.5
cls: 0.5
dfl: 1.5
pose: 12.0
kobj: 1.0
nbs: 64
hsv_h: 0.015
hsv_s: 0.7
hsv_v: 0.4
degrees: 0.0
translate: 0.1
scale: 0.5
shear: 0.0
perspective: 0.0
flipud: 0.0
fliplr: 0.5
bgr: 0.0
mosaic: 1.0
mixup: 0.0
cutmix: 0.0
copy_paste: 0.0
copy_paste_mode: flip
auto_augment: randaugment
erasing: 0.4
cfg: null
tracker: botsort.yaml
save_dir: UAVid_Segmentation\v1.5_official5
BIN
UAVid_Segmentation/v1.5_official5/labels.jpg
Normal file
|
After Width: | Height: | Size: 106 KiB |
3
UAVid_Segmentation/v1.5_official5/results.csv
Normal file
@ -0,0 +1,3 @@
epoch,time,train/box_loss,train/seg_loss,train/cls_loss,train/dfl_loss,metrics/precision(B),metrics/recall(B),metrics/mAP50(B),metrics/mAP50-95(B),metrics/precision(M),metrics/recall(M),metrics/mAP50(M),metrics/mAP50-95(M),val/box_loss,val/seg_loss,val/cls_loss,val/dfl_loss,lr/pg0,lr/pg1,lr/pg2
1,2.26767,4.19941,6.41601,8.47523,3.35484,0.01042,0.5,0.24875,0.07462,0,0,0,0,3.51347,4.84009,5.55821,3.24894,0.0955,0.0005,0.0005
2,3.67956,3.71605,7.05627,8.7431,2.98561,0.02083,0.5,0.16583,0.03317,0,0,0,0,3.61686,4.8416,6.10662,3.2123,0.0900782,0.00107822,0.00107822
BIN
UAVid_Segmentation/v1.5_official5/train_batch0.jpg
Normal file
|
After Width: | Height: | Size: 179 KiB |
BIN
UAVid_Segmentation/v1.5_official5/train_batch1.jpg
Normal file
|
After Width: | Height: | Size: 184 KiB |
BIN
UAVid_Segmentation/v1.5_official5/train_batch2.jpg
Normal file
|
After Width: | Height: | Size: 176 KiB |
BIN
UAVid_Segmentation/v1.5_official5/weights/best.pt
Normal file
BIN
UAVid_Segmentation/v1.5_official5/weights/last.pt
Normal file
BIN
__pycache__/download_train.cpython-312.pyc
Normal file
BIN
__pycache__/query_process_status.cpython-312.pyc
Normal file
BIN
dataset/predictions-20250711-111516-531.zip
Normal file
BIN
dataset/zip_dir1760962080216800000.zip
Normal file
799
download_train.py
Normal file
@ -0,0 +1,799 @@
import asyncio
|
||||
import os.path
|
||||
import shutil
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
import subprocess
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
from middleware.minio_util import downFullPathFile
|
||||
from middleware.query_model import ModelConfigDAO
|
||||
import yaml
|
||||
import torch
|
||||
from ultralytics import YOLO
|
||||
|
||||
|
||||
async def download_train(task_id: str, bz_training_task_id: int, pt_name: str):
|
||||
"""
|
||||
下载训练数据并启动训练
|
||||
这个函数负责准备数据,然后使用线程+subprocess创建独立进程执行训练
|
||||
"""
|
||||
try:
|
||||
current_pid = os.getpid()
|
||||
print(f"Starting download and training for task {task_id} in process {current_pid}")
|
||||
|
||||
DB_CONFIG = {
|
||||
"dbname": "smart_dev_123",
|
||||
"user": "postgres",
|
||||
"password": "root",
|
||||
"host": "8.137.54.85",
|
||||
"port": "5060"
|
||||
}
|
||||
|
||||
# 创建DAO实例
|
||||
dao = ModelConfigDAO(DB_CONFIG)
|
||||
time_ns = time.time_ns()
|
||||
|
||||
output_root = f"dataset/dataset-{time_ns}"
|
||||
|
||||
try:
|
||||
if not os.path.exists(output_root):
|
||||
os.mkdir(output_root)
|
||||
print(f"Created output directory: {output_root}")
|
||||
except Exception as e:
|
||||
print(f"Failed to create output directory: {e}")
|
||||
raise
|
||||
|
||||
try:
|
||||
# 获取标签和数据集信息
|
||||
list_labels = dao.get_labels(bz_training_task_id)
|
||||
list_datasets = dao.get_datasets(bz_training_task_id)
|
||||
label_yaml_list = dao.get_label_yaml(bz_training_task_id)
|
||||
|
||||
print(
|
||||
f"Retrieved {len(list_labels)} labels, {len(list_datasets)} datasets, {len(label_yaml_list)} label configs")
|
||||
except Exception as e:
|
||||
print(f"Failed to retrieve data from database: {e}")
|
||||
raise
|
||||
|
||||
# 定义数据结构(字典)
|
||||
uavid_config = {
|
||||
"path": "", # 替换为你的绝对路径
|
||||
"train": "images/train", # 训练集路径
|
||||
"val": "images/val", # 验证集路径
|
||||
"test": "images/test", # 测试集路径(可选)
|
||||
"names": {}
|
||||
}
|
||||
|
||||
try:
|
||||
uavid_config["path"] = os.path.abspath(output_root)
|
||||
for i, item in enumerate(label_yaml_list):
|
||||
item.id_order = i
|
||||
uavid_config["names"][f"{i}"] = item.e_name
|
||||
|
||||
# 生成 YAML 文件
|
||||
data_yaml = "data.yaml"
|
||||
with open(data_yaml, "w", encoding="utf-8") as f:
|
||||
yaml.dump(
|
||||
uavid_config,
|
||||
f,
|
||||
default_flow_style=False, # 禁用紧凑格式(保持多行)
|
||||
allow_unicode=True, # 允许 Unicode 字符
|
||||
sort_keys=False # 保持键的顺序
|
||||
)
|
||||
|
||||
file_name = os.path.basename(data_yaml)
|
||||
des_path = os.path.join(output_root, file_name)
|
||||
if os.path.exists(des_path):
|
||||
os.remove(des_path)
|
||||
shutil.move(data_yaml, output_root)
|
||||
print(f"Generated YAML config at: {os.path.abspath(output_root)}")
|
||||
except Exception as e:
|
||||
print(f"Failed to generate YAML config: {e}")
|
||||
raise
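
# For reference, the data.yaml generated above has roughly this shape
# (the path and class names are illustrative, not taken from a real run):
#
#   path: <absolute path to dataset/dataset-<timestamp>>
#   train: images/train
#   val: images/val
#   test: images/test
#   names:
#     '0': building
#     '1': road
#
# The class ids come out as string keys because of names[f"{i}"] above;
# integer keys are the more common convention in Ultralytics data files.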
|
||||
|
||||
# 下载数据集
|
||||
invalid_indices = []
|
||||
|
||||
try:
|
||||
for index, pic in enumerate(list_datasets):
|
||||
if hasattr(pic, 'resource_original_path') and pic.resource_original_path: # 图像路径有效
|
||||
try:
|
||||
download_path = downFullPathFile(pic.resource_original_path)
|
||||
if download_path: # 下载成功
|
||||
pic.local_path = download_path
|
||||
pic.label_name = Path(download_path).stem # 截取图片名称,用作标签
|
||||
print(f"Downloaded file: {download_path}")
|
||||
else:
|
||||
invalid_indices.append(index) # 存储不符合条件的索引,准备删除
|
||||
print(f"Failed to download file: {pic.resource_original_path}")
|
||||
except Exception as e:
|
||||
invalid_indices.append(index)
|
||||
print(f"Error downloading file {pic.resource_original_path}: {e}")
|
||||
else:
|
||||
invalid_indices.append(index) # 存储不符合条件的索引,准备删除
|
||||
except Exception as e:
|
||||
print(f"Error processing datasets: {e}")
|
||||
raise
|
||||
|
||||
# 从后往前删除(避免删除时索引错乱),删除不符合条件的list_datasets
|
||||
try:
|
||||
for idx in sorted(invalid_indices, reverse=True):
|
||||
del list_datasets[idx]
|
||||
|
||||
print(f"Filtered datasets: {len(list_datasets)} valid items remaining")
|
||||
except Exception as e:
|
||||
print(f"Error filtering datasets: {e}")
|
||||
raise
|
||||
|
||||
# 整理标签内容
|
||||
try:
|
||||
for data_pic in list_datasets: # 整理完整的图像与标签集的对应关系
|
||||
for label in list_labels:
|
||||
if hasattr(data_pic, 'id') and hasattr(label, 'id') and data_pic.id == label.id:
|
||||
for item in label_yaml_list:
|
||||
if hasattr(label, 'label_ids') and hasattr(item, 'id') and label.label_ids == item.id:
|
||||
# 假设label有annotation_data属性
|
||||
annotation = getattr(label, 'annotation_data', '')
|
||||
current_content = getattr(data_pic, 'label_content', '')
|
||||
data_pic.label_content = f"{current_content}{item.id_order} {annotation}\n"
|
||||
except Exception as e:
|
||||
print(f"Error organizing labels: {e}")
|
||||
raise
|
||||
|
||||
# 创建标签文件
|
||||
try:
|
||||
for data_pic in list_datasets:
|
||||
if hasattr(data_pic, 'label_name'):
|
||||
label_txt = f"{data_pic.label_name}.txt"
|
||||
with open(label_txt, 'w', encoding='utf-8') as f:
|
||||
f.write(getattr(data_pic, 'label_content', ''))
|
||||
data_pic.label_txt_path = os.path.abspath(label_txt)
|
||||
print(f"Created label file: {label_txt}")
|
||||
except Exception as e:
|
||||
print(f"Error creating label files: {e}")
|
||||
raise
|
||||
|
||||
# 移动文件,制作数据集
|
||||
try:
|
||||
dataset_dirs = {
|
||||
"images": Path(output_root) / "images",
|
||||
"labels": Path(output_root) / "labels"
|
||||
}
|
||||
|
||||
for ds_dir in dataset_dirs.values():
|
||||
(ds_dir / "val").mkdir(parents=True, exist_ok=True)
|
||||
(ds_dir / "train").mkdir(parents=True, exist_ok=True)
|
||||
(ds_dir / "test").mkdir(parents=True, exist_ok=True)
|
||||
|
||||
print("Created dataset directory structure")
|
||||
except Exception as e:
|
||||
print(f"Error creating dataset directories: {e}")
|
||||
raise
|
||||
|
||||
# 分配数据集到训练、验证、测试集
|
||||
try:
|
||||
count_pic = 0
|
||||
for data_pic in list_datasets:
|
||||
count_pic += 1
|
||||
|
||||
# 80% 训练集, 10% 验证集, 10% 测试集
|
||||
if count_pic % 10 < 8:
|
||||
split = "train"
|
||||
elif count_pic % 10 == 8:
|
||||
split = "val"
|
||||
else: # count_pic % 10 == 9
|
||||
split = "test"
|
||||
|
||||
# 移动图像文件
|
||||
if hasattr(data_pic, 'local_path') and os.path.exists(data_pic.local_path):
|
||||
images_path = dataset_dirs["images"]
|
||||
image_dir = os.path.join(images_path, split)
|
||||
file_name = os.path.basename(data_pic.local_path)
|
||||
des_path = os.path.join(image_dir, file_name)
|
||||
|
||||
if os.path.exists(des_path):
|
||||
os.remove(des_path)
|
||||
shutil.move(data_pic.local_path, image_dir)
|
||||
|
||||
# 移动标签文件
|
||||
if hasattr(data_pic, 'label_txt_path') and os.path.exists(data_pic.label_txt_path):
|
||||
labels_path = dataset_dirs["labels"]
|
||||
label_dir = os.path.join(labels_path, split)
|
||||
file_name = os.path.basename(data_pic.label_txt_path)
|
||||
des_path = os.path.join(label_dir, file_name)
|
||||
|
||||
if os.path.exists(des_path):
|
||||
os.remove(des_path)
|
||||
shutil.move(data_pic.label_txt_path, label_dir)
|
||||
|
||||
print(f"Organized {count_pic} files into dataset splits")
|
||||
except Exception as e:
|
||||
print(f"Error organizing dataset splits: {e}")
|
||||
raise
|
||||
|
||||
# 训练配置
|
||||
custom_config = {
|
||||
"epochs": 50, # 快速测试用
|
||||
"batch_size": 4,
|
||||
"workers": 0, # 禁用多进程数据加载
|
||||
}
|
||||
|
||||
# 保存训练配置到文件
|
||||
config_file = f"train_config_{task_id}.json"
|
||||
with open(config_file, 'w', encoding='utf-8') as f:
|
||||
json.dump({
|
||||
'dataset_dir': output_root,
|
||||
'pt_name': pt_name,
|
||||
'config_overrides': custom_config,
|
||||
'db_config': DB_CONFIG,
|
||||
'task_id': task_id
|
||||
}, f)
|
||||
|
||||
print(f"Training data preparation completed for task {task_id}")
|
||||
|
||||
# 在Windows上使用线程+subprocess创建训练进程
|
||||
# 避免使用asyncio.create_subprocess_exec
|
||||
loop = asyncio.get_event_loop()
|
||||
training_pid = await loop.run_in_executor(
|
||||
None, # 使用默认的线程池
|
||||
start_training_process,
|
||||
config_file
|
||||
)
|
||||
|
||||
if training_pid:
|
||||
print(f"pid--{training_pid}")
|
||||
dao.insert_train_pid(task_id, train_pid=training_pid)
|
||||
return training_pid
|
||||
else:
|
||||
raise Exception("Failed to start training process")
|
||||
|
||||
except Exception as e:
|
||||
print(f"Training failed for task {task_id}: {e}", exc_info=True)
|
||||
raise
|
||||
|
||||
|
||||
def start_training_process(config_file: str) -> int:
|
||||
"""
|
||||
在独立线程中启动训练进程
|
||||
使用subprocess.Popen创建训练进程
|
||||
"""
|
||||
try:
|
||||
# 创建训练脚本内容
|
||||
train_script = '''
|
||||
import sys
|
||||
import json
|
||||
import os
|
||||
import torch
|
||||
from ultralytics import YOLO
|
||||
|
||||
class MockModelConfigDAO:
|
||||
def __init__(self, db_config):
|
||||
self.db_config = db_config
|
||||
|
||||
def insert_train_pid(self, task_id, train_pid):
|
||||
print(f"Inserted training PID {train_pid} for task {task_id}")
|
||||
|
||||
def train_model(dataset_dir, weight_name="best_segmentation_model.pt", config_overrides=None):
|
||||
"""
|
||||
训练模型并保存权重
|
||||
"""
|
||||
try:
|
||||
current_pid = os.getpid()
|
||||
print(f"Starting model training in process {current_pid} with dataset: {dataset_dir}")
|
||||
|
||||
# 默认配置(可通过参数覆盖)
|
||||
DEFAULT_CONFIG = {
|
||||
"model": "pt/yolo11s-seg.pt",
|
||||
"pretrained": True,
|
||||
"data": os.path.join(dataset_dir, "data.yaml"),
|
||||
"project": "UAVid_Segmentation",
|
||||
"name": "v1.5_official",
|
||||
"epochs": 1000,
|
||||
"batch_size": 8,
|
||||
"img_size": 640,
|
||||
"workers": 0, # 禁用多进程数据加载
|
||||
"optimizer": "SGD",
|
||||
"lr0": 0.01,
|
||||
"lrf": 0.01,
|
||||
"momentum": 0.9,
|
||||
"weight_decay": 0.0005,
|
||||
"augment": True,
|
||||
"hyp": {
|
||||
"mosaic": 0.5,
|
||||
"copy_paste": 0.2,
|
||||
"mixup": 0.15,
|
||||
},
|
||||
}
|
||||
|
||||
config = DEFAULT_CONFIG.copy()
|
||||
if config_overrides:
|
||||
config.update(config_overrides)
|
||||
|
||||
print(f"Training config: {config}")
|
||||
|
||||
# 检查数据配置文件
|
||||
data_path = config["data"]
|
||||
if not os.path.exists(data_path):
|
||||
raise FileNotFoundError(f"Data configuration file not found: {data_path}")
|
||||
|
||||
# 初始化模型
|
||||
model = YOLO(config["model"])
|
||||
print(f"Model initialized with: {config["model"]}")
|
||||
|
||||
# 开始训练
|
||||
results = model.train(
|
||||
data=config["data"],
|
||||
project=config["project"],
|
||||
name=config["name"],
|
||||
epochs=config["epochs"],
|
||||
batch=config["batch_size"],
|
||||
imgsz=config["img_size"],
|
||||
workers=config["workers"],
|
||||
optimizer=config["optimizer"],
|
||||
lr0=config["lr0"],
|
||||
lrf=config["lrf"],
|
||||
momentum=config["momentum"],
|
||||
weight_decay=config["weight_decay"],
|
||||
augment=config["augment"],
|
||||
device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
|
||||
)
|
||||
print(f"Training completed successfully in process {current_pid}")
|
||||
|
||||
# 验证模型
|
||||
metrics = model.val()
|
||||
print(f"Validation mAP: {metrics.box:.2f} (box), {metrics.seg:.2f} (mask)")
|
||||
|
||||
# 保存最佳模型
|
||||
try:
|
||||
if hasattr(results, 'best') and results.best:
|
||||
best_model_path = results.best
|
||||
if os.path.exists(best_model_path):
|
||||
import shutil
|
||||
shutil.copy2(best_model_path, weight_name)
|
||||
print(f"Best model saved to: {os.path.abspath(weight_name)}")
|
||||
else:
|
||||
torch.save(model.state_dict(), weight_name)
|
||||
print(f"Best model path not found, saved state dict to: {weight_name}")
|
||||
else:
|
||||
torch.save(model.state_dict(), weight_name)
|
||||
print(f"Saved model state dict to: {weight_name}")
|
||||
except Exception as e:
|
||||
print(f"Warning: Failed to save best model: {e}")
|
||||
torch.save(model.state_dict(), weight_name)
|
||||
print(f"Fallback: Saved model state dict to: {weight_name}")
|
||||
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
print(f"Model training failed in process {os.getpid()}: {e}", exc_info=True)
|
||||
raise
|
||||
|
||||
def main():
|
||||
if len(sys.argv) != 2:
|
||||
print("Usage: python -c '<script>' <config_file>")
|
||||
sys.exit(1)
|
||||
|
||||
config_file = sys.argv[1]
|
||||
|
||||
try:
|
||||
with open(config_file, 'r', encoding='utf-8') as f:
|
||||
config = json.load(f)
|
||||
|
||||
# 提取配置
|
||||
dataset_dir = config['dataset_dir']
|
||||
pt_name = config['pt_name']
|
||||
config_overrides = config['config_overrides']
|
||||
db_config = config['db_config']
|
||||
task_id = config['task_id']
|
||||
|
||||
# 获取当前进程ID
|
||||
pid = os.getpid()
|
||||
print(f"Training process started for task {task_id} with PID {pid}")
|
||||
|
||||
# 记录PID到数据库
|
||||
try:
|
||||
from middleware.query_model import ModelConfigDAO
|
||||
dao = ModelConfigDAO(db_config)
|
||||
except ImportError:
|
||||
dao = MockModelConfigDAO(db_config)
|
||||
|
||||
dao.insert_train_pid(task_id, train_pid=pid)
|
||||
|
||||
# 执行训练
|
||||
success = train_model(dataset_dir, pt_name, config_overrides)
|
||||
|
||||
if success:
|
||||
print(f"Training completed successfully for task {task_id}")
|
||||
sys.exit(0)
|
||||
else:
|
||||
print(f"Training failed for task {task_id}")
|
||||
sys.exit(1)
|
||||
|
||||
except Exception as e:
|
||||
print(f"Training error: {e}", file=sys.stderr)
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
sys.exit(1)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
'''
|
||||
|
||||
# 保存训练脚本
|
||||
script_path = f"train_worker_{os.path.basename(config_file).split('_')[2].split('.')[0]}.py"
|
||||
with open(script_path, 'w', encoding='utf-8') as f:
|
||||
f.write(train_script)
|
||||
|
||||
# 使用subprocess.Popen创建训练进程
|
||||
# 在Windows上使用shell=True可以解决一些路径问题
|
||||
process = subprocess.Popen([
|
||||
sys.executable, script_path, config_file
|
||||
], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, shell=False)
|
||||
|
||||
print(f"Started training process with PID {process.pid}")
|
||||
|
||||
# 启动线程来处理输出
|
||||
threading.Thread(target=handle_process_output, args=(process,), daemon=True).start()
|
||||
|
||||
return process.pid
|
||||
|
||||
except Exception as e:
|
||||
print(f"Failed to start training process: {e}", exc_info=True)
|
||||
return 0
|
||||
|
||||
|
||||
def handle_process_output(process: subprocess.Popen):
|
||||
"""
|
||||
处理子进程的输出
|
||||
"""
|
||||
try:
|
||||
# 分别读取stdout和stderr
|
||||
def read_stream(stream, stream_name):
|
||||
while True:
|
||||
line = stream.readline()
|
||||
if not line:
|
||||
break
|
||||
line = line.strip()
|
||||
print(f"[{stream_name}] {line}")
|
||||
|
||||
# 启动线程读取stdout和stderr
|
||||
stdout_thread = threading.Thread(target=read_stream, args=(process.stdout, 'STDOUT'))
|
||||
stderr_thread = threading.Thread(target=read_stream, args=(process.stderr, 'STDERR'))
|
||||
|
||||
stdout_thread.start()
|
||||
stderr_thread.start()
|
||||
|
||||
# 等待进程完成
|
||||
stdout_thread.join()
|
||||
stderr_thread.join()
|
||||
|
||||
# 获取返回码
|
||||
return_code = process.wait()
|
||||
print(f"Training process completed with return code: {return_code}")
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error handling process output: {e}", exc_info=True)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
"""
|
||||
直接运行时的入口点
|
||||
用于测试
|
||||
"""
|
||||
import sys
|
||||
|
||||
if len(sys.argv) < 4:
|
||||
print(f"Usage: {sys.argv[0]} <task_id> <train_task_id> <pt_name>")
|
||||
sys.exit(1)
|
||||
|
||||
task_id = sys.argv[1]
|
||||
train_task_id = int(sys.argv[2])
|
||||
pt_name = sys.argv[3]
|
||||
|
||||
try:
|
||||
# 创建事件循环
|
||||
loop = asyncio.get_event_loop()
|
||||
pid = loop.run_until_complete(download_train(task_id, train_task_id, pt_name))
|
||||
print(f"Training started in process {pid}")
|
||||
|
||||
# 保持事件循环运行
|
||||
try:
|
||||
loop.run_forever()
|
||||
except KeyboardInterrupt:
|
||||
print("Received keyboard interrupt. Exiting...")
|
||||
finally:
|
||||
loop.close()
|
||||
|
||||
except Exception as e:
|
||||
print(f"Training failed: {e}", exc_info=True)
|
||||
sys.exit(1)
|
||||
|
||||
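
download_train() hands work to the training subprocess through a small JSON file that start_training_process() passes to the generated worker script. A sketch of that hand-off file and how it is written (field values are illustrative, not from a real task):

# Sketch: shape of train_config_<task_id>.json written by download_train() (illustrative values).
import json

handoff = {
    "dataset_dir": "dataset/dataset-1760955532913358800",
    "pt_name": "uav_seg_v1.pt",        # hypothetical weight file name
    "config_overrides": {"epochs": 50, "batch_size": 4, "workers": 0},
    "db_config": {"dbname": "smart_dev_123", "user": "postgres", "password": "...", "host": "...", "port": "5060"},
    "task_id": "task-001",             # hypothetical task id
}
with open("train_config_task-001.json", "w", encoding="utf-8") as f:
    json.dump(handoff, f)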
BIN
middleware/__pycache__/minio_util.cpython-312.pyc
Normal file
BIN
middleware/__pycache__/query_model.cpython-312.pyc
Normal file
BIN
middleware/__pycache__/util.cpython-312.pyc
Normal file
389
middleware/minio_util.py
Normal file
@ -0,0 +1,389 @@
|
||||
import logging
|
||||
import time
|
||||
|
||||
from minio import Minio
|
||||
from minio.error import S3Error
|
||||
import os
|
||||
|
||||
import urllib.parse
|
||||
|
||||
from middleware.util import get_current_date_and_milliseconds
|
||||
|
||||
client = Minio(
|
||||
endpoint="222.212.85.86:9000", # MinIO 服务器地址
|
||||
access_key="adminjdskfj", # 替换为你的 Access Key
|
||||
secret_key="123456ksldjfal@Y", # 替换为你的 Secret Key
|
||||
secure=False # 如果未启用 HTTPS 则设为 False
|
||||
)
|
||||
|
||||
first_dir = 'ai_result'
|
||||
# 配置日志
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
||||
)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
def create_bucket():
|
||||
'''
|
||||
访问 MinIO 服务器,打印存储桶
|
||||
'''
|
||||
try:
|
||||
buckets = client.list_buckets()
|
||||
for bucket in buckets:
|
||||
print(f"Bucket: {bucket.name}, Created: {bucket.creation_date}")
|
||||
except S3Error as e:
|
||||
print(f"Error: {e}")
|
||||
|
||||
def downFile(object_name):
|
||||
'''下载文件并返回本地路径'''
|
||||
if not object_name or not isinstance(object_name, str):
|
||||
logger.error(f"Invalid object name: {object_name}")
|
||||
return None
|
||||
|
||||
bucket_name = "300bdf2b-a150-406e-be63-d28bd29b409f"
|
||||
try:
|
||||
current_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
download_path = os.path.join(current_dir, os.path.basename(object_name))
|
||||
|
||||
# 确保目录存在
|
||||
os.makedirs(os.path.dirname(download_path), exist_ok=True)
|
||||
|
||||
logger.info(f"Attempting to download {object_name} from bucket {bucket_name} to {download_path}")
|
||||
|
||||
client.fget_object(
|
||||
bucket_name=bucket_name,
|
||||
object_name=object_name,
|
||||
file_path=download_path
|
||||
)
|
||||
|
||||
logger.info(f"Successfully downloaded file to: {download_path}")
|
||||
return download_path
|
||||
except S3Error as e:
|
||||
logger.error(f"MinIO download error: {e}")
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected error downloading file: {e}", exc_info=True)
|
||||
return None
|
||||
|
||||
|
||||
def downBigFile(object_name):
|
||||
'''下载文件并返回本地路径,支持大文件进度输出'''
|
||||
if not object_name or not isinstance(object_name, str):
|
||||
logger.error(f"Invalid object name: {object_name}")
|
||||
return None
|
||||
|
||||
bucket_name = "300bdf2b-a150-406e-be63-d28bd29b409f"
|
||||
try:
|
||||
current_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
download_path = os.path.join(current_dir, os.path.basename(object_name))
|
||||
|
||||
# 确保目录存在
|
||||
os.makedirs(os.path.dirname(download_path), exist_ok=True)
|
||||
|
||||
logger.info(f"Attempting to download {object_name} from bucket {bucket_name} to {download_path}")
|
||||
|
||||
# 获取文件总大小
|
||||
try:
|
||||
stat = client.stat_object(bucket_name, object_name)
|
||||
total_size = stat.size
|
||||
except S3Error as e:
|
||||
logger.error(f"Failed to get object stats: {e}")
|
||||
return None
|
||||
|
||||
# 获取对象数据流
|
||||
response = client.get_object(bucket_name, object_name)
|
||||
|
||||
# 手动实现进度跟踪
|
||||
downloaded_size = 0
|
||||
chunk_size = 8192 # 8KB chunks
|
||||
|
||||
with open(download_path, 'wb') as file:
|
||||
while True:
|
||||
data = response.read(chunk_size)
|
||||
if not data:
|
||||
break
|
||||
file.write(data)
|
||||
downloaded_size += len(data)
|
||||
|
||||
# 打印进度
|
||||
percent = (downloaded_size / total_size) * 100
|
||||
print(f"\r下载进度: {percent:.2f}% ({downloaded_size}/{total_size} bytes)", end="", flush=True)
|
||||
|
||||
print("\n下载完成!") # 换行,避免进度条影响后续日志
|
||||
response.close()
|
||||
response.release_conn()
|
||||
|
||||
logger.info(f"Successfully downloaded file to: {download_path}")
|
||||
return download_path
|
||||
except S3Error as e:
|
||||
logger.error(f"MinIO download error: {e}")
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected error downloading file: {e}", exc_info=True)
|
||||
return None
|
||||
|
||||
|
||||
def upload_folder(folder_path, bucket_directory):
|
||||
"""
|
||||
上传文件夹中的所有文件到 MinIO 指定目录
|
||||
:param folder_path: 本地文件夹路径
|
||||
:param bucket_name: MinIO 存储桶名称
|
||||
:param bucket_directory: MinIO 存储桶内的目标目录(可选)
|
||||
"""
|
||||
# 要下载的桶名和对象名
|
||||
bucket_name = "300bdf2b-a150-406e-be63-d28bd29b409f" # 你的桶名称
|
||||
ai_dir_name = "ai_result"
|
||||
formatted_date, milliseconds_timestamp = get_current_date_and_milliseconds()
|
||||
dir_name = os.path.basename(os.path.normpath(folder_path))
|
||||
file_save_dir = f"{ai_dir_name}/{str(formatted_date)}/{dir_name}"
|
||||
try:
|
||||
# 确保存储桶存在
|
||||
if not client.bucket_exists(bucket_name):
|
||||
print(f"存储桶 {bucket_name} 不存在")
|
||||
|
||||
# 遍历文件夹中的所有文件
|
||||
for root, _, files in os.walk(folder_path):
|
||||
for file in files:
|
||||
file_path = os.path.join(root, file)
|
||||
file_path_dir = os.path.dirname(folder_path)
|
||||
|
||||
relative_path = os.path.relpath(file_path, start=file_path_dir)
|
||||
relative_path = relative_path.replace(os.sep, '/') # 替换文件夹分割符号
|
||||
|
||||
object_name = f"{file_save_dir}/{relative_path}"
|
||||
# if bucket_directory:
|
||||
# object_name = f"{file_save_dir}/{str(milliseconds_timestamp)}-{file_name}"
|
||||
# else:
|
||||
# object_name = f"{file_save_dir}//{str(milliseconds_timestamp)}-{file_name}"
|
||||
# 上传文件
|
||||
client.fput_object(bucket_name, object_name, file_path)
|
||||
print(f"文件 {file_path} 已上传至 {bucket_name}/{object_name}")
|
||||
|
||||
return file_save_dir
|
||||
except S3Error as e:
|
||||
print(f"上传文件夹时出错: {e}")
|
||||
|
||||
|
||||
def upload_file(file_path, bucket_directory):
|
||||
"""
|
||||
上传文件到 MinIO 指定目录
|
||||
:param file_path: 本地文件路径
|
||||
:param bucket_name: MinIO 存储桶名称
|
||||
:param bucket_directory: MinIO 存储桶内的目标目录(可选)
|
||||
"""
|
||||
# 要下载的桶名和对象名
|
||||
bucket_name = "300bdf2b-a150-406e-be63-d28bd29b409f" # 你的桶名称
|
||||
dir_name = "ai_result"
|
||||
try:
|
||||
# 确保存储桶存在
|
||||
if not client.bucket_exists(bucket_name):
|
||||
print(f"存储桶 {bucket_name} 不存在")
|
||||
|
||||
# 获取文件名
|
||||
file_name = os.path.basename(file_path)
|
||||
|
||||
formatted_date, milliseconds_timestamp = get_current_date_and_milliseconds()
|
||||
# 如果指定了桶目录,则添加前缀
|
||||
if bucket_directory:
|
||||
object_name = f"{dir_name}/{str(formatted_date)}/{str(milliseconds_timestamp)}-{file_name}"
|
||||
else:
|
||||
object_name = f"{dir_name}/{str(formatted_date)}/{str(milliseconds_timestamp)}-{file_name}"
|
||||
|
||||
# 上传文件
|
||||
client.fput_object(bucket_name, object_name, file_path)
|
||||
print(f"文件 {file_path} 已上传至 {bucket_name}/{object_name}")
|
||||
return object_name, "pic"
|
||||
except S3Error as e:
|
||||
print(f"上传文件时出错: {e}")
|
||||
|
||||
|
||||
# 将内存中的缓存,直接上传minio,不做本地存储
|
||||
def upload_file_from_buffer(buffer, file_name, bucket_directory=None):
|
||||
"""
|
||||
上传二进制流到 MinIO 指定目录
|
||||
:param buffer: BytesIO 对象,包含要上传的二进制数据
|
||||
:param bucket_name: MinIO 存储桶名称
|
||||
:param bucket_directory: MinIO 存储桶内的目标目录(可选)
|
||||
"""
|
||||
bucket_name = "300bdf2b-a150-406e-be63-d28bd29b409f" # 你的桶名称
|
||||
dir_name = "ai_result"
|
||||
try:
|
||||
# 确保存储桶存在
|
||||
if not client.bucket_exists(bucket_name):
|
||||
print(f"存储桶 {bucket_name} 不存在")
|
||||
return None
|
||||
|
||||
# 获取文件名(如果没有指定目录,则使用默认文件名)
|
||||
# file_name = "uploaded_file.png" # 默认文件名,可以根据需要修改
|
||||
|
||||
if file_name is None:
|
||||
file_name = "frame.jpg"
|
||||
|
||||
formatted_date, milliseconds_timestamp = get_current_date_and_milliseconds()
|
||||
# # 如果指定了桶目录,则添加前缀
|
||||
# if bucket_directory:
|
||||
# object_name = f"{dir_name}/{bucket_directory.rstrip('/')}/{file_name}"
|
||||
# else:
|
||||
# object_name = f"{dir_name}/{file_name}"
|
||||
if bucket_directory:
|
||||
object_name = f"{dir_name}/{str(formatted_date)}/{str(milliseconds_timestamp)}-{file_name}"
|
||||
else:
|
||||
object_name = f"{dir_name}/{str(formatted_date)}/{str(milliseconds_timestamp)}-{file_name}"
|
||||
# 上传二进制流
|
||||
# 注意:buffer.getvalue() 返回二进制数据
|
||||
client.put_object(
|
||||
bucket_name=bucket_name,
|
||||
object_name=object_name,
|
||||
data=buffer,
|
||||
length=buffer.getbuffer().nbytes,
|
||||
content_type="image/png" # 根据实际内容类型设置
|
||||
)
|
||||
print(f"二进制流已上传至 {bucket_name}/{object_name}")
|
||||
return object_name, "pic"
|
||||
except S3Error as e:
|
||||
print(f"上传二进制流时出错: {e}")
|
||||
return None
|
||||
|
||||
|
||||
from io import BytesIO
|
||||
|
||||
|
||||
def upload_frame_buff_from_buffer(frame_buff, file_name=None, bucket_directory=None):
|
||||
"""
|
||||
上传二进制流到 MinIO 指定目录
|
||||
:param frame_buff: bytes 对象,包含要上传的二进制数据
|
||||
:param file_name: 可选,指定文件名
|
||||
:param bucket_directory: MinIO 存储桶内的目标目录(可选)
|
||||
"""
|
||||
bucket_name = "300bdf2b-a150-406e-be63-d28bd29b409f"
|
||||
dir_name = "ai_result"
|
||||
|
||||
try:
|
||||
if not client.bucket_exists(bucket_name):
|
||||
print(f"存储桶 {bucket_name} 不存在")
|
||||
return None
|
||||
|
||||
if file_name is None:
|
||||
file_name = "frame.jpg"
|
||||
|
||||
formatted_date, milliseconds_timestamp = get_current_date_and_milliseconds()
|
||||
object_name = f"{dir_name}/{str(formatted_date)}/{str(milliseconds_timestamp)}-{file_name}"
|
||||
|
||||
# 将 bytes 包装在 BytesIO 对象中
|
||||
buffer = BytesIO(frame_buff)
|
||||
|
||||
client.put_object(
|
||||
bucket_name=bucket_name,
|
||||
object_name=object_name,
|
||||
data=buffer,
|
||||
length=len(frame_buff), # 使用原始 bytes 的长度
|
||||
content_type="image/jpeg"
|
||||
)
|
||||
print(f"二进制流已上传至 {bucket_name}/{object_name}")
|
||||
return object_name, "pic"
|
||||
except S3Error as e:
|
||||
print(f"上传二进制流时出错: {e}")
|
||||
return None
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def upload_video_buff_from_buffer(video_buff, file_name=None, bucket_directory=None, video_format="mp4"):
|
||||
"""
|
||||
上传视频二进制流(MP4/FLV)到 MinIO 指定目录
|
||||
:param video_buff: bytes 对象,包含要上传的视频二进制数据
|
||||
:param file_name: 可选,指定视频文件名(无需扩展名,由 video_format 决定)
|
||||
:param bucket_directory: MinIO 存储桶内的目标目录(可选)
|
||||
:param video_format: 视频格式,支持 "mp4" 或 "flv"
|
||||
:return: 上传后的对象路径和文件类型("video"),失败时返回 None
|
||||
"""
|
||||
bucket_name = "300bdf2b-a150-406e-be63-d28bd29b409f"
|
||||
dir_name = "ai_result" # 默认目录
|
||||
|
||||
try:
|
||||
if not client.bucket_exists(bucket_name):
|
||||
print(f"存储桶 {bucket_name} 不存在")
|
||||
return None
|
||||
|
||||
# 1. 处理文件名和扩展名
|
||||
if file_name is None:
|
||||
file_name = "video" # 默认无扩展名
|
||||
# 根据 video_format 添加扩展名
|
||||
if video_format.lower() == "flv":
|
||||
file_name = f"{file_name}.flv" if not file_name.lower().endswith(".flv") else file_name
|
||||
content_type = "video/x-flv" # FLV 的 MIME 类型
|
||||
else: # 默认 MP4
|
||||
file_name = f"{file_name}.mp4" if not file_name.lower().endswith(".mp4") else file_name
|
||||
content_type = "video/mp4"
|
||||
|
||||
formatted_date, milliseconds_timestamp = get_current_date_and_milliseconds()
|
||||
object_name = f"{dir_name}/{str(formatted_date)}/{str(milliseconds_timestamp)}-{file_name}"
|
||||
|
||||
# 2. 上传到 MinIO
|
||||
buffer = BytesIO(video_buff)
|
||||
client.put_object(
|
||||
bucket_name=bucket_name,
|
||||
object_name=object_name,
|
||||
data=buffer,
|
||||
length=len(video_buff),
|
||||
content_type=content_type, # 动态设置 MIME 类型
|
||||
)
|
||||
print(f"视频已上传至 {bucket_name}/{object_name}(格式: {video_format.upper()})")
|
||||
return object_name, "flv"
|
||||
|
||||
except S3Error as e:
|
||||
print(f"上传视频时出错: {e}")
|
||||
return None
|
||||
|
||||
|
||||
def downFullPathFile(object_url):
|
||||
'''从MinIO全路径URL下载文件并返回本地路径'''
|
||||
if not object_url or not isinstance(object_url, str):
|
||||
logger.error(f"Invalid URL: {object_url}")
|
||||
return None
|
||||
|
||||
try:
|
||||
# 解析URL并提取存储桶和对象键
|
||||
parsed = urllib.parse.urlparse(object_url)
|
||||
path_parts = parsed.path.strip("/").split("/", 1)
|
||||
|
||||
if len(path_parts) < 2:
|
||||
logger.error(f"Invalid MinIO URL format: {object_url}")
|
||||
return None
|
||||
|
||||
bucket_name = path_parts[0]
|
||||
object_name = path_parts[1]
|
||||
|
||||
# 生成本地保存路径
|
||||
current_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
download_path = os.path.join(current_dir, os.path.basename(object_name))
|
||||
os.makedirs(os.path.dirname(download_path), exist_ok=True)
|
||||
|
||||
# 执行下载
|
||||
client.fget_object(
|
||||
bucket_name=bucket_name,
|
||||
object_name=object_name,
|
||||
file_path=download_path
|
||||
)
|
||||
logger.info(f"Downloaded {object_url} to {download_path}")
|
||||
return download_path
|
||||
|
||||
except S3Error as e:
|
||||
logger.error(f"MinIO API error: {e}")
|
||||
except Exception as e:
|
||||
logger.error(f"Download failed: {e}", exc_info=True)
|
||||
return None
|
||||
|
||||
def check_zip_size(object_name):
    """Check the size of a ZIP file stored in MinIO."""
    bucket_name = "300bdf2b-a150-406e-be63-d28bd29b409f"
    try:
        stat = client.stat_object(bucket_name, object_name)
        size = stat.size
        logger.info(f"ZIP file size: {size/1024/1024:.2f}MB")
        return size
    except S3Error as e:
        logger.error(f"Error while getting file size: {e}")
        raise
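A short sketch of how these two helpers might be combined when a training package arrives as a full MinIO URL; the URL below is hypothetical and only illustrates the <endpoint>/<bucket>/<object> layout the parser expects:

    zip_url = "http://minio.example.local:9000/300bdf2b-a150-406e-be63-d28bd29b409f/dataset/train_images.zip"
    local_zip = downFullPathFile(zip_url)
    if local_zip is not None:
        # stat_object only needs the object key, i.e. the part after the bucket name
        size_bytes = check_zip_size("dataset/train_images.zip")
        print(f"Downloaded to {local_zip}, {size_bytes/1024/1024:.2f} MB")
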
387
middleware/query_model.py
Normal file
@ -0,0 +1,387 @@
import psycopg2
from psycopg2.extras import RealDictCursor
import json
from typing import Dict, List, Union, Optional
from dataclasses import dataclass, asdict
from datetime import datetime
import re


@dataclass
class ModelClassInfo:
    index: int
    name: str
    english_name: Optional[str] = None
    description: Optional[str] = None


@dataclass
class ClassConfig:
    filter_indices: List[int]
    class_indices: List[int]
    classes: List[ModelClassInfo]


@dataclass
class ModelInfo:
    id: int
    yolo_version: str
    model_path: str
    func_description: Optional[str] = None


@dataclass
class ModelMetadata:
    total_classes: int
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None


@dataclass
class ModelData:
    id: int
    yolo_version: str
    model_path: str
    repeat_dis: float
    func_description: Optional[str]
    filter_indices: List[int]
    class_indices: List[int]
    conf: float
    classes: List[ModelClassInfo]
    total_classes: int
    cls_names: Dict            # was annotated with the literal `{}`, which is not a valid type hint
    filtered_cls_en_dict: Dict
    cls_en_dict: Dict
    filtered_cls_dict: Dict
    cls_dict: Dict
    cls_str_dict: Dict
    cls_zn_to_eh_dict: Dict
    allowed_classes: List      # was annotated with the literal `[]`
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None


@dataclass
class MqttData:
    mqtt_id: int
    mqtt_ip: str
    mqtt_port: int
    mqtt_topic: str
    mqtt_username: str
    mqtt_pass: str
    mqtt_description: str
    org_code: str
    mqtt_type: str


@dataclass
class Device:
    dname: str
    sn: str
    orgcode: int
    lat: float
    lng: float
    height: float


@dataclass
class ModelConfiguration:
    model_info: ModelInfo
    class_config: ClassConfig
    metadata: ModelMetadata


@dataclass
class Dataset:
    id: int
    resource_original_path: str
    pic_name: str
    local_path: str
    label_name: str
    label_content: str
    label_txt_path: str


@dataclass
class Labels:
    id: int
    resource_original_path: str
    resource_id: int
    label_set_id: int
    label_ids: int
    annotation_data: str


@dataclass
class Label_Yaml:
    id: int
    id_order: int
    name: str
    e_name: str

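The nested dataclasses above describe one model record; below is a minimal sketch, with hypothetical class names and a hypothetical model path, showing how a ModelConfiguration can be assembled before being handed to the DAO defined next:

    cfg = ModelConfiguration(
        model_info=ModelInfo(id=1, yolo_version="yolo11-seg", model_path="pt/uavid_seg.pt",
                             func_description="UAVid segmentation"),
        class_config=ClassConfig(
            filter_indices=[0],
            class_indices=[0, 1],
            classes=[
                # name normally holds the Chinese label, english_name the English one
                ModelClassInfo(index=0, name="building", english_name="building", description="building class"),
                ModelClassInfo(index=1, name="road", english_name="road"),
            ],
        ),
        metadata=ModelMetadata(total_classes=2, created_at=datetime.now()),
    )
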
class DateTimeEncoder(json.JSONEncoder):
    """Custom JSON encoder that handles datetime objects."""

    def default(self, obj):
        if isinstance(obj, datetime):
            return obj.isoformat()
        return super().default(obj)


class ModelConfigDAO:
    def __init__(self, db_params: Dict[str, str]):
        """
        Initialize the database connection.

        Parameters:
            db_params: connection parameters, containing:
                - dbname: database name
                - user: user name
                - password: password
                - host: host address
                - port: port number
        """
        self.db_params = db_params

    def insert_config(self, config: ModelConfiguration) -> bool:
        """
        Insert a new model configuration.

        Parameters:
            config: the model configuration object to insert

        Returns:
            whether the insert succeeded
        """
        if not isinstance(config, ModelConfiguration):
            raise ValueError("Invalid configuration type")

        # Convert the object into the database format
        data = self._config_to_db_format(config)

        query = """
            INSERT INTO ai_model (
                model_id, filter_cls, func_description,
                yolo_version, path, cls_index, cls, cls_en, cls_description
            ) VALUES (
                %(model_id)s, %(filter_cls)s, %(func_description)s,
                %(yolo_version)s, %(path)s, %(cls_index)s, %(cls)s, %(cls_en)s, %(cls_description)s
            )
        """

        try:
            with psycopg2.connect(**self.db_params) as conn:
                with conn.cursor() as cur:
                    cur.execute(query, data)
                    conn.commit()
                    return True
        except psycopg2.Error as e:
            print(f"Database insert error: {e}")
            return False

    def update_config(self, config: ModelConfiguration) -> bool:
        """
        Update an existing model configuration.

        Parameters:
            config: the model configuration object to update

        Returns:
            whether the update succeeded
        """
        if not isinstance(config, ModelConfiguration):
            raise ValueError("Invalid configuration type")

        data = self._config_to_db_format(config)

        query = """
            UPDATE ai_model SET
                filter_cls = %(filter_cls)s,
                func_description = %(func_description)s,
                yolo_version = %(yolo_version)s,
                path = %(path)s,
                cls_index = %(cls_index)s,
                cls = %(cls)s,
                cls_en = %(cls_en)s,
                cls_description = %(cls_description)s
            WHERE model_id = %(model_id)s
        """

        try:
            with psycopg2.connect(**self.db_params) as conn:
                with conn.cursor() as cur:
                    cur.execute(query, data)
                    conn.commit()
                    return True
        except psycopg2.Error as e:
            print(f"Database update error: {e}")
            return False

    def get_datasets(self, bz_training_task_id: int) -> List[Dataset]:
        """
        Fetch the dataset resources linked to a training task.

        Parameters:
            bz_training_task_id: ID of the training task

        Returns:
            a list of Dataset records, or None if the query fails
        """
        query = """
            select bpra.id, bpra.resource_original_path
            from bz_training_dataset a
            left join bz_training_task b on b.id = a.trainingtaskid
            left join bz_datasets c on c.id = a.datasetid
            left join bz_dataset_project_relations d on d.data_set_id = c.id
            left join bz_project_resource_assignments bpra on bpra.project_id = d.project_id
            where b.id = %s
        """

        try:
            with psycopg2.connect(**self.db_params) as conn:
                with conn.cursor(cursor_factory=RealDictCursor) as cur:
                    cur.execute(query, (bz_training_task_id,))
                    rows = cur.fetchall()
                    return [self._db_row_to_dataset(row) for row in rows]  # convert to a list of Dataset
        except psycopg2.Error as e:
            print(f"Database query error: {e}")
            return None

    def get_labels(self, bz_training_task_id: int) -> List[Labels]:
        """
        Fetch the annotation records linked to a training task.

        Parameters:
            bz_training_task_id: ID of the training task

        Returns:
            a list of Labels records, or None if the query fails
        """
        query = """
            select bpra.id, bpra.resource_original_path, bar.resource_id, bar.label_set_id, bar.label_id, bar.annotation_data
            from bz_training_dataset a
            left join bz_training_task b on b.id = a.trainingtaskid
            left join bz_datasets c on c.id = a.datasetid
            left join bz_dataset_project_relations d on d.data_set_id = c.id
            left join bz_project_resource_assignments bpra on bpra.project_id = d.project_id
            left join bz_annotation_record bar on bar.task_assignment_id = bpra.id
            where b.id = %s
        """

        try:
            with psycopg2.connect(**self.db_params) as conn:
                with conn.cursor(cursor_factory=RealDictCursor) as cur:
                    cur.execute(query, (bz_training_task_id,))
                    rows = cur.fetchall()
                    return [self._db_row_to_labels(row) for row in rows]  # convert to a list of Labels
        except psycopg2.Error as e:
            print(f"Database query error: {e}")
            return None

    def get_label_yaml(self, bz_training_task_id: int) -> List[Label_Yaml]:
        """
        Fetch the distinct label definitions used by a training task (for building the dataset YAML).

        Parameters:
            bz_training_task_id: ID of the training task

        Returns:
            a list of Label_Yaml records, or None if the query fails
        """
        query = """
            select id, name, e_name from bz_labels where id in (
                select distinct(bar.label_id) AS id
                from bz_training_dataset a
                left join bz_training_task b on b.id = a.trainingtaskid
                left join bz_datasets c on c.id = a.datasetid
                left join bz_dataset_project_relations d on d.data_set_id = c.id
                left join bz_project_resource_assignments bpra on bpra.project_id = d.project_id
                left join bz_annotation_record bar on bar.task_assignment_id = bpra.id
                where b.id = %s
            )
        """

        try:
            with psycopg2.connect(**self.db_params) as conn:
                with conn.cursor(cursor_factory=RealDictCursor) as cur:
                    cur.execute(query, (bz_training_task_id,))
                    rows = cur.fetchall()
                    return [self._db_row_to_label_yaml(row) for row in rows]  # convert to a list of Label_Yaml
        except psycopg2.Error as e:
            print(f"Database query error: {e}")
            return None

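A sketch of how the label query above might feed a YOLO data.yaml; the connection parameters and task ID are hypothetical, and PyYAML (`yaml`) is assumed to be available:

    import yaml

    dao = ModelConfigDAO({"dbname": "ai", "user": "postgres", "password": "***",
                          "host": "127.0.0.1", "port": "5432"})
    labels = dao.get_label_yaml(bz_training_task_id=42)
    if labels:
        # map a contiguous YOLO class index to each label's English name
        names = {i: lab.e_name for i, lab in enumerate(labels)}
        print(yaml.safe_dump({"names": names}, allow_unicode=True))
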
    def _config_to_db_format(self, config: ModelConfiguration) -> Dict:
        """Convert a configuration object into the database format."""
        return {
            "model_id": config.model_info.id,
            "filter_cls": config.class_config.filter_indices,
            "func_description": config.model_info.func_description,
            "yolo_version": config.model_info.yolo_version,
            "path": config.model_info.model_path,
            "cls_index": config.class_config.class_indices,
            "cls": [cls_info.name for cls_info in config.class_config.classes],
            "cls_en": [cls_info.english_name for cls_info in config.class_config.classes],
            "cls_description": ", ".join(
                filter(None, [cls_info.description for cls_info in config.class_config.classes])
            )
        }

    def insert_train_pid(self, task_id, train_pid) -> bool:
        """
        Insert a new training record (task_id and train_pid).

        Parameters:
            task_id: ID of the training task
            train_pid: OS process ID of the training run

        Returns:
            whether the insert succeeded
        """
        insert_sql = """
            INSERT INTO bz_train_record (
                task_id, train_pid, create_time
            ) VALUES (
                %s, %s, now()
            )
        """

        try:
            with psycopg2.connect(**self.db_params) as conn:
                with conn.cursor() as cur:
                    cur.execute(insert_sql, (task_id, train_pid))
                    conn.commit()
                    return True
        except psycopg2.Error as e:
            print(f"Database insert error: {e}")
            return False

    def _db_row_to_dataset(self, row: Dict) -> Dataset:
        return Dataset(
            id=row["id"],
            resource_original_path=row["resource_original_path"],
            pic_name=None,
            local_path=None,
            label_name=None,
            label_content="",
            label_txt_path=None
        )

    def _db_row_to_labels(self, row: Dict) -> Labels:
        return Labels(
            id=row["id"],
            resource_original_path=row["resource_original_path"],
            resource_id=row["resource_id"],
            label_set_id=row["label_set_id"],
            label_ids=row["label_id"],
            annotation_data=row["annotation_data"]
        )

    def _db_row_to_label_yaml(self, row: Dict) -> Label_Yaml:
        return Label_Yaml(
            id=row["id"],
            id_order=-1,
            name=row["name"],
            e_name=row["e_name"]
        )

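A final usage sketch tying the DAO to a training launch; the training command, script name, and task ID are hypothetical, and only get_datasets and insert_train_pid come from the class above:

    import subprocess

    dao = ModelConfigDAO(db_params)  # same connection dict as in the previous sketch
    datasets = dao.get_datasets(bz_training_task_id=42) or []
    print(f"{len(datasets)} resources linked to task 42")

    # launch training as a child process and record its PID for later bookkeeping
    proc = subprocess.Popen(["python", "train.py", "--task-id", "42"])
    dao.insert_train_pid(task_id=42, train_pid=proc.pid)
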
24
middleware/util.py
Normal file
@ -0,0 +1,24 @@
from datetime import datetime
import time


def get_current_date_and_milliseconds():
    # Get the current date and time
    now = datetime.now()

    # Format the date as YYYYMMDD
    formatted_date = now.strftime("%Y%m%d")

    # Current timestamp in seconds (float, includes milliseconds)
    timestamp = time.time()

    # 13-digit millisecond timestamp
    milliseconds_timestamp = int(timestamp * 1000)

    return formatted_date, milliseconds_timestamp


# # Get the current date and the 13-digit timestamp
# current_date, current_milliseconds = get_current_date_and_milliseconds()
# print("Current date in YYYYMMDD format:", current_date)
# print("13-digit timestamp (milliseconds):", current_milliseconds)
BIN
pic/20250710-171103-972.jpg
Normal file
|
After Width: | Height: | Size: 695 KiB |
BIN
pic/20250710-171104-467.jpg
Normal file
|
After Width: | Height: | Size: 714 KiB |
BIN
pic/20250710-171109-415.jpg
Normal file
|
After Width: | Height: | Size: 720 KiB |
BIN
pic/20250710-171114-514.jpg
Normal file
|
After Width: | Height: | Size: 786 KiB |
BIN
pic/20250710-171115-031.jpg
Normal file
|
After Width: | Height: | Size: 792 KiB |
BIN
pic/20250710-171121-686.jpg
Normal file
|
After Width: | Height: | Size: 859 KiB |
BIN
pic/20250710-171122-172.jpg
Normal file
|
After Width: | Height: | Size: 830 KiB |
BIN
pic/20250710-171122-413.jpg
Normal file
|
After Width: | Height: | Size: 802 KiB |
BIN
pic/20250710-171124-882.jpg
Normal file
|
After Width: | Height: | Size: 845 KiB |
BIN
pic/20250710-171128-134.jpg
Normal file
|
After Width: | Height: | Size: 744 KiB |
BIN
pic/20250710-171142-614.jpg
Normal file
|
After Width: | Height: | Size: 743 KiB |
BIN
pic/20250710-171147-946.jpg
Normal file
|
After Width: | Height: | Size: 775 KiB |
BIN
pic/20250710-171153-749.jpg
Normal file
|
After Width: | Height: | Size: 763 KiB |
BIN
predict/__pycache__/predict_yolo11seg.cpython-312.pyc
Normal file
|
After Width: | Height: | Size: 1.5 MiB |
@ -0,0 +1 @@
4 0.284375 0.568750 0.284375 0.996875 0.707812 0.996875 0.707812 0.568750 0.274603
After Width: | Height: | Size: 15 KiB |
BIN
predict/dataset/predictions/20250711-111516-531_result.jpg
Normal file
After Width: | Height: | Size: 1.5 MiB |
@ -0,0 +1 @@
4 0.284375 0.568750 0.284375 0.996875 0.707812 0.996875 0.707812 0.568750 0.274603
After Width: | Height: | Size: 15 KiB |