From ddc7e6e40de1341fa4c17720b4d50f61eb7be8ec Mon Sep 17 00:00:00 2001
From: sunjiahao1999 <578431509@qq.com>
Date: Mon, 22 Jan 2024 11:33:13 +0800
Subject: [PATCH] fix pgd config

---
 .../datasets/waymoD3-fov-mono3d-3class.py     |   3 +-
 .../datasets/waymoD3-mv-mono3d-3class.py      |   1 -
 .../datasets/waymoD5-fov-mono3d-3class.py     | 163 ------------------
 .../datasets/waymoD5-mv-mono3d-3class.py      | 163 ------------------
 ...1_fpn-head_dcn_16xb3_waymoD5-fov-mono3d.py |   2 +-
 ...01_fpn-head_dcn_16xb3_waymoD5-mv-mono3d.py |   2 +-
 6 files changed, 3 insertions(+), 331 deletions(-)
 delete mode 100644 configs/_base_/datasets/waymoD5-fov-mono3d-3class.py
 delete mode 100644 configs/_base_/datasets/waymoD5-mv-mono3d-3class.py

diff --git a/configs/_base_/datasets/waymoD3-fov-mono3d-3class.py b/configs/_base_/datasets/waymoD3-fov-mono3d-3class.py
index b0142b9c94..3dd4289a7a 100644
--- a/configs/_base_/datasets/waymoD3-fov-mono3d-3class.py
+++ b/configs/_base_/datasets/waymoD3-fov-mono3d-3class.py
@@ -175,8 +175,7 @@
     type='WaymoMetric',
     waymo_bin_file='./data/waymo/waymo_format/fov_gt.bin',
     metric='LET_mAP',
-    load_type='fov_image_based',
-    result_prefix='./pgd_fov_pred')
+    load_type='fov_image_based')
 test_evaluator = val_evaluator
 
 vis_backends = [dict(type='LocalVisBackend')]
diff --git a/configs/_base_/datasets/waymoD3-mv-mono3d-3class.py b/configs/_base_/datasets/waymoD3-mv-mono3d-3class.py
index af245effa7..27d32bb832 100644
--- a/configs/_base_/datasets/waymoD3-mv-mono3d-3class.py
+++ b/configs/_base_/datasets/waymoD3-mv-mono3d-3class.py
@@ -175,7 +175,6 @@
     waymo_bin_file='./data/waymo/waymo_format/cam_gt.bin',
     metric='LET_mAP',
     load_type='mv_image_based',
-    result_prefix='./pgd_mv_pred',
     nms_cfg=dict(
         use_rotate_nms=True,
         nms_across_levels=False,
diff --git a/configs/_base_/datasets/waymoD5-fov-mono3d-3class.py b/configs/_base_/datasets/waymoD5-fov-mono3d-3class.py
deleted file mode 100644
index 614b6a9efa..0000000000
--- a/configs/_base_/datasets/waymoD5-fov-mono3d-3class.py
+++ /dev/null
@@ -1,163 +0,0 @@
-# dataset settings
-# D3 in the config name means the whole dataset is divided into 3 folds
-# We only use one fold for efficient experiments
-dataset_type = 'WaymoDataset'
-data_root = 'data/waymo/kitti_format/'
-class_names = ['Car', 'Pedestrian', 'Cyclist']
-input_modality = dict(use_lidar=False, use_camera=True)
-
-# Example to use different file client
-# Method 1: simply set the data root and let the file I/O module
-# automatically infer from prefix (not support LMDB and Memcache yet)
-
-# data_root = 's3://openmmlab/datasets/detection3d/waymo/kitti_format/'
-
-# Method 2: Use backend_args, file_client_args in versions before 1.1.0
-# backend_args = dict(
-#     backend='petrel',
-#     path_mapping=dict({
-#         './data/': 's3://openmmlab/datasets/detection3d/',
-#         'data/': 's3://openmmlab/datasets/detection3d/'
-#     }))
-backend_args = None
-
-train_pipeline = [
-    dict(type='LoadImageFromFileMono3D', backend_args=backend_args),
-    dict(
-        type='LoadAnnotations3D',
-        with_bbox=True,
-        with_label=True,
-        with_attr_label=False,
-        with_bbox_3d=True,
-        with_label_3d=True,
-        with_bbox_depth=True),
-    # base shape (1248, 832), scale (0.95, 1.05)
-    dict(
-        type='RandomResize3D',
-        scale=(1284, 832),
-        ratio_range=(0.95, 1.05),
-        keep_ratio=True,
-    ),
-    dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
-    dict(
-        type='Pack3DDetInputs',
-        keys=[
-            'img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_bboxes_3d',
-            'gt_labels_3d', 'centers_2d', 'depths'
-        ]),
-]
-
-test_pipeline = [
-    dict(type='LoadImageFromFileMono3D', backend_args=backend_args),
-    dict(
-        type='RandomResize3D',
-        scale=(1248, 832),
-        ratio_range=(1., 1.),
-        keep_ratio=True),
-    dict(type='Pack3DDetInputs', keys=['img']),
-]
-# construct a pipeline for data and gt loading in show function
-# please keep its loading function consistent with test_pipeline (e.g. client)
-eval_pipeline = [
-    dict(type='LoadImageFromFileMono3D', backend_args=backend_args),
-    dict(
-        type='RandomResize3D',
-        scale=(1248, 832),
-        ratio_range=(1., 1.),
-        keep_ratio=True),
-    dict(type='Pack3DDetInputs', keys=['img']),
-]
-
-metainfo = dict(CLASSES=class_names)
-
-train_dataloader = dict(
-    batch_size=3,
-    num_workers=3,
-    persistent_workers=True,
-    sampler=dict(type='DefaultSampler', shuffle=True),
-    dataset=dict(
-        type=dataset_type,
-        data_root=data_root,
-        ann_file='waymo_infos_train.pkl',
-        data_prefix=dict(
-            pts='training/velodyne',
-            CAM_FRONT='training/image_0',
-            CAM_FRONT_LEFT='training/image_1',
-            CAM_FRONT_RIGHT='training/image_2',
-            CAM_SIDE_LEFT='training/image_3',
-            CAM_SIDE_RIGHT='training/image_4'),
-        pipeline=train_pipeline,
-        modality=input_modality,
-        test_mode=False,
-        metainfo=metainfo,
-        # we use box_type_3d='LiDAR' in kitti and nuscenes dataset
-        # and box_type_3d='Depth' in sunrgbd and scannet dataset.
-        box_type_3d='Camera',
-        load_type='fov_image_based',
-        # load one frame every three frames
-        load_interval=5,
-        backend_args=backend_args))
-
-val_dataloader = dict(
-    batch_size=1,
-    num_workers=1,
-    persistent_workers=True,
-    drop_last=False,
-    sampler=dict(type='DefaultSampler', shuffle=False),
-    dataset=dict(
-        type=dataset_type,
-        data_root=data_root,
-        data_prefix=dict(
-            pts='training/velodyne',
-            CAM_FRONT='training/image_0',
-            CAM_FRONT_LEFT='training/image_1',
-            CAM_FRONT_RIGHT='training/image_2',
-            CAM_SIDE_LEFT='training/image_3',
-            CAM_SIDE_RIGHT='training/image_4'),
-        ann_file='waymo_infos_val.pkl',
-        pipeline=eval_pipeline,
-        modality=input_modality,
-        test_mode=True,
-        metainfo=metainfo,
-        # we use box_type_3d='LiDAR' in kitti and nuscenes dataset
-        # and box_type_3d='Depth' in sunrgbd and scannet dataset.
-        box_type_3d='Camera',
-        load_type='fov_image_based',
-        backend_args=backend_args))
-
-test_dataloader = dict(
-    batch_size=1,
-    num_workers=1,
-    persistent_workers=True,
-    drop_last=False,
-    sampler=dict(type='DefaultSampler', shuffle=False),
-    dataset=dict(
-        type=dataset_type,
-        data_root=data_root,
-        data_prefix=dict(
-            pts='training/velodyne',
-            CAM_FRONT='training/image_0',
-            CAM_FRONT_LEFT='training/image_1',
-            CAM_FRONT_RIGHT='training/image_2',
-            CAM_SIDE_LEFT='training/image_3',
-            CAM_SIDE_RIGHT='training/image_4'),
-        ann_file='waymo_infos_val.pkl',
-        pipeline=eval_pipeline,
-        modality=input_modality,
-        test_mode=True,
-        metainfo=metainfo,
-        # we use box_type_3d='LiDAR' in kitti and nuscenes dataset
-        # and box_type_3d='Depth' in sunrgbd and scannet dataset.
-        box_type_3d='Camera',
-        load_type='fov_image_based',
-        backend_args=backend_args))
-
-val_evaluator = dict(
-    type='WaymoMetric',
-    ann_file='./data/waymo/kitti_format/waymo_infos_val.pkl',
-    waymo_bin_file='./data/waymo/waymo_format/fov_gt.bin',
-    data_root='./data/waymo/waymo_format',
-    metric='LET_mAP',
-    load_type='fov_image_based',
-    backend_args=backend_args)
-test_evaluator = val_evaluator
diff --git a/configs/_base_/datasets/waymoD5-mv-mono3d-3class.py b/configs/_base_/datasets/waymoD5-mv-mono3d-3class.py
deleted file mode 100644
index 0840d5eab1..0000000000
--- a/configs/_base_/datasets/waymoD5-mv-mono3d-3class.py
+++ /dev/null
@@ -1,163 +0,0 @@
-# dataset settings
-# D3 in the config name means the whole dataset is divided into 3 folds
-# We only use one fold for efficient experiments
-dataset_type = 'WaymoDataset'
-data_root = 'data/waymo/kitti_format/'
-class_names = ['Car', 'Pedestrian', 'Cyclist']
-input_modality = dict(use_lidar=False, use_camera=True)
-
-# Example to use different file client
-# Method 1: simply set the data root and let the file I/O module
-# automatically infer from prefix (not support LMDB and Memcache yet)
-
-# data_root = 's3://openmmlab/datasets/detection3d/waymo/kitti_format/'
-
-# Method 2: Use backend_args, file_client_args in versions before 1.1.0
-# backend_args = dict(
-#     backend='petrel',
-#     path_mapping=dict({
-#         './data/': 's3://openmmlab/datasets/detection3d/',
-#         'data/': 's3://openmmlab/datasets/detection3d/'
-#     }))
-backend_args = None
-
-train_pipeline = [
-    dict(type='LoadImageFromFileMono3D', backend_args=backend_args),
-    dict(
-        type='LoadAnnotations3D',
-        with_bbox=True,
-        with_label=True,
-        with_attr_label=False,
-        with_bbox_3d=True,
-        with_label_3d=True,
-        with_bbox_depth=True),
-    # base shape (1248, 832), scale (0.95, 1.05)
-    dict(
-        type='RandomResize3D',
-        scale=(1284, 832),
-        ratio_range=(0.95, 1.05),
-        keep_ratio=True,
-    ),
-    dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
-    dict(
-        type='Pack3DDetInputs',
-        keys=[
-            'img', 'gt_bboxes', 'gt_bboxes_labels', 'gt_bboxes_3d',
-            'gt_labels_3d', 'centers_2d', 'depths'
-        ]),
-]
-
-test_pipeline = [
-    dict(type='LoadImageFromFileMono3D', backend_args=backend_args),
-    dict(
-        type='RandomResize3D',
-        scale=(1248, 832),
-        ratio_range=(1., 1.),
-        keep_ratio=True),
-    dict(type='Pack3DDetInputs', keys=['img']),
-]
-# construct a pipeline for data and gt loading in show function
-# please keep its loading function consistent with test_pipeline (e.g. client)
-eval_pipeline = [
-    dict(type='LoadImageFromFileMono3D', backend_args=backend_args),
-    dict(
-        type='RandomResize3D',
-        scale=(1248, 832),
-        ratio_range=(1., 1.),
-        keep_ratio=True),
-    dict(type='Pack3DDetInputs', keys=['img']),
-]
-
-metainfo = dict(classes=class_names)
-
-train_dataloader = dict(
-    batch_size=3,
-    num_workers=3,
-    persistent_workers=True,
-    sampler=dict(type='DefaultSampler', shuffle=True),
-    dataset=dict(
-        type=dataset_type,
-        data_root=data_root,
-        ann_file='waymo_infos_train.pkl',
-        data_prefix=dict(
-            pts='training/velodyne',
-            CAM_FRONT='training/image_0',
-            CAM_FRONT_LEFT='training/image_1',
-            CAM_FRONT_RIGHT='training/image_2',
-            CAM_SIDE_LEFT='training/image_3',
-            CAM_SIDE_RIGHT='training/image_4'),
-        pipeline=train_pipeline,
-        modality=input_modality,
-        test_mode=False,
-        metainfo=metainfo,
-        # we use box_type_3d='LiDAR' in kitti and nuscenes dataset
-        # and box_type_3d='Depth' in sunrgbd and scannet dataset.
-        box_type_3d='Camera',
-        load_type='mv_image_based',
-        # load one frame every three frames
-        load_interval=5,
-        backend_args=backend_args))
-
-val_dataloader = dict(
-    batch_size=1,
-    num_workers=1,
-    persistent_workers=True,
-    drop_last=False,
-    sampler=dict(type='DefaultSampler', shuffle=False),
-    dataset=dict(
-        type=dataset_type,
-        data_root=data_root,
-        data_prefix=dict(
-            pts='training/velodyne',
-            CAM_FRONT='training/image_0',
-            CAM_FRONT_LEFT='training/image_1',
-            CAM_FRONT_RIGHT='training/image_2',
-            CAM_SIDE_LEFT='training/image_3',
-            CAM_SIDE_RIGHT='training/image_4'),
-        ann_file='waymo_infos_val.pkl',
-        pipeline=eval_pipeline,
-        modality=input_modality,
-        test_mode=True,
-        metainfo=metainfo,
-        # we use box_type_3d='LiDAR' in kitti and nuscenes dataset
-        # and box_type_3d='Depth' in sunrgbd and scannet dataset.
-        box_type_3d='Camera',
-        load_type='mv_image_based',
-        backend_args=backend_args))
-
-test_dataloader = dict(
-    batch_size=1,
-    num_workers=1,
-    persistent_workers=True,
-    drop_last=False,
-    sampler=dict(type='DefaultSampler', shuffle=False),
-    dataset=dict(
-        type=dataset_type,
-        data_root=data_root,
-        data_prefix=dict(
-            pts='training/velodyne',
-            CAM_FRONT='training/image_0',
-            CAM_FRONT_LEFT='training/image_1',
-            CAM_FRONT_RIGHT='training/image_2',
-            CAM_SIDE_LEFT='training/image_3',
-            CAM_SIDE_RIGHT='training/image_4'),
-        ann_file='waymo_infos_val.pkl',
-        pipeline=eval_pipeline,
-        modality=input_modality,
-        test_mode=True,
-        metainfo=metainfo,
-        # we use box_type_3d='LiDAR' in kitti and nuscenes dataset
-        # and box_type_3d='Depth' in sunrgbd and scannet dataset.
-        box_type_3d='Camera',
-        load_type='mv_image_based',
-        backend_args=backend_args))
-
-val_evaluator = dict(
-    type='WaymoMetric',
-    ann_file='./data/waymo/kitti_format/waymo_infos_val.pkl',
-    waymo_bin_file='./data/waymo/waymo_format/cam_gt.bin',
-    data_root='./data/waymo/waymo_format',
-    metric='LET_mAP',
-    load_type='mv_image_based',
-    backend_args=backend_args)
-test_evaluator = val_evaluator
diff --git a/configs/pgd/pgd_r101_fpn-head_dcn_16xb3_waymoD5-fov-mono3d.py b/configs/pgd/pgd_r101_fpn-head_dcn_16xb3_waymoD5-fov-mono3d.py
index fa50e0e04f..004f9fb4d3 100644
--- a/configs/pgd/pgd_r101_fpn-head_dcn_16xb3_waymoD5-fov-mono3d.py
+++ b/configs/pgd/pgd_r101_fpn-head_dcn_16xb3_waymoD5-fov-mono3d.py
@@ -1,5 +1,5 @@
 _base_ = [
-    '../_base_/datasets/waymoD5-fov-mono3d-3class.py',
+    '../_base_/datasets/waymoD3-fov-mono3d-3class.py',
     '../_base_/models/pgd.py',
     '../_base_/schedules/mmdet-schedule-1x.py', '../_base_/default_runtime.py'
 ]
diff --git a/configs/pgd/pgd_r101_fpn-head_dcn_16xb3_waymoD5-mv-mono3d.py b/configs/pgd/pgd_r101_fpn-head_dcn_16xb3_waymoD5-mv-mono3d.py
index 034f866006..a86229e943 100644
--- a/configs/pgd/pgd_r101_fpn-head_dcn_16xb3_waymoD5-mv-mono3d.py
+++ b/configs/pgd/pgd_r101_fpn-head_dcn_16xb3_waymoD5-mv-mono3d.py
@@ -1,5 +1,5 @@
 _base_ = [
-    '../_base_/datasets/waymoD5-mv-mono3d-3class.py',
+    '../_base_/datasets/waymoD3-mv-mono3d-3class.py',
     '../_base_/models/pgd.py',
     '../_base_/schedules/mmdet-schedule-1x.py', '../_base_/default_runtime.py'
 ]