Hi, thank you for your excellent work on DSPDet3D!
I'm trying to reproduce your results on the ScanNet-md40 benchmark using the official configuration provided in your paper and repo. However, I noticed that my results are slightly lower than reported.
Here are my key settings:
voxel_size = 0.01
pts_prune_threshold = 100000
r = 7, volume_threshold = 27
prune_threshold = 0.3
Backbone: DSPBackbone(depth=34, max_channels=128, pool=False)
Head: DSPHead(in_channels=(64, 128, 128, 128), out_channels=128, n_classes=22, n_reg_outs=6)
Training: 12 epochs with RepeatDataset(times=10), batch size 4 per GPU
The training and test pipelines match those from the paper, and I'm using the ScanNet-md40 split with 22 classes. However, the performance (especially the mAP) is around 1-2% lower than the result reported in Table 1 of the paper, even under the same pruning threshold setting (e.g., τ=0.3).
Could you kindly advise on:
Whether there are any additional tricks or tuning steps (e.g., post-processing, test-time settings) not mentioned in the paper?
Which seed(s) or other randomness controls were used to stabilize the reported results?
Was a 25-trial average used for the reported table, or the best of several runs?
Any insights would be greatly appreciated!
Thanks again for your contribution!
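
For completeness, here is how I control randomness on my side. This is only a minimal sketch of what my launch script does, assuming the standard mmdet3d 0.x tooling (set_random_seed from mmdet.apis, plus --seed/--deterministic on tools/train.py); please correct me if DSPDet3D seeds things differently.

import random

import numpy as np
import torch
from mmdet.apis import set_random_seed

seed = 0
# Fix all RNGs and enable deterministic cuDNN; this is equivalent to passing
# --seed 0 --deterministic to tools/train.py in my setup.
set_random_seed(seed, deterministic=True)
# Manual fallback in case the helper is unavailable in your pinned version:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)

The full dumped config I trained with follows: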
voxel_size = .01
n_points = 100000
model = dict(
    type='DSPDet3D',
    voxel_size=voxel_size,
    backbone=dict(type='DSPBackbone', in_channels=3, max_channels=128, depth=34, pool=False, norm='batch'),
    head=dict(
        type='DSPHead',
        in_channels=(64, 128, 128, 128),
        out_channels=128,
        n_reg_outs=6,
        n_classes=22,
        voxel_size=voxel_size,
        pts_prune_threshold=100000,
        assigner=dict(
            type='DSPAssigner',
            top_pts_threshold=6),
        assign_type='volume',
        volume_threshold=27,
        r=7,
        prune_threshold=0.3,
        bbox_loss=dict(type='AxisAlignedIoULoss2', mode='diou', reduction='none')),
    train_cfg=dict(),
    test_cfg=dict(nms_pre=1000, iou_thr=.5, score_thr=.01))
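
(For the question about test-time settings above: this is only my rough reading of the test_cfg values, sketched as a hypothetical filter_before_nms helper, not the repo's actual NMS code.)

import numpy as np

def filter_before_nms(bboxes, scores, nms_pre=1000, score_thr=.01):
    # My interpretation: keep the top nms_pre proposals by score, drop anything
    # below score_thr, then run class-wise NMS with iou_thr=0.5 afterwards.
    # Purely illustrative; please correct me if the head does something else.
    order = np.argsort(scores)[::-1][:nms_pre]
    keep = order[scores[order] > score_thr]
    return bboxes[keep], scores[keep]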
optimizer = dict(type='AdamW', lr=.001, weight_decay=.0001)
optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2))
lr_config = dict(policy='step', warmup=None, step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
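
(As a sanity check on the schedule above: I assume mmcv's StepLrUpdaterHook default gamma of 0.1, which gives the per-epoch learning rates below; please correct me if you train with a different gamma.)

# Hypothetical check of the lr_config/runner settings above, assuming gamma=0.1.
base_lr, gamma, steps, max_epochs = 0.001, 0.1, [8, 11], 12
for epoch in range(max_epochs):
    lr = base_lr * gamma ** sum(epoch >= s for s in steps)
    print(f'epoch {epoch:2d}: lr = {lr:.6f}')  # 1e-3, then 1e-4 from epoch 8, 1e-5 from epoch 11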
custom_hooks = [dict(type='EmptyCacheHook', after_iter=True)]
checkpoint_config = dict(interval=1, max_keep_ckpts=12)
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = None
load_from = None
resume_from = None
workflow = [('train', 1)]
dataset_type = 'ScanNetDataset'
data_root = 'data/ScanNet-md40/'
class_names = ('bathtub', 'bed', 'bench', 'bookshelf', 'bottle', 'chair', 'cup', 'curtain', 'desk', 'door', 'dresser',
               'keyboard', 'lamp', 'laptop', 'monitor', 'night_stand', 'plant', 'sofa', 'stool', 'table', 'toilet',
               'wardrobe')
train_pipeline = [
    dict(
        type='LoadPointsFromFile',
        coord_type='DEPTH',
        shift_height=False,
        use_color=False,
        load_dim=6,
        use_dim=[0, 1, 2]),
    dict(type='LoadAnnotations3D'),
    dict(type='GlobalAlignment', rotation_axis=2),
    # We do not need to subsample heavily for ScanNet, as very few scenes have
    # significantly more than 100k points, so this keeps 33 to 100% of them.
    dict(type='PointSample', num_points=n_points),
    dict(
        type='RandomFlip3D',
        sync_2d=False,
        flip_ratio_bev_horizontal=.5,
        flip_ratio_bev_vertical=.5),
    dict(
        type='GlobalRotScaleTrans',
        rot_range=[-.02, .02],
        scale_ratio_range=[.9, 1.1],
        translation_std=[.1, .1, .1],
        shift_height=False),
    # dict(type='NormalizePointsColor', color_mean=None),
    dict(type='DefaultFormatBundle3D', class_names=class_names),
    dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
]
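
(Regarding the PointSample comment in the train pipeline: my mental model of the sampling is just the simplified sample_points sketch below; the real mmdet3d transform handles more cases, so this is only to confirm I understand the 33-100% remark.)

import numpy as np

def sample_points(points, num_points=100000):
    # Simplified view of PointSample: if the scene already has <= num_points
    # points, keep them all; otherwise draw num_points without replacement,
    # which for typical ScanNet scenes keeps roughly 33-100% of the cloud.
    if points.shape[0] <= num_points:
        return points
    idx = np.random.choice(points.shape[0], num_points, replace=False)
    return points[idx]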
test_pipeline = [
    dict(
        type='LoadPointsFromFile',
        coord_type='DEPTH',
        shift_height=False,
        use_color=False,
        load_dim=6,
        use_dim=[0, 1, 2]),
    dict(type='GlobalAlignment', rotation_axis=2),
    dict(
        type='MultiScaleFlipAug3D',
        img_scale=(1333, 800),
        pts_scale_ratio=1,
        flip=False,
        transforms=[
            # We do not sample 100k points for ScanNet, as very few scenes have
            # significantly more than 100k points, so it does not affect
            # inference time and we can accept all points.
            # dict(type='PointSample', num_points=n_points),
            # dict(type='NormalizePointsColor', color_mean=None),
            dict(
                type='DefaultFormatBundle3D',
                class_names=class_names,
                with_label=False),
            dict(type='Collect3D', keys=['points'])
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type='RepeatDataset',
        times=10,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            ann_file=data_root + 'scannet_infos_train.pkl',
            pipeline=train_pipeline,
            filter_empty_gt=False,
            classes=class_names,
            box_type_3d='Depth')),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file=data_root + 'scannet_infos_val.pkl',
        pipeline=test_pipeline,
        classes=class_names,
        test_mode=True,
        box_type_3d='Depth'),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file=data_root + 'scannet_infos_val.pkl',
        pipeline=test_pipeline,
        classes=class_names,
        test_mode=True,
        box_type_3d='Depth'))
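
Finally, regarding the question about trial averaging: this is how I would aggregate repeated evaluations on my side, just a plain mean/std (and best) over whatever per-trial mAP values I collect; summarize_trials is a hypothetical helper of mine, nothing repo-specific.

import numpy as np

def summarize_trials(map_values):
    # map_values: per-trial mAP numbers collected from repeated train/test runs
    # (e.g., 5 trainings x 5 evaluations = 25 trials). Returns mean, std, and
    # the best trial so I can compare both statistics against Table 1.
    vals = np.asarray(map_values, dtype=float)
    return vals.mean(), vals.std(), vals.max()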

