1. When I run the following test command, I get the error below:
python tools/test.py work_dirs/segtrack-frcnn_r50_fpn_12e_bdd10k_fixed_pcan/segtrack-frcnn_r50_fpn_12e_bdd10k_fixed_pcan.py work_dirs/segtrack-frcnn_r50_fpn_12e_bdd10k_fixed_pcan/latest.pth
--out work_dirs/resnest/result.pkl --format-only --show
Error:
Traceback (most recent call last):
File "tools/test.py", line 164, in
main()
File "tools/test.py", line 153, in main
dataset.format_results(outputs, **kwargs)
File "/home/lin/anaconda3/envs/pcan/lib/python3.6/site-packages/mmdet/datasets/coco.py", line 350, in format_results
assert isinstance(results, list), 'results must be a list'
AssertionError: results must be a list
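For reference, the assertion is raised inside CocoDataset.format_results, which only accepts a plain list of per-image results. A minimal debugging sketch, assuming only the result.pkl path from the --out flag above (tools/test.py should write it before formatting), to check what the test loop actually returned:

# Hedged sketch: inspect the dumped results. Tracking models often
# return a dict of per-task result lists rather than a single list,
# which CocoDataset.format_results rejects with this assertion.
import pickle

with open('work_dirs/resnest/result.pkl', 'rb') as f:
    outputs = pickle.load(f)

print(type(outputs))  # list -> format_results can consume it; dict -> it asserts
if isinstance(outputs, dict):
    print(list(outputs.keys()))  # shows which per-task result lists exist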
My configuration file is as follows:
model = dict(
type='EMQuasiDenseMaskRCNNRefine',
pretrained=None,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss', beta=0.1111111111111111, loss_weight=1.0)),
roi_head=dict(
type='QuasiDenseSegRoIHeadRefine',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=8,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0.0, 0.0, 0.0, 0.0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
track_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
track_head=dict(
type='QuasiDenseEmbedHead',
num_convs=4,
num_fcs=1,
embed_channels=256,
norm_cfg=dict(type='GN', num_groups=32),
loss_track=dict(type='MultiPosCrossEntropyLoss', loss_weight=0.25),
loss_track_aux=dict(
type='L2Loss',
neg_pos_ub=3,
pos_margin=0,
neg_margin=0.3,
hard_mining=True,
loss_weight=1.0)),
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=dict(
type='FCNMaskHeadPlus',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=8,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
double_train=False,
refine_head=dict(
type='EMMatchHeadPlus',
num_convs=4,
in_channels=256,
conv_kernel_size=3,
conv_out_channels=256,
upsample_method='deconv',
upsample_ratio=2,
num_classes=8,
pos_proto_num=10,
neg_proto_num=10,
stage_num=6,
conv_cfg=None,
norm_cfg=None,
mask_thr_binary=0.5,
match_score_thr=0.5,
with_mask_ref=False,
with_mask_key=True,
with_dilation=False,
loss_mask=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))),
tracker=dict(
type='QuasiDenseSegFeatEmbedTracker',
init_score_thr=0.7,
obj_score_thr=0.3,
match_score_thr=0.5,
memo_tracklet_frames=10,
memo_backdrop_frames=1,
memo_momentum=0.8,
nms_conf_thr=0.5,
nms_backdrop_iou_thr=0.3,
nms_class_iou_thr=0.7,
with_cats=True,
match_metric='bisoftmax'),
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False,
mask_size=28),
embed=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='CombinedSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=3,
add_gt_as_proposals=True,
pos_sampler=dict(type='InstanceBalancedPosSampler'),
neg_sampler=dict(
type='IoUBalancedNegSampler',
floor_thr=-1,
floor_fraction=0,
num_bins=3)))),
test_cfg=dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.5,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)),
fixed=True)
dataset_type = 'BDDVideoDataset'
data_root = ''
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadMultiImagesFromFile'),
dict(
type='SeqLoadAnnotations',
with_bbox=True,
with_ins_id=True,
with_mask=True),
dict(type='SeqResize', img_scale=(1296, 720), keep_ratio=True),
dict(type='SeqRandomFlip', share_params=True, flip_ratio=0.5),
dict(
type='SeqNormalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='SeqPad', size_divisor=32),
dict(type='SeqDefaultFormatBundle'),
dict(
type='SeqCollect',
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_match_indices', 'gt_masks'],
ref_prefix='ref')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1296, 720),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='VideoCollect', keys=['img'])
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=2,
train=[
dict(
type='BDDVideoDataset',
ann_file='/media/lin/文件/bdd/labels/seg_track_train_cocoformat.json',
img_prefix='/media/lin/文件/bdd/images/seg_track_20/train',
key_img_sampler=dict(interval=1),
ref_img_sampler=dict(num_ref_imgs=1, scope=3, method='uniform'),
pipeline=[
dict(type='LoadMultiImagesFromFile'),
dict(
type='SeqLoadAnnotations',
with_bbox=True,
with_ins_id=True,
with_mask=True),
dict(type='SeqResize', img_scale=(1296, 720), keep_ratio=True),
dict(type='SeqRandomFlip', share_params=True, flip_ratio=0.5),
dict(
type='SeqNormalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='SeqPad', size_divisor=32),
dict(type='SeqDefaultFormatBundle'),
dict(
type='SeqCollect',
keys=[
'img', 'gt_bboxes', 'gt_labels', 'gt_match_indices',
'gt_masks'
],
ref_prefix='ref')
])
],
val=dict(
type='BDDVideoDataset',
ann_file='/media/lin/文件/bdd/labels/seg_track_val_cocoformat.json',
img_prefix='/media/lin/文件/bdd/images/seg_track_20/val',
pipeline=[
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1296, 720),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='VideoCollect', keys=['img'])
])
]),
test=dict(
type='BDDVideoDataset',
ann_file='/media/lin/文件/bdd/labels/seg_track_test_cocoformat.json',
img_prefix='/media/lin/文件/bdd/images/seg_track_20/test',
pipeline=[
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1296, 720),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='VideoCollect', keys=['img'])
])
]))
optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.001,
step=[8, 11])
checkpoint_config = dict(interval=1)
log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = './ckpts/segtrack-fixed-new.pth'
resume_from = None
workflow = [('train', 1)]
evaluation = dict(metric=['bbox', 'segm', 'segtrack'], interval=12)
work_dir = './work_dirs/segtrack-frcnn_r50_fpn_12e_bdd10k_fixed_pcan'
gpu_ids = range(0, 1)
2. When I run the following test command, I get the error below:
python tools/test.py work_dirs/segtrack-frcnn_r50_fpn_12e_bdd10k_fixed_pcan/segtrack-frcnn_r50_fpn_12e_bdd10k_fixed_pcan.py work_dirs/segtrack-frcnn_r50_fpn_12e_bdd10k_fixed_pcan/latest.pth
--eval bbox segm segtrack
Error:
Traceback (most recent call last):
File "tools/test.py", line 164, in
main()
File "tools/test.py", line 160, in main
print(dataset.evaluate(outputs, **eval_kwargs))
File "/home/lin/Desktop/pcan/pcan/datasets/coco_video_dataset.py", line 317, in evaluate
class_average=mot_class_average)
File "/home/lin/Desktop/pcan/pcan/core/evaluation/mots.py", line 30, in eval_mots
preprocessResult(all_results, anns, cats_mapping)
File "/home/lin/Desktop/pcan/pcan/core/evaluation/mot.py", line 48, in preprocessResult
for i, bbox in enumerate(anns['annotations']):  # enumerate: i is the index, bbox is the content
KeyError: 'annotations'
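For reference, preprocessResult iterates over anns['annotations'], so the annotation JSON handed to evaluation must contain that key; BDD test-split files are typically published without labels, so evaluation normally only works against the val split. A quick check, assuming only the ann_file path from the test section of the config above:

# Hedged sketch: verify which keys the test annotation file contains.
import json

with open('/media/lin/文件/bdd/labels/seg_track_test_cocoformat.json') as f:
    anns = json.load(f)

print(sorted(anns.keys()))  # if 'annotations' is missing, eval raises this KeyError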
3. When I changed the backbone from ResNet to ResNeSt for training, I did not modify the other parts, and the validation accuracy after training was all 0. After changing the backbone network, what other places need to be changed?
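For question 3, below is a hedged sketch of what the backbone block would look like for MMDetection's ResNeSt, based on mmdet's own configs/resnest examples. A plain type swap misses the ResNeSt-specific fields (stem_channels, radix, reduction_factor, avg_down_stride) and, with pretrained=None, leaves the new backbone randomly initialized, which alone can produce all-zero validation accuracy:

# Hedged sketch: ResNeSt-50 backbone in MMDetection config style.
# 'open_mmlab://resnest50' follows mmdet's resnest configs; confirm the
# checkpoint name against the mmdet version that pcan pins. norm_cfg and
# norm_eval are kept as in the ResNet config above.
model = dict(
    pretrained='open_mmlab://resnest50',
    backbone=dict(
        type='ResNeSt',
        stem_channels=64,
        depth=50,
        radix=2,
        reduction_factor=4,
        avg_down_stride=True,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'))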