-
Notifications
You must be signed in to change notification settings - Fork 146
Open
Description
In the training phase, the ground truth trajectory is added
to the planning vocabulary as the positive sample. Other
trajectories are regarded as negative samples. We assign
different loss weights to negative trajectories. Trajectories
close to the ground truth trajectory are less penalized.
The paper appears to say that the ground-truth trajectory is added to the vocabulary as the positive sample.
def get_plan_expert_target(self,
                           ego_traj_preds,
                           ego_fut_gt,
                           ego_fut_masks,
                           ego_cls_expert_preds,  # (N, 1); unused by the current scheme, kept for interface compatibility
                           plan_col_labels,
                           plan_bd_labels,
                           ):
    """Build per-trajectory classification targets for the planning vocabulary.

    Every vocabulary trajectory starts as a negative (label 1) whose loss
    weight grows with its distance to the ground-truth future trajectory,
    so trajectories close to the ground truth are penalized less.
    Colliding or boundary-crossing trajectories are hard negatives with a
    fixed large weight. Finally, the single vocabulary entry closest to the
    ground truth is relabelled as the positive sample (label 0).

    Args:
        ego_traj_preds: vocabulary trajectories; assumed (N, T, 2) absolute
            positions — TODO confirm against caller.
        ego_fut_gt: ground-truth future per-step offsets (cumsum'd here to
            absolute positions); assumed broadcastable to ego_traj_preds —
            TODO confirm.
        ego_fut_masks: per-timestep validity mask; only element 0 is checked.
        ego_cls_expert_preds: (N, 1) expert classification scores (unused).
        plan_col_labels: 1 where the trajectory collides with an agent.
        plan_bd_labels: 1 where the trajectory crosses a road boundary.

    Returns:
        Tuple ``(plan_expert_labels, plan_expert_labels_weight)`` of shape
        ``(self.plan_fut_mode,)`` (long / float). Both stay all-zero when the
        first ground-truth timestep is invalid.
    """
    device = ego_traj_preds.device
    plan_expert_labels = torch.zeros(
        (self.plan_fut_mode,), dtype=torch.long, device=device)
    plan_expert_labels_weight = torch.zeros(
        (self.plan_fut_mode,), dtype=ego_traj_preds.dtype, device=device)
    if ego_fut_masks[0] == 1.:
        # Distance of each vocabulary trajectory to the GT trajectory,
        # summed over timesteps. (Earlier weighting variants v1-v10 were
        # removed as dead commented-out code; this is the last active one.)
        traj_dis = torch.linalg.norm(
            ego_traj_preds - ego_fut_gt.cumsum(dim=-2), dim=-1).sum(dim=-1)
        # All entries start as distance-weighted negatives.
        plan_expert_labels[:] = 1
        plan_expert_labels_weight[:] = torch.clip(traj_dis, min=0, max=100.) * 100.
        # Colliding / off-road trajectories: hard negatives, fixed weight.
        plan_expert_labels[plan_col_labels == 1] = 1
        plan_expert_labels[plan_bd_labels == 1] = 1
        plan_expert_labels_weight[plan_col_labels == 1] = 100.
        plan_expert_labels_weight[plan_bd_labels == 1] = 100.
        # The vocabulary entry nearest the GT becomes the positive sample.
        # NOTE(review): the GT itself is NOT inserted into the vocabulary
        # here — the closest existing entry is used instead, which is the
        # discrepancy with the paper raised in this issue.
        pos_idx = traj_dis.argmin()
        plan_expert_labels[pos_idx] = 0
        # Bookkeeping: how often each entry was selected as positive.
        # (The selection-count-based weight rescaling was disabled; its
        # unused scaling_rate computation has been dropped.)
        self.traj_selected_cnt[pos_idx] += 1.
        plan_expert_labels_weight[pos_idx] = 100.
    return plan_expert_labels, plan_expert_labels_weight
However, in the source code this section appears to select the existing vocabulary trajectory closest to the ground truth as the positive sample, rather than inserting the ground truth itself — could someone clarify this discrepancy?
Reactions are currently unavailable
Metadata
Metadata
Assignees
Labels
No labels