fix batchsize=1 backward error (PaddlePaddle#2973)
zhiboniu authored May 13, 2021
1 parent cb972a2 commit 33dca04
Showing 3 changed files with 9 additions and 7 deletions.
4 changes: 2 additions & 2 deletions configs/keypoint/higherhrnet/higherhrnet_hrnet_w32_512.yml
@@ -1,8 +1,8 @@
 use_gpu: true
-log_iter: 1
+log_iter: 10
 save_dir: output
 snapshot_epoch: 10
-weights: output/higherhrnet_hrnet_v1_512/290
+weights: output/higherhrnet_hrnet_w32_512/model_final
 epoch: 300
 num_joints: &num_joints 17
 flip_perm: &flip_perm [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
7 changes: 3 additions & 4 deletions ppdet/modeling/heads/keypoint_hrhrnet_head.py
@@ -92,15 +92,14 @@ def forward(self, feats, targets=None):
         xo2 = self.conv2(x2)
         num_joints = self.num_joints
         if self.training:
+            heatmap1, tagmap = paddle.split(xo1, 2, axis=1)
             if self.swahr:
                 so1 = self.scalelayer0(x1)
                 so2 = self.scalelayer1(x2)
-                hrhrnet_outputs = ([xo1[:, :num_joints], so1], [xo2, so2],
-                                   xo1[:, num_joints:])
+                hrhrnet_outputs = ([heatmap1, so1], [xo2, so2], tagmap)
                 return self.loss(hrhrnet_outputs, targets)
             else:
-                hrhrnet_outputs = (xo1[:, :num_joints], xo2,
-                                   xo1[:, num_joints:])
+                hrhrnet_outputs = (heatmap1, xo2, tagmap)
                 return self.loss(hrhrnet_outputs, targets)
 
         # averaged heatmap, upsampled tagmap
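The head change above swaps the old channel slices (xo1[:, :num_joints] and xo1[:, num_joints:]) for a single paddle.split, presumably because the slicing path misbehaved in backward at batch size 1, per the commit title. A minimal sketch of the equivalence; the tensor shapes below are illustrative assumptions, not values from the commit:

import paddle

# Illustrative shapes only: batch 1, 17 joints, 128x128 feature map.
num_joints = 17
xo1 = paddle.rand([1, 2 * num_joints, 128, 128])

# xo1 stacks heatmap channels first and tag channels second, so an even
# split along the channel axis recovers the two halves.
heatmap1, tagmap = paddle.split(xo1, 2, axis=1)

# Same values as the slicing form the commit removes.
assert paddle.allclose(heatmap1, xo1[:, :num_joints]).item()
assert paddle.allclose(tagmap, xo1[:, num_joints:]).item()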
5 changes: 4 additions & 1 deletion ppdet/modeling/losses/keypoint_loss.py
@@ -194,7 +194,10 @@ def apply_single(self, pred, tagmap):

     def __call__(self, preds, tagmaps):
         bs = preds.shape[0]
-        losses = [self.apply_single(preds[i], tagmaps[i]) for i in range(bs)]
+        losses = [
+            self.apply_single(preds[i:i + 1].squeeze(),
+                              tagmaps[i:i + 1].squeeze()) for i in range(bs)
+        ]
         pull = self.pull_factor * sum(loss[0] for loss in losses) / len(losses)
         push = self.push_factor * sum(loss[1] for loss in losses) / len(losses)
         return pull, push
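The loss change moves each per-sample tensor onto a slice-then-squeeze path instead of integer indexing, which, per the commit title, is what failed in backward when the batch size was 1. A small sketch of the shape behavior, again with assumed illustrative shapes:

import paddle

# Illustrative shapes: batch 1, 17 joint heatmaps, 128x128.
preds = paddle.rand([1, 17, 128, 128])
i = 0

a = preds[i]                  # integer index drops the batch axis: [17, 128, 128]
b = preds[i:i + 1].squeeze()  # slice keeps it as size 1, squeeze then drops it
print(a.shape, b.shape)       # both [17, 128, 128]

Note that squeeze() with no axis argument drops every size-1 dimension, so the rewrite relies on the joint and spatial dimensions being larger than 1.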
