[Fix] Replace value.numpy()[0] with float(value) (PaddlePaddle#2686)
* To support 0-dim Tensors, replace value.numpy()[0] with float(value) wherever value is a single-element Tensor of shape [1]
juncaipeng authored Nov 1, 2022
1 parent 0ef86a9 commit 4a2f396
Showing 13 changed files with 30 additions and 30 deletions.
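
The motivation, in short: on PaddlePaddle builds where reductions and scalar indexing return 0-dim Tensors, value.numpy() yields a 0-dim NumPy array, so value.numpy()[0] raises an IndexError, while float(value) extracts the scalar from either a 0-dim or a shape-[1] Tensor. A minimal sketch of the failure mode (tensor values are illustrative, not from the diff):

```python
import paddle

# Old behavior: losses were shape-[1] Tensors; new behavior: 0-dim Tensors.
loss_1d = paddle.to_tensor([0.5])                    # shape [1]
loss_0d = paddle.mean(paddle.to_tensor([0.5, 1.5]))  # 0-dim on builds with 0-dim support

# float() works for both forms:
print(float(loss_1d))  # 0.5
print(float(loss_0d))  # 1.0

# The old pattern only works for the shape-[1] form:
loss_1d.numpy()[0]     # fine: indexes a 1-element array
# loss_0d.numpy()[0]   # IndexError on a 0-dim array: too many indices
```

Every hunk below is this same one-line substitution, using int(...) instead of float(...) where the scalar is an index or a size.
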
2 changes: 1 addition & 1 deletion Matting/ppmatting/core/train.py
@@ -227,7 +227,7 @@ def train(model,
 model.clear_gradients()

 for key, value in loss_dict.items():
-    avg_loss[key] += value.numpy()[0]
+    avg_loss[key] += float(value)
 batch_cost_averager.record(
     time.time() - batch_start, num_samples=batch_size)
4 changes: 2 additions & 2 deletions Matting/ppmatting/core/val_ml.py
@@ -44,10 +44,10 @@ def reverse_transform(alpha, trans_info):
 """recover pred to origin shape"""
 for item in trans_info[::-1]:
     if item[0][0] == 'resize':
-        h, w = item[1][0].numpy()[0], item[1][1].numpy()[0]
+        h, w = int(item[1][0]), int(item[1][1])
         alpha = cv2.resize(alpha, dsize=(w, h))
     elif item[0][0] == 'padding':
-        h, w = item[1][0].numpy()[0], item[1][1].numpy()[0]
+        h, w = int(item[1][0]), int(item[1][1])
         alpha = alpha[0:h, 0:w]
     else:
         raise Exception("Unexpected info '{}' in im_info".format(item[0]))
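
The val_ml.py change above is the int() variant of the same fix: the height/width entries in trans_info are single-element (or, on newer builds, 0-dim) Tensors, and int() converts either form into the plain Python int that cv2.resize and slicing expect. A small sketch with illustrative values:

```python
import paddle

# Stand-ins for the h/w Tensors stored in trans_info (values are illustrative).
h = paddle.to_tensor([512])   # shape [1] -- the older form
w = paddle.to_tensor(768)     # 0-dim on builds with 0-dim Tensor support

# int() handles both, so downstream code gets plain Python ints:
print(int(h), int(w))         # 512 768
```
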
2 changes: 1 addition & 1 deletion contrib/AutoNUE/scripts/train.py
@@ -167,7 +167,7 @@ def train(model,
                   paddle.optimizer.lr.LRScheduler):
     optimizer._learning_rate.step()
 model.clear_gradients()
-avg_loss += loss.numpy()[0]
+avg_loss += float(loss)
 if not avg_loss_list:
     avg_loss_list = [l.numpy() for l in loss_list]
 else:
2 changes: 1 addition & 1 deletion contrib/CityscapesSOTA/scripts/train.py
@@ -170,7 +170,7 @@ def train(model,
                   paddle.optimizer.lr.LRScheduler):
     optimizer._learning_rate.step()
 model.clear_gradients()
-avg_loss += loss.numpy()[0]
+avg_loss += float(loss)
 if not avg_loss_list:
     avg_loss_list = [l.numpy() for l in loss_list]
 else:
28 changes: 14 additions & 14 deletions contrib/DomainAdaptation/script/train.py
@@ -177,8 +177,8 @@ def train(self,
                                     labels_src)

 loss_src_seg = loss_src_seg_main + loss_src_seg_aux
-loss_dict["source_main"] = loss_src_seg_main.numpy()[0]
-loss_dict["source_aux"] = loss_src_seg_aux.numpy()[0]
+loss_dict["source_main"] = float(loss_src_seg_main)
+loss_dict["source_aux"] = float(loss_src_seg_aux)
 loss = loss_src_seg
 del loss_src_seg, loss_src_seg_aux, loss_src_seg_main

@@ -224,8 +224,8 @@ def train(self,
 loss += loss_edge

-loss_dict['target_edge'] = loss_tgt_edge.numpy()[0]
-loss_dict['source_edge'] = loss_src_edge.numpy()[0]
+loss_dict['target_edge'] = float(loss_tgt_edge)
+loss_dict['source_edge'] = float(loss_src_edge)

 del loss_edge, loss_tgt_edge, loss_src_edge

@@ -260,8 +260,8 @@ def train(self,
 loss += loss_tgt_aug

-loss_dict['target_aug_main'] = loss_tgt_aug_main.numpy()[0]
-loss_dict['target_aug_aux'] = loss_tgt_aug_aux.numpy()[0]
+loss_dict['target_aug_main'] = float(loss_tgt_aug_main)
+loss_dict['target_aug_aux'] = float(loss_tgt_aug_aux)
 del images_tgt_aug, labels_tgt_aug_aux, images_tgt, \
     loss_tgt_aug, loss_tgt_aug_aux, loss_tgt_aug_main

@@ -287,8 +287,8 @@ def train(self,
 loss_edge_rec = loss_tgt_edge_rec + loss_src_edge_rec
 loss += loss_edge_rec

-loss_dict['src_edge_rec'] = loss_src_edge_rec.numpy()[0]
-loss_dict['tgt_edge_rec'] = loss_tgt_edge_rec.numpy()[0]
+loss_dict['src_edge_rec'] = float(loss_src_edge_rec)
+loss_dict['tgt_edge_rec'] = float(loss_tgt_edge_rec)

 del loss_tgt_edge_rec, loss_src_edge_rec

@@ -358,12 +358,12 @@ def train(self,
 loss += loss_feat_align

-loss_dict['loss_pix_align_src'] = \
-    loss_pix_align_src.numpy()[0]
-loss_dict['loss_pix_align_tgt'] = \
-    loss_pix_align_tgt.numpy()[0]
-loss_dict['loss_intra_relate'] = \
-    loss_intra_relate.numpy()[0]
+loss_dict['loss_pix_align_src'] = float(
+    loss_pix_align_src)
+loss_dict['loss_pix_align_tgt'] = float(
+    loss_pix_align_tgt)
+loss_dict['loss_intra_relate'] = float(
+    loss_intra_relate)

 del loss_pix_align_tgt, loss_pix_align_src, loss_intra_relate,
2 changes: 1 addition & 1 deletion contrib/LaneSeg/core/train.py
@@ -210,7 +210,7 @@ def train(model,
 train_profiler.add_profiler_step(profiler_options)

 model.clear_gradients()
-avg_loss += loss.numpy()[0]
+avg_loss += float(loss)
 if not avg_loss_list:
     avg_loss_list = [l.numpy() for l in loss_list]
 else:
2 changes: 1 addition & 1 deletion contrib/MedicalSeg/medicalseg/core/train.py
@@ -161,7 +161,7 @@ def train(model,
 model.clear_gradients()
 # TODO use a function to record, print loss etc

-avg_loss += loss.numpy()[0]
+avg_loss += float(loss)
 mdice += np.mean(per_channel_dice) * 100

 if channel_dice_array.size == 0:
2 changes: 1 addition & 1 deletion contrib/MedicalSeg/nnunet/core/train.py
@@ -141,7 +141,7 @@ def train(model,
 train_profiler.add_profiler_step(profiler_options)

 model.clear_gradients()
-avg_loss += loss.numpy()[0]
+avg_loss += float(loss)
 mdice += np.mean(per_channel_dice) * 100

 if channel_dice_array.size == 0:
2 changes: 1 addition & 1 deletion contrib/PanopticDeepLab/core/train.py
@@ -189,7 +189,7 @@ def train(model,
                   paddle.optimizer.lr.LRScheduler):
     optimizer._learning_rate.step()
 model.clear_gradients()
-avg_loss += loss.numpy()[0]
+avg_loss += float(loss)
 if not avg_loss_list:
     avg_loss_list = [l.numpy() for l in loss_list]
 else:
8 changes: 4 additions & 4 deletions deploy/slim/distill/distill_utils.py
@@ -244,10 +244,10 @@ def distill_train(distill_model,
     lr_sche.step()

 distill_model.clear_gradients()
-avg_loss += loss.numpy()[0]
-avg_out_loss += out_loss.numpy()[0]
-avg_out_distill_loss += out_distill_loss.numpy()[0]
-avg_feature_distill_loss += feature_distill_loss.numpy()[0]
+avg_loss += float(loss)
+avg_out_loss += float(out_loss)
+avg_out_distill_loss += float(out_distill_loss)
+avg_feature_distill_loss += float(feature_distill_loss)
 if not avg_out_loss_list:
     avg_out_loss_list = [l.numpy() for l in out_loss_list]
 else:
2 changes: 1 addition & 1 deletion paddleseg/core/train.py
@@ -230,7 +230,7 @@ def train(model,
 train_profiler.add_profiler_step(profiler_options)

 model.clear_gradients()
-avg_loss += loss.numpy()[0]
+avg_loss += float(loss)
 if not avg_loss_list:
     avg_loss_list = [l.numpy() for l in loss_list]
 else:
2 changes: 1 addition & 1 deletion paddleseg/models/losses/ohem_cross_entropy_loss.py
@@ -77,7 +77,7 @@ def forward(self, logit, label):
 if self.min_kept > 0:
     index = prob.argsort()
     threshold_index = index[min(len(index), self.min_kept) - 1]
-    threshold_index = int(threshold_index.numpy()[0])
+    threshold_index = int(threshold_index)
     if prob[threshold_index] > self.thresh:
         threshold = prob[threshold_index]
         kept_mask = (prob < threshold).astype('int64')
2 changes: 1 addition & 1 deletion paddleseg/models/losses/ohem_edge_attention_loss.py
@@ -93,7 +93,7 @@ def forward(self, logits, label):
 if self.min_kept > 0:
     index = prob.argsort()
     threshold_index = index[min(len(index), self.min_kept) - 1]
-    threshold_index = int(threshold_index.numpy()[0])
+    threshold_index = int(threshold_index)
     if prob[threshold_index] > self.thresh:
         threshold = prob[threshold_index]
         kept_mask = (prob < threshold).astype('int64')
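
In both OHEM losses, threshold_index comes from indexing a 1-D Tensor with a single integer, which returns a scalar Tensor (0-dim on newer builds), so int(threshold_index) is the safe conversion. A sketch of that step with made-up probabilities and a hypothetical min_kept:

```python
import paddle

prob = paddle.to_tensor([0.9, 0.2, 0.7, 0.4])   # illustrative probabilities
min_kept = 3                                    # hypothetical OHEM budget

index = prob.argsort()                          # 1-D Tensor of sorted indices
threshold_index = index[min(len(index), min_kept) - 1]

# Single-integer indexing returns a scalar Tensor; int() converts it
# whether it is 0-dim or shape [1]:
print(int(threshold_index))                     # 2 for these values
```
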
