From bfe96009010b85ee0005bc3ba1dda7560ec7b1ee Mon Sep 17 00:00:00 2001
From: Xingjian Shi
Date: Sun, 13 Oct 2019 19:11:31 -0700
Subject: [PATCH] try to fix

try to fix
---
 .../self_attentive_sentence_embedding.md | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/docs/examples/sentence_embedding/self_attentive_sentence_embedding.md b/docs/examples/sentence_embedding/self_attentive_sentence_embedding.md
index 4e4146693b..c0d971e48a 100644
--- a/docs/examples/sentence_embedding/self_attentive_sentence_embedding.md
+++ b/docs/examples/sentence_embedding/self_attentive_sentence_embedding.md
@@ -256,24 +256,24 @@ When the number of samples for labels are very unbalanced, applying different we
 
 
 ```{.python .input}
-class WeightedSoftmaxCE(nn.HybridBlock):
+class WeightedSoftmaxCE(nn.Block):
     def __init__(self, sparse_label=True, from_logits=False, **kwargs):
         super(WeightedSoftmaxCE, self).__init__(**kwargs)
         with self.name_scope():
             self.sparse_label = sparse_label
             self.from_logits = from_logits
 
-    def hybrid_forward(self, F, pred, label, class_weight, depth=None):
+    def forward(self, pred, label, class_weight, depth=None):
         if self.sparse_label:
-            label = F.reshape(label, shape=(-1, ))
-            label = F.one_hot(label, depth)
+            label = nd.reshape(label, shape=(-1, ))
+            label = nd.one_hot(label, depth)
         if not self.from_logits:
-            pred = F.log_softmax(pred, -1)
+            pred = nd.log_softmax(pred, -1)
 
-        weight_label = F.broadcast_mul(label, class_weight)
-        loss = -F.sum(pred * weight_label, axis=-1)
+        weight_label = nd.broadcast_mul(label, class_weight)
+        loss = -nd.sum(pred * weight_label, axis=-1)
 
-        # return F.mean(loss, axis=0, exclude=True)
+        # return nd.mean(loss, axis=0, exclude=True)
         return loss
 ```
 
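
For reference, below is a minimal sketch of how the post-patch `WeightedSoftmaxCE` could be exercised with the imperative NDArray API. The class body is reproduced from the patch so the snippet runs standalone; the batch size, number of classes, label values, and class weights are illustrative assumptions, not values from the tutorial.

```python
from mxnet import nd
from mxnet.gluon import nn


# Post-patch version of the loss, reproduced from the diff above.
class WeightedSoftmaxCE(nn.Block):
    def __init__(self, sparse_label=True, from_logits=False, **kwargs):
        super(WeightedSoftmaxCE, self).__init__(**kwargs)
        with self.name_scope():
            self.sparse_label = sparse_label
            self.from_logits = from_logits

    def forward(self, pred, label, class_weight, depth=None):
        if self.sparse_label:
            label = nd.reshape(label, shape=(-1, ))
            label = nd.one_hot(label, depth)
        if not self.from_logits:
            pred = nd.log_softmax(pred, -1)
        weight_label = nd.broadcast_mul(label, class_weight)
        loss = -nd.sum(pred * weight_label, axis=-1)
        return loss


# Illustrative call (shapes, labels, and weights are made up for this sketch).
num_classes = 5
pred = nd.random.normal(shape=(4, num_classes))      # raw scores for 4 samples
label = nd.array([0, 2, 2, 4])                       # sparse integer class labels
class_weight = nd.array([1.0, 2.0, 2.0, 1.0, 0.5])   # heavier weight on rarer classes

loss_fn = WeightedSoftmaxCE()
sample_loss = loss_fn(pred, label, class_weight, num_classes)  # shape (4,), one value per sample
print(sample_loss)
```

Note that the arguments are passed positionally, matching how the tutorial's training loop invokes the loss through the Block call interface.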