This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Remove the '#' symbol in the header comment
cchung100m committed Feb 18, 2019
1 parent 46dea97 commit 6cded8b
Showing 1 changed file with 9 additions and 1 deletion.
10 changes: 9 additions & 1 deletion example/gluon/lstm_crf/lstm_crf.py
@@ -14,7 +14,9 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

"""This example demonstrates how the LSTM-CRF model can be implemented
in Gluon to perform noun-phrase chunking as a sequence labeling task.
"""
import mxnet as mx
from mxnet import autograd as ag, ndarray as nd, gluon
from mxnet.gluon import Block, nn, rnn
@@ -26,23 +28,28 @@

mx.random.seed(1)


# Helper functions to make the code more readable.
def to_scalar(x):
return int(x.asscalar())


def argmax(vec):
# return the argmax as a python int
idx = nd.argmax(vec, axis=1)
return to_scalar(idx)


def prepare_sequence(seq, word2idx):
return nd.array([word2idx[w] for w in seq])


# Computing log-sum-exp this way is numerically more stable than multiplying probabilities directly
def log_sum_exp(vec):
max_score = nd.max(vec).asscalar()
return nd.log(nd.sum(nd.exp(vec - max_score))) + max_score


# Model
class BiLSTM_CRF(Block):
def __init__(self, vocab_size, tag2idx, embedding_dim, hidden_dim):
@@ -174,6 +181,7 @@ def forward(self, sentence):  # don't confuse this with _forward_alg above.
score, tag_seq = self._viterbi_decode(lstm_feats)
return score, tag_seq


# Run training
START_TAG = "<START>"
STOP_TAG = "<STOP>"

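Aside (not part of this commit): the log_sum_exp helper shown in the diff subtracts the maximum score before exponentiating, which is what keeps the computation finite when the scores are large. Below is a minimal sketch of that effect, assuming MXNet 1.x and the same ndarray calls used in lstm_crf.py; the score values are made up purely for illustration.

from mxnet import ndarray as nd

def log_sum_exp(vec):
    # Same pattern as the helper in the diff: shift by the max so exp() cannot overflow.
    max_score = nd.max(vec).asscalar()
    return nd.log(nd.sum(nd.exp(vec - max_score))) + max_score

scores = nd.array([1000.0, 999.0, 998.0])           # hypothetical large scores (float32)

# Naive computation overflows: exp(1000) is inf in float32, so the log is inf too.
print(nd.log(nd.sum(nd.exp(scores))).asscalar())    # inf

# The shifted computation stays finite: 1000 + log(1 + e^-1 + e^-2) ~= 1000.41
print(log_sum_exp(scores).asscalar())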