@@ -30,8 +30,8 @@ class LSTMTest(tf.test.TestCase):
 
   def testLSTMSeq2Seq(self):
     vocab_size = 9
-    x = np.random.random_integers(1, high=vocab_size - 1, size=(3, 5, 1, 1))
-    y = np.random.random_integers(1, high=vocab_size - 1, size=(3, 6, 1, 1))
+    x = np.random.randint(1, high=vocab_size, size=(3, 5, 1, 1))
+    y = np.random.randint(1, high=vocab_size, size=(3, 6, 1, 1))
     hparams = lstm.lstm_seq2seq()
     p_hparams = problem_hparams.test_problem_hparams(vocab_size,
                                                      vocab_size,
@@ -50,8 +50,8 @@ def testLSTMSeq2Seq(self):
 
   def testLSTMSeq2SeqAttention(self):
     vocab_size = 9
-    x = np.random.random_integers(1, high=vocab_size - 1, size=(3, 5, 1, 1))
-    y = np.random.random_integers(1, high=vocab_size - 1, size=(3, 6, 1, 1))
+    x = np.random.randint(1, high=vocab_size, size=(3, 5, 1, 1))
+    y = np.random.randint(1, high=vocab_size, size=(3, 6, 1, 1))
     hparams = lstm.lstm_attention()
 
     p_hparams = problem_hparams.test_problem_hparams(vocab_size,
@@ -74,8 +74,8 @@ def testLSTMSeq2SeqAttention(self):
 
   def testLSTMSeq2seqBidirectionalEncoder(self):
     vocab_size = 9
-    x = np.random.random_integers(1, high=vocab_size - 1, size=(3, 5, 1, 1))
-    y = np.random.random_integers(1, high=vocab_size - 1, size=(3, 6, 1, 1))
+    x = np.random.randint(1, high=vocab_size, size=(3, 5, 1, 1))
+    y = np.random.randint(1, high=vocab_size, size=(3, 6, 1, 1))
     hparams = lstm.lstm_seq2seq()
     p_hparams = problem_hparams.test_problem_hparams(vocab_size,
                                                      vocab_size,
@@ -94,8 +94,8 @@ def testLSTMSeq2seqBidirectionalEncoder(self):
 
   def testLSTMSeq2seqAttentionBidirectionalEncoder(self):
     vocab_size = 9
-    x = np.random.random_integers(1, high=vocab_size - 1, size=(3, 5, 1, 1))
-    y = np.random.random_integers(1, high=vocab_size - 1, size=(3, 6, 1, 1))
+    x = np.random.randint(1, high=vocab_size, size=(3, 5, 1, 1))
+    y = np.random.randint(1, high=vocab_size, size=(3, 6, 1, 1))
     hparams = lstm.lstm_attention()
 
     p_hparams = problem_hparams.test_problem_hparams(vocab_size, vocab_size)
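
Note on the replacement made in each hunk above: np.random.random_integers(low, high) samples from the closed interval [low, high] and has been deprecated since NumPy 1.11, while np.random.randint(low, high) samples from the half-open interval [low, high). The two calls in the diff therefore draw from the same set of values 1 .. vocab_size - 1. A minimal sketch of the equivalence, reusing the vocab_size and shapes from the tests above:

import numpy as np

vocab_size = 9

# Deprecated call (closed interval [1, vocab_size - 1]):
#   np.random.random_integers(1, high=vocab_size - 1, size=(3, 5, 1, 1))
# Replacement (half-open interval [1, vocab_size), i.e. the same values):
x = np.random.randint(1, high=vocab_size, size=(3, 5, 1, 1))

# Sampled token ids stay within the non-padding vocabulary range.
assert x.min() >= 1 and x.max() <= vocab_size - 1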