diff --git a/tensor2tensor/data_generators/all_problems.py b/tensor2tensor/data_generators/all_problems.py
index c1f54f18f..24d1fffed 100644
--- a/tensor2tensor/data_generators/all_problems.py
+++ b/tensor2tensor/data_generators/all_problems.py
@@ -19,6 +19,7 @@ from __future__ import print_function
 
 import importlib
 
+from six.moves import range  # pylint: disable=redefined-builtin
 
 MODULES = [
     "tensor2tensor.data_generators.algorithmic",
@@ -97,7 +98,7 @@ def _is_import_err_msg(err_str, module):
   parts = module.split(".")
-  suffixes = [".".join(parts[i:]) for i in xrange(len(parts))]
+  suffixes = [".".join(parts[i:]) for i in range(len(parts))]
   return err_str in (
       ["No module named %s" % suffix for suffix in suffixes] +
       ["No module named '%s'" % suffix for suffix in suffixes])
 
diff --git a/tensor2tensor/data_generators/generator_utils.py b/tensor2tensor/data_generators/generator_utils.py
index 8a722101a..aae3e5572 100644
--- a/tensor2tensor/data_generators/generator_utils.py
+++ b/tensor2tensor/data_generators/generator_utils.py
@@ -969,11 +969,11 @@ def random_deinterleave(text, separator_symbol="X"):
   cut = [False] * n
   cut[0] = True
   num_cuts = int(math.exp(random.uniform(0, math.log(n))))
-  for _ in xrange(num_cuts):
+  for _ in range(num_cuts):
     cut[random.randint(1, n -1)] = True
   out = [[], []]
   part = random.randint(0, 1)
-  for i in xrange(n):
+  for i in range(n):
     if cut[i]:
       out[part].append(separator_symbol)
       part = 1 - part
diff --git a/tensor2tensor/data_generators/transduction_problems.py b/tensor2tensor/data_generators/transduction_problems.py
index 6e1cff743..37082dcbf 100644
--- a/tensor2tensor/data_generators/transduction_problems.py
+++ b/tensor2tensor/data_generators/transduction_problems.py
@@ -33,7 +33,7 @@
 import os
 import random
 
-from six.moves import xrange  # pylint: disable=redefined-builtin
+from six.moves import range  # pylint: disable=redefined-builtin
 
 from tensor2tensor.data_generators import problem
 from tensor2tensor.data_generators import text_encoder
@@ -123,7 +123,7 @@ def sequence_length(self, dataset_split):
                           self.max_sequence_length(dataset_split))
 
   def build_vocab(self):
-    return ["sym_%d" % i for i in xrange(1, self.num_symbols + 1)]
+    return ["sym_%d" % i for i in range(1, self.num_symbols + 1)]
 
   def get_or_create_vocab(self, data_dir, tmp_dir, force_get=False):
     vocab_filename = os.path.join(data_dir, self.vocab_filename)
@@ -144,7 +144,7 @@ def transpose_sequence(self, input_sequence):
     raise NotImplementedError()
 
   def generate_samples(self, data_dir, tmp_dir, dataset_split):
-    for _ in xrange(self.num_samples(dataset_split)):
+    for _ in range(self.num_samples(dataset_split)):
       source = self.generate_random_sequence(dataset_split)
       target = self.transpose_sequence(source)
       yield {
diff --git a/tensor2tensor/models/research/vqa_self_attention.py b/tensor2tensor/models/research/vqa_self_attention.py
index 4702b598f..c9ae04680 100644
--- a/tensor2tensor/models/research/vqa_self_attention.py
+++ b/tensor2tensor/models/research/vqa_self_attention.py
@@ -19,7 +19,7 @@
 from __future__ import division
 from __future__ import print_function
 
-from six.moves import xrange
+from six.moves import range  # pylint: disable=redefined-builtin
 
 from tensor2tensor.layers import common_attention
 from tensor2tensor.layers import common_hparams
@@ -657,7 +657,7 @@ def iterative_encoder_decoder(encoder_input,
                               query,
                               hparams):
   """Iterative encoder decoder."""
-  for _ in xrange(hparams.num_rec_steps):
+  for _ in range(hparams.num_rec_steps):
     with tf.variable_scope("step", reuse=tf.AUTO_REUSE):
       encoder_output = image_question_encoder(
           encoder_input,