diff --git a/setup.py b/setup.py
index 16116a784..f99f00553 100644
--- a/setup.py
+++ b/setup.py
@@ -26,8 +26,8 @@
         'psutil',
         'scipy',
         'seaborn==0.8.1',
-        'tensorflow>=1.8.0,<2.0',
-        'torch==1.3.1',
+        'tensorflow>=1.8.0,<3.0',
+        'torch==1.13.1',
         'tqdm'
     ],
     description="Teaching tools for introducing people to deep RL.",
diff --git a/spinup/utils/mpi_tf.py b/spinup/utils/mpi_tf.py
index 96cbcf5e0..3c0bb7b8e 100644
--- a/spinup/utils/mpi_tf.py
+++ b/spinup/utils/mpi_tf.py
@@ -26,7 +26,7 @@ def sync_all_params():
     return sync_params(tf.global_variables())


-class MpiAdamOptimizer(tf.train.AdamOptimizer):
+class MpiAdamOptimizer(tf.compat.v1.train.AdamOptimizer):
     """
     Adam optimizer that averages gradients across MPI processes.

@@ -40,7 +40,7 @@ class MpiAdamOptimizer(tf.train.AdamOptimizer):

     def __init__(self, **kwargs):
         self.comm = MPI.COMM_WORLD
-        tf.train.AdamOptimizer.__init__(self, **kwargs)
+        tf.compat.v1.train.AdamOptimizer.__init__(self, **kwargs)

     def compute_gradients(self, loss, var_list, **kwargs):
         """
@@ -75,4 +75,4 @@ def apply_gradients(self, grads_and_vars, global_step=None, name=None):
         opt = super().apply_gradients(grads_and_vars, global_step, name)
         with tf.control_dependencies([opt]):
             sync = sync_params([v for g,v in grads_and_vars])
-        return tf.group([opt, sync])
\ No newline at end of file
+        return tf.group([opt, sync])
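Not part of the patch: a minimal usage sketch of the tf.compat.v1 API that MpiAdamOptimizer now subclasses. Under TensorFlow 2.x the v1 optimizer only builds graph ops, so a caller would first disable eager execution and drive the graph through a tf.compat.v1.Session; the toy regression graph and names below are illustrative only, not code from the repo.

import numpy as np
import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # v1 optimizers need graph mode, even on TF 2.x

# Toy regression graph (illustrative names, not from the repo).
x = tf.compat.v1.placeholder(tf.float32, shape=(None, 3))
y = tf.compat.v1.placeholder(tf.float32, shape=(None, 1))
w = tf.compat.v1.get_variable('w', shape=(3, 1))
loss = tf.reduce_mean(tf.square(tf.matmul(x, w) - y))

# Same construct/minimize pattern MpiAdamOptimizer inherits from the v1 base class.
opt = tf.compat.v1.train.AdamOptimizer(learning_rate=1e-3)
train_op = opt.minimize(loss)

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    feed = {x: np.random.randn(8, 3).astype(np.float32),
            y: np.random.randn(8, 1).astype(np.float32)}
    print(sess.run([loss, train_op], feed_dict=feed)[0])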