diff --git a/config.py b/config.py
index f952f15..302d0f5 100644
--- a/config.py
+++ b/config.py
@@ -36,6 +36,7 @@ def add_argument_group(name):
 train_arg.add_argument('--lr_update_step', type=int, default=100000, choices=[100000, 75000])
 train_arg.add_argument('--d_lr', type=float, default=0.00008)
 train_arg.add_argument('--g_lr', type=float, default=0.00008)
+train_arg.add_argument('--lr_lower_boundary', type=float, default=0.00002)
 train_arg.add_argument('--beta1', type=float, default=0.5)
 train_arg.add_argument('--beta2', type=float, default=0.999)
 train_arg.add_argument('--gamma', type=float, default=0.5)
diff --git a/trainer.py b/trainer.py
index 7d33825..3fd8868 100644
--- a/trainer.py
+++ b/trainer.py
@@ -62,8 +62,8 @@ def __init__(self, config, data_loader):
         self.g_lr = tf.Variable(config.g_lr, name='g_lr')
         self.d_lr = tf.Variable(config.d_lr, name='d_lr')

-        self.g_lr_update = tf.assign(self.g_lr, self.g_lr * 0.5, name='g_lr_update')
-        self.d_lr_update = tf.assign(self.d_lr, self.d_lr * 0.5, name='d_lr_update')
+        self.g_lr_update = tf.assign(self.g_lr, tf.maximum(self.g_lr * 0.5, config.lr_lower_boundary), name='g_lr_update')
+        self.d_lr_update = tf.assign(self.d_lr, tf.maximum(self.d_lr * 0.5, config.lr_lower_boundary), name='d_lr_update')

         self.gamma = config.gamma
         self.lambda_k = config.lambda_k
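The change wraps the halving step in `tf.maximum` so the learning rate decays but never drops below the new `--lr_lower_boundary` flag. A minimal sketch of the resulting schedule in plain Python (not the project's TensorFlow graph; the defaults below just mirror the flag values in this diff):

```python
# Sketch of the clamped decay introduced above: each call to the update op
# halves the learning rate, but the result is floored at lr_lower_boundary.
def decayed_lr(lr, lr_lower_boundary=0.00002):
    # Mirrors tf.maximum(lr * 0.5, config.lr_lower_boundary)
    return max(lr * 0.5, lr_lower_boundary)

lr = 0.00008  # default --g_lr / --d_lr
for step in range(4):
    lr = decayed_lr(lr)
    print(f"after update {step + 1}: lr = {lr:.6f}")
# 0.000040, 0.000020, 0.000020, 0.000020 -- the floor stops further decay
```

With the defaults, the rate is halved twice (to 0.00004, then 0.00002) and then held constant, instead of shrinking toward zero as in the previous unbounded `lr * 0.5` update.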