[ASR] add python simple adadelta optimizer, test=asr #2925

Merged
merged 4 commits on Feb 17, 2023
Changes from 1 commit
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
43 changes: 14 additions & 29 deletions paddlespeech/s2t/training/optimizer/adadelta.py
@@ -11,18 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict

import paddle
from paddle import _C_ops
from paddle import _legacy_C_ops
from paddle.fluid import core
from paddle.fluid import framework
from paddle.fluid.dygraph import base as imperative_base
from paddle.fluid.dygraph import no_grad
from paddle.fluid.framework import name_scope
from paddle.fluid.framework import Variable
from paddle.framework import in_dygraph_mode
from paddle.optimizer import Optimizer

__all__ = []
@@ -62,17 +51,17 @@ class SimpleAdadelta(Optimizer):
If a parameter has already set a regularizer using :ref:`api_fluid_ParamAttr`, \
the regularization setting here in the optimizer will be ignored for that parameter. \
Otherwise, the regularization setting here in the optimizer will take effect. \
Default None, meaning there is no regularization.
foreach (bool, optional): whether the foreach implementation of the optimizer is used. The default value is None.
maximize (bool, optional): maximize the params based on the objective, instead of minimizing.
The default value is False.
name (str, optional): The default value is None. Normally there is no need for the user
to set this property. For more information, please refer to
:ref:`api_guide_Name` .

Examples:
.. code-block:: python

import paddle
from paddlespeech.s2t.training.optimizer.adadelta import SimpleAdadelta
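The remainder of this docstring example is hidden by the fold below; a minimal usage sketch in the same spirit (the constructor argument names mirror the parameters documented above but are an assumption here, not copied from the folded code):

import paddle
from paddlespeech.s2t.training.optimizer.adadelta import SimpleAdadelta

# toy model and a single forward/backward pass
linear = paddle.nn.Linear(10, 10)
inp = paddle.rand([10, 10], dtype="float32")
loss = paddle.mean(linear(inp))

# assumed argument names: learning_rate, parameters, rho, epsilon, weight_decay
optimizer = SimpleAdadelta(
    learning_rate=1.0,
    parameters=linear.parameters(),
    rho=0.9,
    epsilon=1e-6,
    weight_decay=0.0)

loss.backward()
optimizer.step()        # applies the update implemented in this file
optimizer.clear_grad()  # inherited from paddle.optimizer.Optimizer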

@@ -120,8 +109,7 @@ def __init__(
self.square_avgs = []
self.acc_deltas = []

@imperative_base.no_grad
@framework.dygraph_only
@paddle.no_grad()
def step(self):
"""Performs a single optimization step.

@@ -173,19 +161,16 @@ def step(self):
maximize=self._maximize)
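The body of step() is folded out of this view; the surviving tail above ends in the call into the functional adadelta defined next, so the method presumably gathers each parameter and its gradient and forwards them along with the running square_avgs / acc_deltas state. A rough sketch of that flow, where every attribute name other than self._maximize, self.square_avgs and self.acc_deltas is an assumption rather than something shown in this diff:

@paddle.no_grad()
def step(self):
    # Sketch only: collect (param, grad) pairs for parameters that actually
    # received gradients, then delegate the update to the functional API.
    params_grads = []
    for param in self._parameter_list:  # assumed attribute name
        if param.grad is not None:
            params_grads.append((param, param.grad))

    adadelta(
        params_grads,
        self.square_avgs,
        self.acc_deltas,
        foreach=self._foreach,              # assumed attribute names from here on
        learning_rate=self._learning_rate,
        rho=self._rho,
        epsilon=self._epsilon,
        weight_decay=self._weight_decay,
        maximize=self._maximize)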


def adadelta(
params_grads,
square_avgs,
acc_deltas,
# kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
# setting this as kwarg for now as functional API is compiled by torch/distributed/optim
foreach=None,
*,
learning_rate: float,
rho: float,
epsilon: float,
weight_decay: float,
maximize: bool):
def adadelta(params_grads,
square_avgs,
acc_deltas,
foreach=None,
*,
learning_rate: float,
rho: float,
epsilon: float,
weight_decay: float,
maximize: bool):

if foreach is None:
# if foreach is None, set False
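The actual per-parameter update is folded out below this point; it follows the standard Adadelta rule. A minimal single-tensor sketch of that rule, where the helper name and signature are illustrative rather than this file's API, and the real body may additionally branch on foreach for a multi-tensor fast path:

import paddle

def adadelta_update(param, grad, square_avg, acc_delta, *,
                    lr, rho, eps, weight_decay, maximize):
    # Sketch of the standard Adadelta rule for a single parameter tensor.
    grad = -grad if maximize else grad
    if weight_decay != 0.0:
        grad = grad + weight_decay * param
    # running average of squared gradients
    square_avg = rho * square_avg + (1.0 - rho) * grad * grad
    # RMS-scaled step built from the accumulated deltas and gradients
    delta = paddle.sqrt(acc_delta + eps) / paddle.sqrt(square_avg + eps) * grad
    # running average of squared updates, then the parameter update itself
    acc_delta = rho * acc_delta + (1.0 - rho) * delta * delta
    new_param = param - lr * delta
    return new_param, square_avg, acc_delta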