diff --git a/src/python/nimbusml/internal/core/linear_model/linearsvmbinaryclassifier.py b/src/python/nimbusml/internal/core/linear_model/linearsvmbinaryclassifier.py
index 0109ba44..36bf3a19 100644
--- a/src/python/nimbusml/internal/core/linear_model/linearsvmbinaryclassifier.py
+++ b/src/python/nimbusml/internal/core/linear_model/linearsvmbinaryclassifier.py
@@ -69,7 +69,9 @@ class LinearSvmBinaryClassifier(
 
     :param caching: Whether trainer should cache input training data.
 
-    :param lambda_: Regularizer constant.
+    :param l2_regularization: L2 regularization weight. This also controls the
+        learning rate, with the learning rate being inversely proportional to
+        the regularization weight.
 
     :param perform_projection: Perform projection to unit-ball? Typically used
         with batch size > 1.
@@ -105,7 +107,7 @@ def __init__(
         self,
         normalize='Auto',
         caching='Auto',
-        lambda_=0.001,
+        l2_regularization=0.001,
         perform_projection=False,
         number_of_iterations=1,
         initial_weights_diameter=0.0,
@@ -119,7 +121,7 @@ def __init__(
 
         self.normalize = normalize
         self.caching = caching
-        self.lambda_ = lambda_
+        self.l2_regularization = l2_regularization
         self.perform_projection = perform_projection
         self.number_of_iterations = number_of_iterations
         self.initial_weights_diameter = initial_weights_diameter
@@ -146,7 +148,7 @@ def _get_node(self, **all_args):
                 all_args),
             normalize_features=self.normalize,
             caching=self.caching,
-            lambda_=self.lambda_,
+            lambda_=self.l2_regularization,
            perform_projection=self.perform_projection,
            number_of_iterations=self.number_of_iterations,
            initial_weights_diameter=self.initial_weights_diameter,
@@ -157,3 +159,4 @@ def _get_node(self, **all_args):
 
         all_args.update(algo_args)
         return self._entrypoint(**all_args)
+
\ No newline at end of file
diff --git a/src/python/nimbusml/linear_model/linearsvmbinaryclassifier.py b/src/python/nimbusml/linear_model/linearsvmbinaryclassifier.py
index 27511e27..15ebdd63 100644
--- a/src/python/nimbusml/linear_model/linearsvmbinaryclassifier.py
+++ b/src/python/nimbusml/linear_model/linearsvmbinaryclassifier.py
@@ -78,7 +78,9 @@ class LinearSvmBinaryClassifier(
 
     :param caching: Whether trainer should cache input training data.
 
-    :param lambda_: Regularizer constant.
+    :param l2_regularization: L2 regularization weight. This also controls the
+        learning rate, with the learning rate being inversely proportional to
+        the regularization weight.
 
     :param perform_projection: Perform projection to unit-ball? Typically used
         with batch size > 1.
@@ -114,7 +116,7 @@ def __init__(
         self,
         normalize='Auto',
         caching='Auto',
-        lambda_=0.001,
+        l2_regularization=0.001,
         perform_projection=False,
         number_of_iterations=1,
         initial_weights_diameter=0.0,
@@ -147,7 +149,7 @@ def __init__(
             self,
             normalize=normalize,
             caching=caching,
-            lambda_=lambda_,
+            l2_regularization=l2_regularization,
             perform_projection=perform_projection,
             number_of_iterations=number_of_iterations,
             initial_weights_diameter=initial_weights_diameter,
diff --git a/src/python/tools/manifest_diff.json b/src/python/tools/manifest_diff.json
index d8a64d82..d88e735f 100644
--- a/src/python/tools/manifest_diff.json
+++ b/src/python/tools/manifest_diff.json
@@ -241,7 +241,13 @@
         "Module": "linear_model",
         "Type": "Classifier",
         "Predict_Proba" : true,
-        "Decision_Function" : true
+        "Decision_Function" : true,
+        "Inputs": [{
+            "Name": "Lambda",
+            "NewName": "l2_regularization",
+            "Desc": "L2 regularization weight. This also controls the learning rate, with the learning rate being inversely proportional to the regularization weight."
+        }
+        ]
     },
     {
         "Name": "Trainers.EnsembleClassification",
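
For reviewers, a minimal usage sketch of the renamed argument (not part of this PR; the toy arrays below are illustrative):

# Minimal sketch of the renamed parameter (illustrative, not from the PR).
# Requires nimbusml with this change applied.
import numpy as np
from nimbusml.linear_model import LinearSvmBinaryClassifier

X = np.array([[1.0, 2.0], [2.0, 1.0], [0.5, 3.0], [3.0, 0.5]],
             dtype=np.float32)
y = np.array([1, 0, 1, 0], dtype=np.float32)

# 'l2_regularization' replaces the old 'lambda_' keyword; a larger value
# regularizes more strongly and, per the updated docstring, lowers the
# effective learning rate.
svm = LinearSvmBinaryClassifier(l2_regularization=0.001,
                                number_of_iterations=10)
svm.fit(X, y)
print(svm.predict(X))

Note that since the keyword is renamed rather than aliased, existing callers passing lambda_= will now raise a TypeError; only the entrypoint layer in _get_node still maps the value back to the internal lambda_ argument.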