Skip to content

Commit

Permalink
Set a fixed random seed (np.random.seed(42)) in test normalizers for reproducibility
Browse files Browse the repository at this point in the history
  • Loading branch information
Franck Mamalet committed Oct 9, 2024
1 parent 8a05fc7 commit 016b9e1
Show file tree
Hide file tree
Showing 6 changed files with 23 additions and 104 deletions.
2 changes: 1 addition & 1 deletion tests/test_layers.py
Original file line number Diff line number Diff line change
Expand Up @@ -192,7 +192,7 @@ def train_k_lip_model(
# define logging features
logdir = os.path.join("logs", uft.LIP_LAYERS, "%s" % layer_type.__name__)
os.makedirs(logdir, exist_ok=True)

callback_list = (
[]
) # [callbacks.TensorBoard(logdir), hp.KerasCallback(logdir, hparams)]
Expand Down
12 changes: 6 additions & 6 deletions tests/test_losses.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,14 +92,14 @@ def check_serialization(nb_class, loss):


def binary_data(x):
"""Return a Framework float32 tensor of shape [N, 1]
from a list/np.array of shape [N]"""
"""Return a Framework float32 tensor of shape [N, 1]
from a list/np.array of shape [N]"""
return np.expand_dims(np.array(x, dtype=np.float32), axis=-1)


def one_hot_data(x, n_class):
"""Return a Framework float32 tensor of shape [N, n_class]
from a list/np.array of shape [N]"""
"""Return a Framework float32 tensor of shape [N, n_class]
from a list/np.array of shape [N]"""
return np.eye(n_class)[x]


Expand Down Expand Up @@ -840,8 +840,8 @@ def test_minibatches_binary_loss_generic(
def test_multilabel_loss_generic(loss_instance, loss_params, rtol):
"""
Assert binary losses with multilabels.
Three losses are tested (KRLoss, HingeMarginLoss and HKRLoss).
We compare losses with three separate binary classification and
Three losses are tested (KRLoss, HingeMarginLoss and HKRLoss).
We compare losses with three separate binary classification and
the corresponding multilabel problem.
"""
# Create predictions and labels for 3 binary problems and the concatenated
Expand Down
4 changes: 2 additions & 2 deletions tests/test_metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,8 +40,8 @@


def one_hot_data(x, n_class):
"""Return a Framework float32 tensor of shape [N, n_class]
from a list/np.array of shape [N]"""
"""Return a Framework float32 tensor of shape [N, n_class]
from a list/np.array of shape [N]"""
return np.eye(n_class)[x]


Expand Down
13 changes: 13 additions & 0 deletions tests/test_normalizers.py
Original file line number Diff line number Diff line change
Expand Up @@ -149,6 +149,8 @@ def test_kernel_conv_svd(kernel_shape, strides):
if hasattr(_padding_circular, "unavailable_class"):
pytest.skip("_padding_circular not implemented")

np.random.seed(42)

kernel = np.random.normal(size=kernel_shape).astype("float32")
spectral_input_shape, RO_case, cPad = set_spectral_input_shape(kernel, strides)

Expand Down Expand Up @@ -220,6 +222,8 @@ def test_kernel_conv_svd(kernel_shape, strides):
],
)
def test_bjorck_normalization(kernel_shape):
np.random.seed(42)

kernel = np.random.normal(size=kernel_shape).astype("float32")
"""Compare max singular value using power iteration and tf.linalg.svd"""
sigmas_svd = np.linalg.svd(
Expand Down Expand Up @@ -265,6 +269,7 @@ def test_bjorck_normalization(kernel_shape):
def test_reshaped_kernel_orthogonalization(kernel_shape):
if hasattr(reshaped_kernel_orthogonalization, "unavailable_class"):
pytest.skip("reshaped_kernel_orthogonalization not implemented")
np.random.seed(42)

kernel = np.random.normal(size=kernel_shape).astype("float32")
"""Compare max singular value using power iteration and tf.linalg.svd"""
Expand Down Expand Up @@ -312,6 +317,8 @@ def test_bjorck_norm():
"""
test bjorck_norm parametrization implementation
"""
np.random.seed(42)

m = uft.get_instance_framework(
tLinear, {"in_features": 2, "out_features": 2}
) # torch.nn.Linear(2, 2)
Expand Down Expand Up @@ -351,6 +358,8 @@ def test_frobenius_norm():
"""
test frobenius_norm parametrization implementation
"""
np.random.seed(42)

m = uft.get_instance_framework(
tLinear, {"in_features": 2, "out_features": 2}
) # torch.nn.Linear(2, 2)
Expand Down Expand Up @@ -386,6 +395,8 @@ def test_frobenius_norm_disjoint_neurons():
"""
Test `disjoint_neurons=True` argument in frobenius_norm parametrization
"""
np.random.seed(42)

params = {"in_features": 5, "out_features": 3}
m = uft.get_instance_framework(tLinear, params)

Expand All @@ -410,6 +421,8 @@ def test_lconv_norm():
"""
test lconv_norm parametrization implementation
"""
np.random.seed(42)

params = {
"in_channels": 1,
"out_channels": 2,
Expand Down
95 changes: 0 additions & 95 deletions tests/test_updownsampling_pytest.py

This file was deleted.

1 change: 1 addition & 0 deletions tests/utils_framework.py
Original file line number Diff line number Diff line change
Expand Up @@ -224,6 +224,7 @@ def get_instance_withcheck(
),
}


def get_instance_framework(instance_type, inst_params):
if instance_type not in getters_dict:
instance = get_instance_generic(instance_type, inst_params)
Expand Down

0 comments on commit 016b9e1

Please sign in to comment.