linter
Franck Mamalet committed Oct 8, 2024
1 parent 4325e43 commit 554e499
Showing 8 changed files with 135 additions and 36 deletions.
18 changes: 15 additions & 3 deletions tests/test_compute_layer_sv.py
@@ -295,7 +295,11 @@ def train_compute_and_verifySV(
     ),
     dict(
         layer_type=SpectralConv2d,
-        layer_params={"in_channels": 1, "out_channels": 2, "kernel_size": (3, 3),},
+        layer_params={
+            "in_channels": 1,
+            "out_channels": 2,
+            "kernel_size": (3, 3),
+        },
         batch_size=100,
         steps_per_epoch=125,
         epochs=5,
@@ -364,7 +368,11 @@ def train_compute_and_verifySV(
     ),
     dict(
         layer_type=FrobeniusConv2d,
-        layer_params={"in_channels": 1, "out_channels": 2, "kernel_size": (3, 3),},
+        layer_params={
+            "in_channels": 1,
+            "out_channels": 2,
+            "kernel_size": (3, 3),
+        },
         batch_size=100,
         steps_per_epoch=125,
         epochs=5,
@@ -395,7 +403,11 @@ def train_compute_and_verifySV(
     ),
     dict(
         layer_type=FrobeniusConv2d,
-        layer_params={"in_channels": 3, "out_channels": 2, "kernel_size": (3, 3),},
+        layer_params={
+            "in_channels": 3,
+            "out_channels": 2,
+            "kernel_size": (3, 3),
+        },
         batch_size=100,
         steps_per_epoch=125,
         epochs=5,
22 changes: 17 additions & 5 deletions tests/test_layers.py
@@ -209,7 +209,13 @@ def train_k_lip_model(
 
     traind_ds = linear_generator(batch_size, input_shape, kernel)
     uft.train(
-        traind_ds, model, loss_fn, optimizer, epochs, batch_size, steps_per_epoch=10,
+        traind_ds,
+        model,
+        loss_fn,
+        optimizer,
+        epochs,
+        batch_size,
+        steps_per_epoch=10,
     )
     # the seed is set to compare all models with the same data
     test_dl = linear_generator(batch_size, input_shape, kernel)
@@ -271,13 +277,19 @@ def _check_emp_lip_const(emp_lip_const, from_disk_emp_lip_const, test_params):
 
 def _apply_tests_bank(test_params):
     pp.pprint(test_params)
-    (mse, emp_lip_const, from_disk_mse, from_disk_emp_lip_const,) = train_k_lip_model(
-        **test_params
-    )
+    (
+        mse,
+        emp_lip_const,
+        from_disk_mse,
+        from_disk_emp_lip_const,
+    ) = train_k_lip_model(**test_params)
     print("test mse: %f" % mse)
     print(
         "empirical lip const: %f ( expected %s )"
-        % (emp_lip_const, min(test_params["k_lip_model"], test_params["k_lip_data"]),)
+        % (
+            emp_lip_const,
+            min(test_params["k_lip_model"], test_params["k_lip_data"]),
+        )
     )
     _check_mse_results(mse, from_disk_mse, test_params)
     _check_emp_lip_const(emp_lip_const, from_disk_emp_lip_const, test_params)
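Note on the expected value above: the data generator is k_lip_data-Lipschitz and the model is constrained to k_lip_model, so the slope the test can observe is capped by both, hence min(k_lip_model, k_lip_data). A minimal sketch of how an empirical Lipschitz constant can be estimated (illustrative helper, not the suite's train_k_lip_model):

```python
# Illustrative only: lower-bound the Lipschitz constant of f from sampled pairs.
import numpy as np

def empirical_lip_const(f, x1, x2):
    """Max observed ||f(x1) - f(x2)|| / ||x1 - x2|| over paired rows."""
    num = np.linalg.norm(f(x1) - f(x2), axis=1)
    den = np.linalg.norm(x1 - x2, axis=1)
    return float(np.max(num / den))

# A 0.5-Lipschitz map never shows a ratio above 0.5:
f = lambda x: 0.5 * x
x1, x2 = np.random.normal(size=(2, 1000, 16))
assert empirical_lip_const(f, x1, x2) <= 0.5 + 1e-6
```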
11 changes: 10 additions & 1 deletion tests/test_losses.py
@@ -134,7 +134,16 @@ def one_hot_data(x, n_class):
    dtype=np.float32,
 )
 results_tau_cat = np.float64(
-    [0.044275, 0.115109, 1.243572, 0.084923, 0.010887, 2.802300, 0.114224, 0.076357,]
+    [
+        0.044275,
+        0.115109,
+        1.243572,
+        0.084923,
+        0.010887,
+        2.802300,
+        0.114224,
+        0.076357,
+    ]
 )
 y_predgaussian, y_truegaussian = get_gaussian_data(20000)
 n_class = 10
47 changes: 39 additions & 8 deletions tests/test_metrics.py
@@ -54,7 +54,15 @@ def one_hot_data(x, n_class):
            {"epsilon": 1, "lip_const": 1.0, "disjoint_neurons": False},
            10,
        ),
-        (10, BinaryProvableRobustAccuracy, {"epsilon": 1, "lip_const": 1.0,}, 10,),
+        (
+            10,
+            BinaryProvableRobustAccuracy,
+            {
+                "epsilon": 1,
+                "lip_const": 1.0,
+            },
+            10,
+        ),
         (
             10,
             CategoricalProvableAvgRobustness,
@@ -105,7 +113,9 @@ def test_serialization(nb_class, loss, loss_params, nb_classes):
     )
     l2 = m2.evaluate(x, y)
     np.testing.assert_equal(
-        l1, l2, err_msg=f"serialization changed loss value for {loss}",
+        l1,
+        l2,
+        err_msg=f"serialization changed loss value for {loss}",
     )
     return
 
@@ -182,7 +192,11 @@ def test_provable_vs_adjusted(loss, loss_params, nb_class):
     [
         (
             CategoricalProvableAvgRobustness,
-            {"lip_const": 1.0, "disjoint_neurons": True, "negative_robustness": True,},
+            {
+                "lip_const": 1.0,
+                "disjoint_neurons": True,
+                "negative_robustness": True,
+            },
             10,
         ),
         (
@@ -228,7 +242,11 @@ def test_data_format(loss, loss_params, nb_class):
     [
         (
             CategoricalProvableAvgRobustness,
-            {"lip_const": 1.0, "disjoint_neurons": False, "negative_robustness": True,},
+            {
+                "lip_const": 1.0,
+                "disjoint_neurons": False,
+                "negative_robustness": True,
+            },
             10,
         ),
         (
@@ -310,28 +328,41 @@ def test_disjoint_neurons(loss, loss_params, nb_class):
     [
         (
             CategoricalProvableRobustAccuracy,
-            {"epsilon": 0.25, "lip_const": 1.0, "disjoint_neurons": False,},
+            {
+                "epsilon": 0.25,
+                "lip_const": 1.0,
+                "disjoint_neurons": False,
+            },
             y_pred1,
             y_true1,
             0.25,
         ),
         (
             CategoricalProvableAvgRobustness,
-            {"lip_const": 1.0, "disjoint_neurons": False,},
+            {
+                "lip_const": 1.0,
+                "disjoint_neurons": False,
+            },
             y_pred1,
             y_true1,
             0.25 * 1.1 / np.sqrt(2),
         ),
         (
             BinaryProvableRobustAccuracy,
-            {"epsilon": 0.25, "lip_const": 1.0,},
+            {
+                "epsilon": 0.25,
+                "lip_const": 1.0,
+            },
             y_pred2,
             y_true2,
             0.25,
         ),
         (
             BinaryProvableAvgRobustness,
-            {"lip_const": 1.0, "negative_robustness": False,},
+            {
+                "lip_const": 1.0,
+                "negative_robustness": False,
+            },
             y_pred2,
             y_true2,
             0.125 * (1.1 * 2),
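For context on expected values such as `0.25 * 1.1 / np.sqrt(2)`: a common certificate for an L-Lipschitz categorical model scales the top-two logit margin by 1 / (L * sqrt(2)). The arithmetic below is a hedged guess at how that constant is composed, not the metric's actual code:

```python
# Hypothetical back-of-envelope, assuming a margin / (L * sqrt(2)) certificate
# for categorical outputs; `margin` is an assumed logit gap in y_pred1.
import numpy as np

lip_const = 1.0
margin = 0.25 * 1.1
cat_radius = margin / (lip_const * np.sqrt(2))  # == 0.25 * 1.1 / np.sqrt(2)
```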
16 changes: 12 additions & 4 deletions tests/test_models.py
@@ -200,7 +200,8 @@ def test_deel_lip_Sequential():
 
 
 @pytest.mark.skipif(
-    hasattr(tModel, "unavailable_class"), reason="tModel not available",
+    hasattr(tModel, "unavailable_class"),
+    reason="tModel not available",
 )
 def test_Model():
     """Assert vanilla conversion of a tf.keras.Model model"""
@@ -224,7 +225,8 @@ def test_Model():
 
 
 @pytest.mark.skipif(
-    hasattr(Model, "unavailable_class"), reason="Model not available",
+    hasattr(Model, "unavailable_class"),
+    reason="Model not available",
 )
 def test_lip_Model():
     """Assert vanilla conversion of a deel.lip.Model model"""
@@ -279,7 +281,10 @@ def test_warning_unsupported_1Lip_layers():
     with warnings.catch_warnings(record=True) as w:
         if lay is not None:
             _ = uft.generate_k_lip_model(
-                Sequential, {"layers": [lay]}, input_shape=None, k=None,
+                Sequential,
+                {"layers": [lay]},
+                input_shape=None,
+                k=None,
             )
         assert len(w) == 0, f"Layer {lay} shouldn't raise warning"
 
@@ -308,7 +313,10 @@ def test_warning_unsupported_1Lip_layers():
     with pytest.warns(Warning):
         if lay is not None:
             _ = uft.generate_k_lip_model(
-                Sequential, {"layers": [lay]}, input_shape=None, k=None,
+                Sequential,
+                {"layers": [lay]},
+                input_shape=None,
+                k=None,
             )
 
 
42 changes: 32 additions & 10 deletions tests/test_normalizers.py
@@ -56,7 +56,11 @@
 
 
 @pytest.mark.parametrize(
-    "kernel_shape", [(15, 32), (32, 15),],
+    "kernel_shape",
+    [
+        (15, 32),
+        (32, 15),
+    ],
 )
 def test_kernel_svd(kernel_shape):
     """Compare max singular value using power iteration and np.linalg.svd"""
@@ -120,7 +124,7 @@ def set_spectral_input_shape(kernel, strides):
     if stride > 1:
         N = int(0.5 + N / stride)
 
-    if c_in * stride ** 2 > c_out:
+    if c_in * stride**2 > c_out:
         spectral_input_shape = (N, N, c_out)
         RO_case = True
     else:

@@ ... @@
 @pytest.mark.parametrize(
     "kernel_shape, strides",
-    [((5, 5, 32, 64), (1, 1)), ((3, 3, 12, 8), (1, 1)), ((3, 3, 24, 24), (1, 1)),],
+    [
+        ((5, 5, 32, 64), (1, 1)),
+        ((3, 3, 12, 8), (1, 1)),
+        ((3, 3, 24, 24), (1, 1)),
+    ],
 )
 def test_kernel_conv_svd(kernel_shape, strides):
     """Compare power iteration conv against SVD."""
@@ ... @@
     # Compute max singular value using FFT2 and SVD
     kernel_n = kernel.astype(dtype="float32")
     transforms = np.fft.fft2(
-        kernel_n, (spectral_input_shape[0], spectral_input_shape[1]), axes=[0, 1],
+        kernel_n,
+        (spectral_input_shape[0], spectral_input_shape[1]),
+        axes=[0, 1],
     )
     svd = np.linalg.svd(transforms, compute_uv=False)
     SVmax = np.max(svd)
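The FFT reference above follows the circular-convolution result of Sedghi et al. (2019): a 2-D FFT of the kernel at the input's spatial size yields one c_in x c_out matrix per frequency, and their singular values are exactly those of the convolution operator. A self-contained sketch (the spatial size n is an assumed example value):

```python
import numpy as np

kernel = np.random.normal(size=(3, 3, 12, 8))  # (kh, kw, c_in, c_out)
n = 32                                         # assumed square input size
transforms = np.fft.fft2(kernel, (n, n), axes=[0, 1])  # (n, n, c_in, c_out)
# Batched SVD over the trailing (c_in, c_out) matrices, one per frequency.
sv_max = np.linalg.svd(transforms, compute_uv=False).max()
```

This is also why the earlier set_spectral_input_shape hunk picks a side: with stride s, power iteration runs on whichever of input and output has fewer elements, and `c_in * s**2 > c_out` appears to flag that case.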
@@ -203,7 +213,11 @@ def test_kernel_conv_svd(kernel_shape, strides):
 
 
 @pytest.mark.parametrize(
-    "kernel_shape", [(15, 32), (64, 32),],
+    "kernel_shape",
+    [
+        (15, 32),
+        (64, 32),
+    ],
 )
 def test_bjorck_normalization(kernel_shape):
     kernel = np.random.normal(size=kernel_shape).astype("float32")
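A rough sketch of what Björck normalization does (iteration count and tolerance are assumptions, and the library's implementation differs in details): the iteration W ← (1 + β)W − βW WᵀW with β = 0.5 drives every singular value of a pre-scaled matrix to 1.

```python
import numpy as np

def bjorck_normalization(w, n_iter=25, beta=0.5):
    """Push all singular values of w toward 1 (requires sigma_max(w) <= 1)."""
    for _ in range(n_iter):
        w = (1.0 + beta) * w - beta * (w @ w.T @ w)
    return w

w = np.random.normal(size=(15, 32)).astype("float32")
w /= np.linalg.svd(w, compute_uv=False).max()  # pre-scale so sigma_max == 1
w_ortho = bjorck_normalization(w)
# Rows are now (close to) orthonormal: W W^T ~ I
np.testing.assert_allclose(w_ortho @ w_ortho.T, np.eye(15), atol=1e-4)
```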
@@ -242,7 +256,11 @@ def test_bjorck_normalization(kernel_shape):
 
 
 @pytest.mark.parametrize(
-    "kernel_shape", [(15, 32), (5, 5, 64, 32),],
+    "kernel_shape",
+    [
+        (15, 32),
+        (5, 5, 64, 32),
+    ],
 )
 def test_reshaped_kernel_orthogonalization(kernel_shape):
     if hasattr(reshaped_kernel_orthogonalization, "unavailable_class"):
@@ -287,7 +305,8 @@ def test_reshaped_kernel_orthogonalization(kernel_shape):
 
 
 @pytest.mark.skipif(
-    hasattr(bjorck_norm, "unavailable_class"), reason="bjorck_norm not available",
+    hasattr(bjorck_norm, "unavailable_class"),
+    reason="bjorck_norm not available",
 )
 def test_bjorck_norm():
     """
@@ -325,7 +344,8 @@ def test_bjorck_norm():
 
 
 @pytest.mark.skipif(
-    hasattr(frobenius_norm, "unavailable_class"), reason="frobenius_norm not available",
+    hasattr(frobenius_norm, "unavailable_class"),
+    reason="frobenius_norm not available",
 )
 def test_frobenius_norm():
     """
@@ -359,7 +379,8 @@ def test_frobenius_norm():
 
 
 @pytest.mark.skipif(
-    hasattr(frobenius_norm, "unavailable_class"), reason="frobenius_norm not available",
+    hasattr(frobenius_norm, "unavailable_class"),
+    reason="frobenius_norm not available",
 )
 def test_frobenius_norm_disjoint_neurons():
     """
@@ ... @@
 
 
 @pytest.mark.skipif(
-    hasattr(lconv_norm, "unavailable_class"), reason="lconv_norm not available",
+    hasattr(lconv_norm, "unavailable_class"),
+    reason="lconv_norm not available",
 )
 def test_lconv_norm():
     """
9 changes: 6 additions & 3 deletions tests/test_regularizers.py
@@ -34,7 +34,8 @@
 
 
 @pytest.mark.skipif(
-    hasattr(Lorth2d, "unavailable_class"), reason="Lorth2d not available",
+    hasattr(Lorth2d, "unavailable_class"),
+    reason="Lorth2d not available",
 )
 @pytest.mark.parametrize(
     "kernel_shape,stride,delta,padding",
@@ -77,7 +78,8 @@ def test_set_kernel_shape(kernel_shape, stride, delta, padding):
 
 
 @pytest.mark.skipif(
-    hasattr(Lorth2d, "unavailable_class"), reason="Lorth2d not available",
+    hasattr(Lorth2d, "unavailable_class"),
+    reason="Lorth2d not available",
 )
 @pytest.mark.parametrize(
     "kernel_shape,stride,err,err_msg",
@@ -115,7 +117,8 @@ def test_existence_orthogonal_conv(kernel_shape, stride, err, err_msg):
 
 
 @pytest.mark.skipif(
-    hasattr(Lorth2d, "unavailable_class"), reason="Lorth2d not available",
+    hasattr(Lorth2d, "unavailable_class"),
+    reason="Lorth2d not available",
 )
 def test_compute_lorth():
     """Assert Lorth2d computation on an identity kernel => must return 0"""
(diff for the eighth changed file was not loaded)
