From 1fd81f491c3e8c1764ab3428e4689b006b0eda1a Mon Sep 17 00:00:00 2001
From: Franck Mamalet <49721198+franckma31@users.noreply.github.com>
Date: Mon, 2 Dec 2024 13:50:42 +0100
Subject: [PATCH] update for compatibility with tensorflow tests

---
 tests/test_layers.py               |  6 ++-
 tests/test_losses.py               |  1 -
 tests/test_metrics.py              |  1 -
 tests/test_normalization.py        | 16 ++++++++
 tests/test_normalizers.py          |  1 -
 tests/test_unconstrained_layers.py |  6 +--
 tests/test_updownsampling.py       | 51 +++++++++++++++---------
 tests/utils_framework.py           | 62 +++++++++++++++++++++++-------
 8 files changed, 105 insertions(+), 39 deletions(-)

diff --git a/tests/test_layers.py b/tests/test_layers.py
index 86d15c3..c3e0e5c 100644
--- a/tests/test_layers.py
+++ b/tests/test_layers.py
@@ -149,7 +149,7 @@ def train_k_lip_model(
     input_shape: tuple,
     k_lip_model: float,
     k_lip_data: float,
-    **kwargs
+    **kwargs,
 ):
     """
     Create a generator, create a model, train it and return the results.
@@ -630,6 +630,8 @@ def test_spectralconv2d_pad(
     layer_params["padding"] = pad
     layer_params["padding_mode"] = pad_mode
     layer_params["kernel_size"] = kernel_size
+    if not uft.is_supported_padding(pad_mode, SpectralConv2d):
+        pytest.skip(f"SpectralConv2d: Padding {pad_mode} not supported")
     test_params = dict(
         layer_type=SpectralConv2d,
         layer_params=layer_params,
@@ -1372,6 +1374,8 @@ def test_Conv2d_vanilla_export(pad, pad_mode, kernel_size, layer_params, layer_t
     layer_type = layer_type
     input_shape = (1, 5, 5)
 
+    if not uft.is_supported_padding(pad_mode, layer_type):
+        pytest.skip(f"{layer_type}: Padding {pad_mode} not supported")
     model = uft.generate_k_lip_model(layer_type, layer_params, input_shape, 1.0)
 
     # lay = SpectralConvTranspose2d(**kwargs)
diff --git a/tests/test_losses.py b/tests/test_losses.py
index 12e9746..b0cd03d 100644
--- a/tests/test_losses.py
+++ b/tests/test_losses.py
@@ -228,7 +228,6 @@ def test_loss_generic_value(
     y_true, y_pred = uft.to_tensor(y_true_np), uft.to_tensor(y_pred_np)
 
     loss_val = uft.compute_loss(loss, y_pred, y_true).numpy()
-    print("loss_val", loss_val, expected_loss)
     np.testing.assert_allclose(
         loss_val,
         np.float32(expected_loss),
diff --git a/tests/test_metrics.py b/tests/test_metrics.py
index 0be592c..62946a6 100644
--- a/tests/test_metrics.py
+++ b/tests/test_metrics.py
@@ -180,7 +180,6 @@ def test_provable_vs_adjusted(loss, loss_params, nb_class):
 
     l1 = pr(y, x).numpy()
     l2 = ar(y, x).numpy()
-    print(l1, l2)
     diff = np.min(np.abs(l1 - l2))
     assert (
         diff > 1e-4
diff --git a/tests/test_normalization.py b/tests/test_normalization.py
index 6aa3d5f..a6ffbed 100644
--- a/tests/test_normalization.py
+++ b/tests/test_normalization.py
@@ -61,6 +61,10 @@ def check_serialization(layer_type, layer_params, input_shape=(10,)):
     np.testing.assert_allclose(uft.to_numpy(y1), uft.to_numpy(y2))
 
 
+@pytest.mark.skipif(
+    hasattr(LayerCentering, "unavailable_class"),
+    reason="LayerCentering not available",
+)
 @pytest.mark.parametrize(
     "size, input_shape, bias",
     [
@@ -92,6 +96,10 @@ def test_LayerCentering(size, input_shape, bias):
     )  # eval mode use running_mean
 
 
+@pytest.mark.skipif(
+    hasattr(BatchCentering, "unavailable_class"),
+    reason="BatchCentering not available",
+)
 @pytest.mark.parametrize(
     "size, input_shape, bias",
     [
@@ -150,6 +158,8 @@ def test_BatchCentering(size, input_shape, bias):
 )
 def test_Normalization_serialization(norm_type, size, input_shape, bias):
     # Check serialization
+    if hasattr(norm_type, "unavailable_class"):
+        pytest.skip(f"{norm_type} not available")
     check_serialization(
         norm_type, layer_params={"size": size, "bias": bias}, input_shape=input_shape
     )
@@ -189,6 +199,8 @@ def linear_generator(batch_size, input_shape: tuple):
     ],
 )
 def test_Normalization_bias(norm_type, size, input_shape, bias):
+    if hasattr(norm_type, "unavailable_class"):
+        pytest.skip(f"{norm_type} not available")
     m = uft.generate_k_lip_model(
         norm_type,
         layer_params={"size": size, "bias": bias},
@@ -221,6 +233,10 @@ def test_Normalization_bias(norm_type, size, input_shape, bias):
         assert np.linalg.norm(bb) != 0.0
 
 
+@pytest.mark.skipif(
+    hasattr(BatchCentering, "unavailable_class"),
+    reason="BatchCentering not available",
+)
 @pytest.mark.parametrize(
     "size, input_shape, bias",
     [
diff --git a/tests/test_normalizers.py b/tests/test_normalizers.py
index 4294a4b..26d35e6 100644
--- a/tests/test_normalizers.py
+++ b/tests/test_normalizers.py
@@ -64,7 +64,6 @@
 )
 def test_kernel_svd(kernel_shape):
     """Compare max singular value using power iteration and np.linalg.svd"""
-    print(kernel_shape)
     kernel = rng.normal(size=kernel_shape).astype("float32")
     sigmas_svd = np.linalg.svd(
         np.reshape(kernel, (np.prod(kernel.shape[:-1]), kernel.shape[-1])),
diff --git a/tests/test_unconstrained_layers.py b/tests/test_unconstrained_layers.py
index 1c82c31..cd99255 100644
--- a/tests/test_unconstrained_layers.py
+++ b/tests/test_unconstrained_layers.py
@@ -71,7 +71,7 @@ def compare(x, x_ref, index_x=[], index_x_ref=[]):
 def test_padding(padding_tested, input_shape, batch_size, kernel_size, filters):
     """Test different padding types: assert values in original and padded tensors"""
     input_shape = uft.to_framework_channel(input_shape)
-    if not uft.is_supported_padding(padding_tested):
+    if not uft.is_supported_padding(padding_tested, PadConv2d):
         pytest.skip(f"Padding {padding_tested} not supported")
     kernel_size_list = kernel_size
     if isinstance(kernel_size, (int, float)):
@@ -176,7 +176,7 @@ def test_predict(padding_tested, input_shape, batch_size, kernel_size, filters):
 
     in_ch = input_shape[0]
     input_shape = uft.to_framework_channel(input_shape)
-    if not uft.is_supported_padding(padding_tested):
+    if not uft.is_supported_padding(padding_tested, PadConv2d):
         pytest.skip(f"Padding {padding_tested} not supported")
     layer_params = {
         "out_channels": 2,
@@ -250,7 +250,7 @@ def test_vanilla(padding_tested, input_shape, batch_size, kernel_size, filters):
 
     in_ch = input_shape[0]
    input_shape = uft.to_framework_channel(input_shape)
-    if not uft.is_supported_padding(padding_tested):
+    if not uft.is_supported_padding(padding_tested, PadConv2d):
         pytest.skip(f"Padding {padding_tested} not supported")
     layer_params = {
         "out_channels": 2,
diff --git a/tests/test_updownsampling.py b/tests/test_updownsampling.py
index 31a419b..a674f7d 100644
--- a/tests/test_updownsampling.py
+++ b/tests/test_updownsampling.py
@@ -38,7 +38,7 @@ def check_downsample(x, y, kernel_size):
         for dy in range(kernel_size):
             xx = x[:, :, dx::kernel_size, dy::kernel_size]
             yy = y[:, index :: (kernel_size * kernel_size), :, :]
-            np.testing.assert_array_equal(xx, yy)
+            np.testing.assert_almost_equal(xx, yy, decimal=6)
             index += 1
 
 
@@ -47,29 +47,37 @@
     reason="InvertibleDownSampling not available",
 )
 def test_invertible_downsample():
-
-    x = np.arange(32).reshape(1, 2, 4, 4)
+    x_np = np.arange(32).reshape(1, 2, 4, 4)
+    x = uft.to_NCHW_inv(x_np)
     x = uft.to_tensor(x)
     dw_layer = uft.get_instance_framework(InvertibleDownSampling, {"kernel_size": 2})
     y = dw_layer(x)
-    assert y.shape == (1, 8, 2, 2)
-    check_downsample(x, y, 2)
+    y_np = uft.to_numpy(y)
+    y_np = uft.to_NCHW(y_np)
+    assert y_np.shape == (1, 8, 2, 2)
+    check_downsample(x_np, y_np, 2)
 
 
     # 2D input
-    x = np.random.rand(10, 1, 128, 128)  # torch.rand(10, 1, 128, 128)
+    x_np = np.random.rand(10, 1, 128, 128)  # torch.rand(10, 1, 128, 128)
+    x = uft.to_NCHW_inv(x_np)
     x = uft.to_tensor(x)
     dw_layer = uft.get_instance_framework(InvertibleDownSampling, {"kernel_size": 4})
     y = dw_layer(x)
-    assert y.shape == (10, 16, 32, 32)
-    check_downsample(x, y, 4)
+    y_np = uft.to_numpy(y)
+    y_np = uft.to_NCHW(y_np)
+    assert y_np.shape == (10, 16, 32, 32)
+    check_downsample(x_np, y_np, 4)
 
-    x = np.random.rand(10, 4, 64, 64)
+    x_np = np.random.rand(10, 4, 64, 64)
+    x = uft.to_NCHW_inv(x_np)
     x = uft.to_tensor(x)
     dw_layer = uft.get_instance_framework(InvertibleDownSampling, {"kernel_size": 2})
     y = dw_layer(x)
-    assert y.shape == (10, 16, 32, 32)
-    check_downsample(x, y, 2)
+    y_np = uft.to_numpy(y)
+    y_np = uft.to_NCHW(y_np)
+    assert y_np.shape == (10, 16, 32, 32)
+    check_downsample(x_np, y_np, 2)
 
 
 @pytest.mark.skipif(
@@ -79,17 +87,22 @@ def test_invertible_upsample():
 
     # 2D input
-    x = np.random.rand(10, 16, 32, 32)
+    x_np = np.random.rand(10, 16, 32, 32)
+    x = uft.to_NCHW_inv(x_np)
     x = uft.to_tensor(x)
     dw_layer = uft.get_instance_framework(InvertibleUpSampling, {"kernel_size": 4})
     y = dw_layer(x)
-    assert y.shape == (10, 1, 128, 128)
-    check_downsample(y, x, 4)
+    y_np = uft.to_numpy(y)
+    y_np = uft.to_NCHW(y_np)
+    assert y_np.shape == (10, 1, 128, 128)
+    check_downsample(y_np, x_np, 4)
 
 
     dw_layer = uft.get_instance_framework(InvertibleUpSampling, {"kernel_size": 2})
     y = dw_layer(x)
-    assert y.shape == (10, 4, 64, 64)
-    check_downsample(y, x, 2)
+    y_np = uft.to_numpy(y)
+    y_np = uft.to_NCHW(y_np)
+    assert y_np.shape == (10, 4, 64, 64)
+    check_downsample(y_np, x_np, 2)
 
 
 @pytest.mark.skipif(
@@ -98,7 +111,8 @@
     reason="InvertibleUpSampling not available",
 )
 def test_invertible_upsample_downsample():
-    x = np.random.rand(10, 16, 32, 32)
+    x_np = np.random.rand(10, 16, 32, 32)
+    x = uft.to_NCHW_inv(x_np)
     x = uft.to_tensor(x)
     up_layer = uft.get_instance_framework(InvertibleUpSampling, {"kernel_size": 4})
     y = up_layer(x)
@@ -108,7 +122,8 @@
     assert z.shape == x.shape
     np.testing.assert_array_equal(x, z)
 
-    x = np.random.rand(10, 1, 128, 128)  # torch.rand(10, 1, 128, 128)
+    x_np = np.random.rand(10, 1, 128, 128)  # torch.rand(10, 1, 128, 128)
+    x = uft.to_NCHW_inv(x_np)
     x = uft.to_tensor(x)
     dw_layer = uft.get_instance_framework(InvertibleDownSampling, {"kernel_size": 4})
 
diff --git a/tests/utils_framework.py b/tests/utils_framework.py
index a2cd7b9..3c8fc76 100644
--- a/tests/utils_framework.py
+++ b/tests/utils_framework.py
@@ -208,11 +208,18 @@ def get_instance_withcheck(
     instance_type, inst_params, dict_keys_replace={}, list_keys_notimplemented=[]
 ):
     for k in list_keys_notimplemented:
-        if k in inst_params:
-            warnings.warn(
-                UserWarning("Warning key is not implemented", k, " in pytorch")
-            )
-            return None
+        if isinstance(k, tuple):
+            kk = k[0]
+            kv = k[1]
+        else:
+            kk = k
+            kv = None
+        if kk in inst_params:
+            if (kv is None) or inst_params[kk] in kv:
+                warnings.warn(
+                    UserWarning("Warning key is not implemented", kk, " in tensorflow")
+                )
+                return None
     layp = replace_key_params(inst_params, dict_keys_replace)
     return instance_type(**layp)
 
@@ -619,15 +626,42 @@ def vanillaModel(model):
     return model
 
 
-def is_supported_padding(padding):
-    return padding.lower() in [
-        "same",
-        "valid",
-        "reflect",
-        "circular",
-        "symmetric",
-        "replicate",
-    ]  # "constant",
+def is_supported_padding(padding, layer_type):
+    layertype2padding = {
+        SpectralConv2d: [
+            "same",
+            "zeros",
+            "valid",
+            "reflect",
+            "circular",
+            "symmetric",
+            "replicate",
+        ],
+        FrobeniusConv2d: [
+            "same",
+            "zeros",
+            "valid",
+            "reflect",
+            "circular",
+            "symmetric",
+            "replicate",
+        ],
+        PadConv2d: [
+            "same",
+            "zeros",
+            "valid",
+            "reflect",
+            "circular",
+            "symmetric",
+            "replicate",
+        ],
+    }
+    if layer_type in layertype2padding:
+        return padding.lower() in layertype2padding[layer_type]
+    else:
+        assert False
+        warnings.warn(f"layer {layer_type} type not supported for padding")
+        return False
 
 
 def pad_input(x, padding, kernel_size):