============================= test session starts ==============================
platform linux -- Python 3.8.0, pytest-7.1.2, pluggy-0.13.1
rootdir: /mnt/d/gitlab/shape-analysis-github
collected 12 items

tests/test_architectures.py ..FF                                         [ 33%]
tests/test_build_sparse_adjacency.py .F.                                 [ 58%]
tests/test_compute_face_area_and_angle.py .                              [ 66%]
tests/test_metric_conv.py ....                                           [100%]

=================================== FAILURES ===================================
_________________________ test_metric_conv_net[False] __________________________

classification = False

    @pytest.mark.parametrize("classification", [True, False])
    def test_metric_conv_net(classification: bool):
        """
        Test of ``MetricConvNet`` from the ``metric_conv`` module.

        :param classification: Boolean indicating whether to convert model to classification network
        """
        kwargs["classification"] = classification
        model = architectures.MetricConvNet(in_feats, out_feats, **kwargs)
>       out = model(features_per_vertex, pos, edges, faces)

tests/test_architectures.py:65:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/home/rtrachel/miniconda3/envs/metric-conv/lib/python3.8/site-packages/torch/nn/modules/module.py:1110: in _call_impl
    return forward_call(*input, **kwargs)
models/architectures.py:358: in forward
    out = self.conv6(x, vertices, edges, faces)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = MetricConvNet(
  (convs): ModuleList(
    (0): MetricConv(
      (metric): FaceMetric(
        (metric_mlp): MetricMLP...ear(in_features=32, out_features=18, bias=True)
          )
        )
      )
    )
  )
  (nonlinear): ELU(alpha=1.0)
)
name = 'conv6'

    def __getattr__(self, name: str) -> Union[Tensor, 'Module']:
        if '_parameters' in self.__dict__:
            _parameters = self.__dict__['_parameters']
            if name in _parameters:
                return _parameters[name]
        if '_buffers' in self.__dict__:
            _buffers = self.__dict__['_buffers']
            if name in _buffers:
                return _buffers[name]
        if '_modules' in self.__dict__:
            modules = self.__dict__['_modules']
            if name in modules:
                return modules[name]
>       raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, name))
E       AttributeError: 'MetricConvNet' object has no attribute 'conv6'

/home/rtrachel/miniconda3/envs/metric-conv/lib/python3.8/site-packages/torch/nn/modules/module.py:1185: AttributeError
____________________________ test_linear_metric_net ____________________________

    def test_linear_metric_net():
        """
        Test of ``LinearMetricNet`` from the ``metric_conv`` module.
        """
        model = architectures.LinearMetricNet(in_feats, out_feats, **kwargs)
>       out = model(pos, edges, faces)

tests/test_architectures.py:78:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = LinearMetricNet(
  (fc0): Linear(in_features=3, out_features=16, bias=True)
  (conv1): MetricConv(
    (metric): FaceM...1): Linear(in_features=128, out_features=256, bias=True)
  (fc2): Linear(in_features=256, out_features=86, bias=True)
)
input = (tensor([[ 0.0000,  0.0000,  0.2358],
        [ 0.0000,  0.7468,  0.0000],
        [ 0.5387,  0.0000,  0.0000],
  ...
        [1, 2, 3, 2, 3, 3, 0, 0, 0, 1, 1, 2]]), tensor([[0, 1, 2],
        [0, 2, 3],
        [0, 1, 3],
        [1, 3, 2]]))
kwargs = {}
forward_call =

    def _call_impl(self, *input, **kwargs):
        forward_call = (self._slow_forward if torch._C._get_tracing_state() else self.forward)
        # If we don't have any hooks, we want to skip the rest of the logic in
        # this function, and just call forward.
        if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
                or _global_forward_hooks or _global_forward_pre_hooks):
>           return forward_call(*input, **kwargs)
E           TypeError: forward() missing 1 required positional argument: 'faces'

/home/rtrachel/miniconda3/envs/metric-conv/lib/python3.8/site-packages/torch/nn/modules/module.py:1110: TypeError
________ TestBuildSparseAdjacency.test_build_sparse_adjacency_symmetric ________

self =

    def test_build_sparse_adjacency_symmetric(self):
        """
        Test for `metric.build_sparse_adjacency` when `symmetric=True`.
        """
        # Build a dense sparse matrix of size (n_nodes,n_nodes) with nnz non-zero elements.
        n_nodes = 10
        nnz = n_nodes
        # Non-zero entries
        idx1 = []
        idx2 = []
        weights1 = []
        weights2 = []
        weights_diag = torch.rand(n_nodes)
        matrix = torch.diag(1 / (1 + weights_diag ** 2))
        for _ in range(nnz):
            i = torch.randint(0, n_nodes, (1,))
            j = torch.randint(0, n_nodes, (1,))
            if [i, j] in idx1 or [j, i] in idx1 or i == j:
                # Make sure that an entry will not have multiple values
                continue
            w1 = torch.rand(1)
            w2 = torch.rand(1)
            matrix[i, j] = 1 / (1 + w1 ** 2)
            matrix[j, i] = 1 / (1 + w2 ** 2)
            idx1.append([i, j])
            weights1.append(w1)
            idx2.append([j, i])
            weights2.append(w2)
        idx1 = torch.tensor(idx1).t()
        idx2 = torch.tensor(idx2).t()
        weights1 = torch.tensor(weights1)
        weights2 = torch.tensor(weights2)
        sum_row = matrix.sum(dim=1)
        matrix = matrix / sum_row.view(n_nodes, 1)
        answer = 0.5 * (matrix + matrix.t())  # Symmetrize matrix (this is what will be tested against)
        idx_identity = torch.cat(
            (
                torch.arange(n_nodes).view(1, n_nodes),
                torch.arange(n_nodes).view(1, n_nodes),
            ),
            dim=0,
        )
        idx = torch.cat((idx_identity, idx1, idx2), dim=1)
        weights = torch.cat((weights_diag, weights1, weights2))
        x = metric.build_sparse_adjacency(idx, weights, device="cpu", symmetric=True, remove_reference=False)
>       assert (torch.abs(x.to_dense() - answer) < 1e-6).sum() == answer.nelement()
E       assert tensor(71) == 100
E        +  where tensor(71) = ()
E        +    where  = tensor([[0.0193, 0.0000, 0.0000, 0.0261, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,\n 0.0559],\n [0.0000, 0....,\n 0.0000],\n [0.0378, 0.0000, 0.0000, 0.0000, 0.0079, 0.0210, 0.0000, 0.0000, 0.0000,\n 0.0035]]) < 1e-06.sum
E        +      where tensor([[0.0193, 0.0000, 0.0000, 0.0261, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,\n 0.0559],\n [0.0000, 0....,\n 0.0000],\n [0.0378, 0.0000, 0.0000, 0.0000, 0.0079, 0.0210, 0.0000, 0.0000, 0.0000,\n 0.0035]]) = ((tensor([[0.3171, 0.0000, 0.0000, 0.3176, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,\n 0.3654],\n [0.0000, 0....,\n 0.0000],\n [0.2717, 0.0000, 0.0000, 0.0000, 0.2000, 0.2735, 0.0000, 0.0000, 0.0000,\n 0.2548]]) - tensor([[0.2978, 0.0000, 0.0000, 0.2915, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,\n 0.3094],\n [0.0000, 0....,\n 0.0000],\n [0.3094, 0.0000, 0.0000, 0.0000, 0.2079, 0.2945, 0.0000, 0.0000, 0.0000,\n 0.2513]])))
E        +        where  = torch.abs
E        +      and   tensor([[0.3171, 0.0000, 0.0000, 0.3176, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,\n 0.3654],\n [0.0000, 0....,\n 0.0000],\n [0.2717, 0.0000, 0.0000, 0.0000, 0.2000, 0.2735, 0.0000, 0.0000, 0.0000,\n 0.2548]]) = ()
E        +        where  = tensor(indices=tensor([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 5, 4, 6, 1, 5, 5, 4,\n 4, 9, 3, 6, 9...\n 0.1448, 0.1200, 0.1315, 0.1559, 0.0986]),\n size=(10, 10), nnz=40, layout=torch.sparse_coo).to_dense
E        +  and   100 = ()
E        +    where  = tensor([[0.2978, 0.0000, 0.0000, 0.2915, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,\n 0.3094],\n [0.0000, 0....,\n 0.0000],\n [0.3094, 0.0000, 0.0000, 0.0000, 0.2079, 0.2945, 0.0000, 0.0000, 0.0000,\n 0.2513]]).nelement

tests/test_build_sparse_adjacency.py:119: AssertionError
=========================== short test summary info ============================
FAILED tests/test_architectures.py::test_metric_conv_net[False] - AttributeEr...
FAILED tests/test_architectures.py::test_linear_metric_net - TypeError: forwa...
FAILED tests/test_build_sparse_adjacency.py::TestBuildSparseAdjacency::test_build_sparse_adjacency_symmetric
========================= 3 failed, 9 passed in 1.37s ==========================
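
A note on the first failure: the captured repr shows MetricConvNet registering its layers inside a ModuleList named convs, while models/architectures.py:358 calls self.conv6, which is not a registered submodule, so nn.Module.__getattr__ raises the AttributeError above. The snippet below is only a minimal sketch with hypothetical BrokenNet/FixedNet classes (not the project's code) showing how that pattern produces the error and how iterating over the ModuleList avoids it:

import torch
import torch.nn as nn

class BrokenNet(nn.Module):
    """Hypothetical stand-in: layers live in a ModuleList, but forward() asks for self.conv6."""

    def __init__(self):
        super().__init__()
        self.convs = nn.ModuleList([nn.Linear(8, 8) for _ in range(7)])

    def forward(self, x):
        # nn.Module.__getattr__ only resolves registered parameters/buffers/submodules,
        # so this raises AttributeError: 'BrokenNet' object has no attribute 'conv6'.
        return self.conv6(x)

class FixedNet(nn.Module):
    """Hypothetical fix: address the layers through the ModuleList that actually holds them."""

    def __init__(self):
        super().__init__()
        self.convs = nn.ModuleList([nn.Linear(8, 8) for _ in range(7)])

    def forward(self, x):
        for conv in self.convs:
            x = conv(x)
        return x

x = torch.rand(4, 8)
print(FixedNet()(x).shape)  # torch.Size([4, 8])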
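
On the second failure: the test passes three positional arguments, model(pos, edges, faces), and torch forwards them unchanged to LinearMetricNet.forward, which evidently declares one more required positional parameter, so Python reports the last one, 'faces', as missing. A minimal sketch of that mismatch, using a hypothetical FourArgNet stand-in rather than the real LinearMetricNet:

import torch
import torch.nn as nn

class FourArgNet(nn.Module):
    """Hypothetical stand-in whose forward() expects a feature tensor in addition to pos/edges/faces."""

    def forward(self, x, pos, edges, faces):
        return x

pos = torch.rand(4, 3)
edges = torch.randint(0, 4, (2, 12))
faces = torch.tensor([[0, 1, 2], [0, 2, 3], [0, 1, 3], [1, 3, 2]])

try:
    FourArgNet()(pos, edges, faces)  # only three of the four required arguments
except TypeError as err:
    print(err)  # forward() missing 1 required positional argument: 'faces'

Whether the fix belongs in the test (pass a feature tensor explicitly) or in LinearMetricNet.forward (make the extra input optional or derive it from pos) depends on the intended API.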
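
On the adjacency failure: in the printed values the rows of x.to_dense() sum to 1 while the rows of answer generally do not, which would be consistent with metric.build_sparse_adjacency row-normalizing after symmetrizing, whereas the test's reference normalizes first and then averages with the transpose. This is only a guess from the numbers, not a confirmed diagnosis; the sketch below uses plain dense tensors (not the library call) to show that the two orders generally disagree:

import torch

torch.manual_seed(0)
n = 10
# Random non-negative matrix with a strictly positive diagonal, standing in for the
# dense adjacency the test builds from 1 / (1 + w ** 2) weights.
A = torch.diag(torch.rand(n) + 0.1) + torch.rand(n, n) * (torch.rand(n, n) < 0.2).float()

# Order used by the test's reference: row-normalize, then average with the transpose.
A_norm = A / A.sum(dim=1, keepdim=True)
norm_then_sym = 0.5 * (A_norm + A_norm.t())

# The other order: average with the transpose first, then row-normalize.
S = 0.5 * (A + A.t())
sym_then_norm = S / S.sum(dim=1, keepdim=True)

print(torch.allclose(norm_then_sym, sym_then_norm))  # False in general
print(norm_then_sym.sum(dim=1))  # row sums differ from 1
print(sym_then_norm.sum(dim=1))  # row sums are exactly 1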