From 23e7abe8362e099c2a82c6d535674ffb2891e106 Mon Sep 17 00:00:00 2001 From: "Dr. Alexander Henkes" <62153181+ahenkes1@users.noreply.github.com> Date: Sat, 9 Sep 2023 05:22:21 +0200 Subject: [PATCH] Affine hdf5 export (#221) (#222) * E501. The test file failed with message: 'test_cuba.py:19:80: E501 line too long (80 > 79 characters)' Fixed by reformatting. * Added test for Affine hdf5 export. The test will fail, if a network consisting of an Affine layer cannot be exported. This is the case, e.g., if the layer has no shape ('none'). In that case, h5py will interpret it as an ('o') object, which is not supported by the hdf5 format. * Dense -> Affine. * Added shape. The shape of the Affine layer is set with respect to the output of the respective Dense layer. Previously this was 'None', resulting in an error while exporting to hdf5 due to being interpreted as object type by h5py. Now everything works due to being set to Float. * Changed the shape to 'torch.Size()'. * Added two tests for different modes of the "dynamics" flag. Currently, both tests are failing. This is due to 'l.394' of '/lib/dl/slayer/block/base.py' not being executed. This has to be fixed (?). * Some corrections to the temp_file names. * Added 'vThMant' export in 'base.py'. * Added modified tests. Still failing. * 'neuron' in ... -> 'neuron' not in. 1/2 pass. * Set threshold=-1 and corrected 1 << 17 to 1 << 18. * Check 'vThMant' in h5 file. 
--------- Co-authored-by: PhilippPlank <32519998+PhilippPlank@users.noreply.github.com> Co-authored-by: bamsumit --- src/lava/lib/dl/slayer/block/base.py | 7 +- src/lava/lib/dl/slayer/block/cuba.py | 9 +- tests/lava/lib/dl/slayer/block/test_cuba.py | 117 +++++++++++++++----- 3 files changed, 102 insertions(+), 31 deletions(-) diff --git a/src/lava/lib/dl/slayer/block/base.py b/src/lava/lib/dl/slayer/block/base.py index f491c91e..546f1600 100644 --- a/src/lava/lib/dl/slayer/block/base.py +++ b/src/lava/lib/dl/slayer/block/base.py @@ -391,8 +391,11 @@ def delay(d): self.delay.clamp() # clamp the delay value handle.create_dataset('delay', data=delay(self.delay)) - # for key, value in self.neuron.device_params.items(): - # handle.create_dataset(f'neuron/{key}', data=value) + if self.dynamics is True: + for key, value in self.neuron.device_params.items(): + if key == 'vThMant': + value = (1 << 18) - 1 # set the maximum possible threshold + handle.create_dataset(f'neuron/{key}', data=value) class AbstractTimeDecimation(torch.nn.Module): diff --git a/src/lava/lib/dl/slayer/block/cuba.py b/src/lava/lib/dl/slayer/block/cuba.py index 14b71be0..2c46dcda 100644 --- a/src/lava/lib/dl/slayer/block/cuba.py +++ b/src/lava/lib/dl/slayer/block/cuba.py @@ -3,6 +3,7 @@ """CUBA-LIF layer blocks""" +import numpy as np import torch from . 
import base @@ -68,7 +69,13 @@ def __init__(self, *args, **kwargs): self.synapse = synapse.Dense(**self.synapse_params) if 'pre_hook_fx' not in kwargs.keys(): self.synapse.pre_hook_fx = self.neuron.quantize_8bit - self.neuron._threshold = None + # if 'dynamics=True', set threshold to not 'none' value + if self.dynamics: + self.neuron._threshold = -1 + else: + self.neuron._threshold = None + # set the shape according to synapse output + self.neuron.shape = torch.Size([self.synapse.out_channels]) # this disables spike and reset in dynamics del self.synapse_params diff --git a/tests/lava/lib/dl/slayer/block/test_cuba.py b/tests/lava/lib/dl/slayer/block/test_cuba.py index 6d2e1d69..a55858d8 100644 --- a/tests/lava/lib/dl/slayer/block/test_cuba.py +++ b/tests/lava/lib/dl/slayer/block/test_cuba.py @@ -15,8 +15,9 @@ from lava.proc.conv import utils from lava.proc import io -verbose = True if (('-v' in sys.argv) or ('--verbose' in sys.argv)) else False -# Enabling torch sometimes causes multiprocessing error, especially in unittests +verbose = True if (("-v" in sys.argv) or ("--verbose" in sys.argv)) else False +# Enabling torch sometimes causes multiprocessing error, +# especially in unittests utils.TORCH_IS_AVAILABLE = False # seed = np.random.randint(1000) @@ -25,27 +26,81 @@ torch.manual_seed(seed) torch.cuda.manual_seed(seed) if verbose: - print(f'{seed=}') + print(f"{seed=}") if torch.cuda.is_available(): - device = torch.device('cuda') + device = torch.device("cuda") else: if verbose: - print('CUDA is not available in the system. ' - 'Testing for CPU version only.') - device = torch.device('cpu') + print( + "CUDA is not available in the system. " + "Testing for CPU version only." 
+ ) + device = torch.device("cpu") -tempdir = os.path.dirname(__file__) + '/temp' +tempdir = os.path.dirname(__file__) + "/temp" os.makedirs(tempdir, exist_ok=True) -neuron_param = {'threshold': 0.5, - 'current_decay': 0.5, - 'voltage_decay': 0.5} +neuron_param = {"threshold": 0.5, "current_decay": 0.5, "voltage_decay": 0.5} class TestCUBA(unittest.TestCase): """Test CUBA blocks""" + def test_affine_block_hdf5_export_dynamics_false(self): + """Test affine block hdf5 export in dynamics=false mode.""" + in_features = 10 + out_features = 5 + + net = slayer.block.cuba.Affine( + neuron_params=neuron_param, + in_neurons=in_features, + out_neurons=out_features, + dynamics=False, + count_log=False, + ) + + # export slayer network + h = h5py.File(tempdir + "/cuba_affine_dynamics_false.net", "w") + net.export_hdf5(h.create_group("layer/0")) + + # reload net from h5 and check if 'neuron' exists. + lava_net = netx.hdf5.Network( + net_config=tempdir + "/cuba_affine_dynamics_false.net" + ) + + self.assertTrue("neuron" not in lava_net.net_config["layer"][0].keys()) + + def test_affine_block_hdf5_export_dynamics_true(self): + """Test affine block hdf5 export in dynamics=true mode.""" + in_features = 10 + out_features = 5 + + net = slayer.block.cuba.Affine( + neuron_params=neuron_param, + in_neurons=in_features, + out_neurons=out_features, + dynamics=True, + count_log=False, + ) + + # export slayer network + h = h5py.File(tempdir + "/cuba_affine_dynamics_true.net", "w") + net.export_hdf5(h.create_group("layer/0")) + + # reload net from h5 and check if 'vThMant' is '(1 << 18) - 1'. 
+ # lava_net = netx.hdf5.Network( + # net_config=tempdir + "/cuba_affine_dynamics_true.net" + # ) + # layer = lava_net.layers[0] + # neuron = layer.__dict__["neuron"].__dict__ + + # load network file and check neuron + with h5py.File(tempdir + "/cuba_affine_dynamics_true.net", "r") as hf: + vThMant = np.array(hf["layer"]["0"]["neuron"]["vThMant"]) + + self.assertTrue(vThMant == (1 << 18) - 1) + def test_dense_block(self): """Test dense block with lava process implementation.""" in_features = 10 @@ -58,17 +113,18 @@ def test_dense_block(self): y = net(x) # export slayer network - net.export_hdf5(h5py.File(tempdir + '/cuba_dense.net', - 'w').create_group('layer/0')) + net.export_hdf5( + h5py.File(tempdir + "/cuba_dense.net", "w").create_group("layer/0") + ) # create equivalent lava network using netx and evaluate output - lava_net = netx.hdf5.Network(net_config=tempdir + '/cuba_dense.net') + lava_net = netx.hdf5.Network(net_config=tempdir + "/cuba_dense.net") source = io.source.RingBuffer(data=x[0]) sink = io.sink.RingBuffer(shape=lava_net.out.shape, buffer=time_steps) source.s_out.connect(lava_net.inp) lava_net.out.connect(sink.a_in) run_condition = RunSteps(num_steps=time_steps) - run_config = Loihi1SimCfg(select_tag='fixed_pt') + run_config = Loihi1SimCfg(select_tag="fixed_pt") lava_net.run(condition=run_condition, run_cfg=run_config) output = sink.data.get() lava_net.stop() @@ -76,9 +132,9 @@ def test_dense_block(self): if verbose: print() print(lava_net) - print('slayer output:') + print("slayer output:") print(y[0]) - print('lava output:') + print("lava output:") print(output) self.assertTrue(np.abs(y[0].data.numpy() - output).sum() == 0) @@ -93,25 +149,30 @@ def test_conv_block(self): time_steps = 10 # create slayer network and evaluate output - net = slayer.block.cuba.Conv(neuron_param, - in_features, out_features, kernel_size) - x = (torch.rand([1, in_features, - height, width, time_steps]) > 0.5).float() + net = slayer.block.cuba.Conv( + neuron_param, 
in_features, out_features, kernel_size + ) + x = ( + torch.rand([1, in_features, height, width, time_steps]) > 0.5 + ).float() y = net(x).permute((0, 3, 2, 1, 4)) # export slayer network - net.export_hdf5(h5py.File(tempdir + '/cuba_conv.net', - 'w').create_group('layer/0')) + net.export_hdf5( + h5py.File(tempdir + "/cuba_conv.net", "w").create_group("layer/0") + ) # create equivalent lava network using netx and evaluate output - lava_net = netx.hdf5.Network(net_config=tempdir + '/cuba_conv.net', - input_shape=(width, height, in_features)) + lava_net = netx.hdf5.Network( + net_config=tempdir + "/cuba_conv.net", + input_shape=(width, height, in_features), + ) source = io.source.RingBuffer(data=x[0].permute((2, 1, 0, 3))) sink = io.sink.RingBuffer(shape=lava_net.out.shape, buffer=time_steps) source.s_out.connect(lava_net.inp) lava_net.out.connect(sink.a_in) run_condition = RunSteps(num_steps=time_steps) - run_config = Loihi1SimCfg(select_tag='fixed_pt') + run_config = Loihi1SimCfg(select_tag="fixed_pt") lava_net.run(condition=run_condition, run_cfg=run_config) output = sink.data.get() lava_net.stop() @@ -119,9 +180,9 @@ def test_conv_block(self): if verbose: print() print(lava_net) - print('slayer output:') + print("slayer output:") print(y[0][0, 0, 0]) - print('lava output:') + print("lava output:") print(output[0, 0, 0]) self.assertTrue(np.abs(y[0].data.numpy() - output).sum() == 0)