diff --git a/.gitignore b/.gitignore
index b6e47617..6c9cc340 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,7 +14,7 @@ dist/
downloads/
eggs/
.eggs/
-lib/
+# lib/
lib64/
parts/
sdist/
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..d07b6df4
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2021, Intel NRC Ecosystem
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/README.md b/README.md
index 4e1d7929..48619cf3 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,109 @@
-# lava-dl
-Deep learning using Lava
+# Lava DL
+
+__`lava-dl`__ is a library of deep learning tools, which consists of `lava.lib.dl.slayer` and `lava.lib.dl.netx` for training and deployment of event-based deep neural networks on traditional as well as neuromorphic backends.
+
+## Lava-dl Workflow
+
+
+
+
+
+## __`lava.lib.dl.slayer`__
+
+`lava.lib.dl.slayer` is an enhanced version of [SLAYER](https://github.com/bamsumit/slayerPytorch). The most noteworthy enhancements are support for _recurrent network structures_ and a wider variety of _neuron models_ and _synaptic connections_ (a complete list of features is [here](https://github.com/lava-nc/lava-dl/blob/main/lib/dl/slayer/README.md)). Like its predecessor, this version of SLAYER is built on top of the [PyTorch](https://pytorch.org/) deep learning framework. For smooth integration with Lava, `lava.lib.dl.slayer` supports exporting trained models using the platform-independent __hdf5 network exchange__ format.
+
+In future versions, SLAYER will be fully integrated into Lava to train Lava Processes directly, eliminating the need to explicitly export and import the trained networks.
+
+### Example Code
+
+__Import modules__
+```python
+import torch    # used by the network definition below
+import h5py     # used for hdf5 export
+import lava.lib.dl.slayer as slayer
+```
+__Network Description__
+```python
+# like any standard PyTorch network
+class Network(torch.nn.Module):
+ def __init__(self):
+ ...
+ self.blocks = torch.nn.ModuleList([# sequential network blocks
+ slayer.block.sigma_delta.Input(sdnn_params),
+ slayer.block.sigma_delta.Conv(sdnn_params, 3, 24, 3),
+ slayer.block.sigma_delta.Conv(sdnn_params, 24, 36, 3),
+ slayer.block.rf_iz.Conv(rf_params, 36, 64, 3, delay=True),
+ slayer.block.rf_iz.Conv(sdnn_cnn_params, 64, 64, 3, delay=True),
+ slayer.block.rf_iz.Flatten(),
+ slayer.block.alif.Dense(alif_params, 64*40, 100, delay=True),
+ slayer.block.cuba.Recurrent(cuba_params, 100, 50),
+ slayer.block.cuba.KWTA(cuba_params, 50, 50, num_winners=5)
+ ])
+
+ def forward(self, x):
+ for block in self.blocks:
+ # forward computation is as simple as calling the blocks in a loop
+ x = block(x)
+ return x
+
+ def export_hdf5(self, filename):
+ # network export to hdf5 format
+ h = h5py.File(filename, 'w')
+ layer = h.create_group('layer')
+ for i, b in enumerate(self.blocks):
+ b.export_hdf5(layer.create_group(f'{i}'))
+```
+__Training__
+```python
+net = Network()
+...
+for epoch in range(epochs):
+ for i, (input, ground_truth) in enumerate(train_loader):
+ out = net(input)
+ ...
+ for i, (input, ground_truth) in enumerate(test_loader):
+ out = net(input)
+ ...
+```
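+The `...` in the loop above stands for the usual PyTorch bookkeeping (loss, optimizer, backward pass). A minimal sketch of one way to fill it in is shown below; the sum-over-time readout, loss, and optimizer choices are illustrative assumptions and are not prescribed by `lava-dl`.
+```python
+# Illustrative completion of the elided steps, assuming a classification task
+# where the network output has shape (batch, classes, time) and the class
+# score is taken as the sum of the output over time.
+import torch
+import torch.nn.functional as F
+
+optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
+
+for epoch in range(epochs):
+    net.train()
+    for input, ground_truth in train_loader:
+        out = net(input)
+        loss = F.cross_entropy(out.sum(dim=-1), ground_truth)
+        optimizer.zero_grad()
+        loss.backward()
+        optimizer.step()
+
+    net.eval()
+    with torch.no_grad():
+        for input, ground_truth in test_loader:
+            out = net(input)
+            accuracy = (out.sum(dim=-1).argmax(dim=1) == ground_truth).float().mean()
+```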
+__Export the network__
+```python
+net.export_hdf5('network.net')
+```
+
+## __`lava.lib.dl.netx`__
+
+For inference using Lava, `lava.lib.dl.netx` provides an automated API for loading SLAYER-trained models as Lava Processes, which can be directly run on a desired backend. `lava.lib.dl.netx` imports models saved via SLAYER using the hdf5 network exchange format. The details of the hdf5 network description specification can be found [here](https://github.com/lava-nc/lava-dl/blob/main/lib/dl/netx/README.md).
+
+### Example Code
+
+__Import modules__
+```python
+from lava.lib.dl.netx import hdf5
+```
+__Load the trained network__
+```python
+# Import the model as a Lava Process
+net = hdf5.Network(net_config='network.net')
+```
+__Attach Processes for Input Injection and Output Readout__
+```python
+from lava.proc.io import InputLoader, BiasWriter, OutputReader
+
+# Instantiate the processes
+input_loader = InputLoader(dataset=testing_set)
+bias_writer = BiasWriter(shape=input_shape)
+output = OutputReader()
+
+# Connect the input to the network:
+input_loader.data_out.connect(bias_writer.bias_in)
+bias_writer.bias_out.connect(net.in_layer.bias)
+
+# Connect network-output to the output process
+net.out_layer.neuron.s_out.connect(output.net_output_in)
+```
+__Run the network__
+```python
+from lava.magma import run_configs as rcfg
+from lava.magma import run_conditions as rcnd
+
+net.run(condition=rcnd.RunSteps(total_run_time), run_cfg=rcfg.Loihi1SimCfg())
+```
+
diff --git a/lib/dl/netx/README.md b/lib/dl/netx/README.md
new file mode 100644
index 00000000..667e1f19
--- /dev/null
+++ b/lib/dl/netx/README.md
@@ -0,0 +1,57 @@
+# Lava-dl-netx
+
+`lava.lib.dl.netx` automates the exchange of deep learning network models between Lava and other frameworks. At the moment, we support a simple platform-independent hdf5 network description protocol. In the future, we will extend network exchange support to other neural network exchange formats.
+
+Loading a model to Lava is as simple as:
+```python
+from lava.lib.dl.netx import hdf5
+# Import the model as a Lava Process
+net = hdf5.Network(net_config='network.net')
+```
+
+
+
+The hdf5 network description protocol is described below:
+
+## HDF5 description protocol
+* The computational graph is represented layer-wise in the `layer` field of the hdf5 file.
+* The layer id is assigned serially from `0` to `n-1` starting from input to output.
+ * By default, a sequential connection is assumed.
+  * Skip/recurrent connections are preceded by a concatenate layer that combines the previous layer with a list of non-sequential layers identified by their ids.
+  * Each layer entry consists of a minimum of `shape` and `type` fields. Other relevant fields can be added as necessary.
+ * `shape` entry is a tuple/list in (x, y, z) format.
+ * `type` entry is a string that describes the layer type. See below for a list of supported types.
+    * `neuron` field describes the compartment model and its parameters.
+ * default `neuron` type is `CUBA-LIF`.
+```
+|
+|->layer # description of network layer blocks such as input, dense, conv, pool, flatten, average
+| |->0
+| | |->{shape, type, ...} # each layer description has at least shape and type attribute
+| |->1
+| | |->{shape, type, ...}
+| :
+| |->n-1
+| |->{shape, type, ...}
+|
+| # other fields (not used for network exchange)
+|->simulation # simulation description
+| |->Ts # sampling time. Usually 1
+| |->tSample # length of the sample to run
+```
+
+### Supported layer types
+```
+input : {shape, type}
+flatten: {shape, type}
+average: {shape, type}
+concat : {shape, type, layers}
+dense : {shape, type, neuron, inFeatures, outFeatures, weight, delay(if available)}
+pool : {shape, type, neuron, kernelSize, stride, padding, dilation, weight}
+conv : {shape, type, neuron, inChannels, outChannels, kernelSize, stride,
+ | padding, dilation, groups, weight, delay(if available)}
+ |
+ |-> this is the description of the compartment parameters
+ |-> {iDecay, vDecay, vThMant, refDelay, ... (other additional params)}
+```
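+
+As a quick check, a saved model can be inspected directly with `h5py`. The sketch below only assumes a `network.net` file written in the layout described above, with the named fields stored as hdf5 datasets.
+```python
+import h5py
+
+# Walk the layer-wise description of a saved network
+with h5py.File('network.net', 'r') as f:
+    layers = f['layer']
+    for idx in sorted(layers.keys(), key=int):    # layer ids run from '0' to 'n-1'
+        layer = layers[idx]
+        layer_type = layer['type'][()]            # e.g. input, dense, conv, ...
+        shape = layer['shape'][()]                # (x, y, z)
+        print(idx, layer_type, shape)
+        if 'neuron' in layer:
+            # compartment parameters, e.g. iDecay, vDecay, vThMant, ...
+            print('    neuron params:', list(layer['neuron'].keys()))
+```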
+
diff --git a/lib/dl/slayer/README.md b/lib/dl/slayer/README.md
new file mode 100644
index 00000000..e8d9eab4
--- /dev/null
+++ b/lib/dl/slayer/README.md
@@ -0,0 +1,114 @@
+# Lava-dl-SLAYER
+
+`lava.lib.dl.slayer` is an enhanced version of [SLAYER](https://github.com/bamsumit/slayerPytorch). It now supports a wide variety of learnable event-based _neuron models_, _synapse_, _axon_, and _dendrite_ properties. Other enhancements include various utilities useful during training, such as event IO, visualization, filtering, and logging of training statistics. Here are the key new feature highlights:
+
+* Resonator and adaptive leaky neuron dynamics in addition to conventional leaky neuron dynamics
+* Sigma-Delta wrapper around arbitrary neuron dynamics
+* Graded spikes
+* Learnable neuron parameters at the granularity of individual neurons
+* Persistent states between iterations for robotics applications
+* Arbitrary recurrent architectures including k-winner-take-all (KWTA)
+* Complex valued synapses
+* Sparse connectivity with connection masking
+* Runtime shape identification (eliminates the need for _a priori_ architecture shape calculation)
+
+The overall feature organization is described below.
+
+### Spike (`slayer.spike`)
+SLAYER supports binary as well as graded spikes, both of which are amenable to backpropagation. This opens the door to a new class of neuron behaviors.
+
+### Neuron (`slayer.neuron`)
+Neuron models in SLAYER are built around custom CUDA-accelerated fundamental linear dynamics. Each neuron model has individually learnable parameters for its neural dynamics as well as persistent state behavior between iterations. The following neuron dynamics are supported.
+* Leaky Integrator
+* Resonator
+* Adaptive Integrator with Refractory Dynamics
+* Adaptive Resonator with Refractory Dynamics
+
+These fundamental dynamics can be combined to build a variety of neuron models. The following neuron models are currently supported:
+
+#### CUrrent BAsed leaky integrator: `slayer.neuron.cuba`
+
+
+
+
+#### Adaptive Leaky Integrate and Fire: `slayer.neuron.alif`
+
+
+
+
+#### Resonate and Fire (phase threshold and Izhikevich variant): `slayer.neuron.{rf, rf_iz}`
+
+
+
+
+#### Adaptive resonators: `slayer.neuron.{adrf, adrf_iz}`
+
+
+
+
+#### Sigma Delta neuron with arbitrary activation: `slayer.neuron.sigma_delta`
+
+
+
+
+In addition, SLAYER supports _neuron dropout_ and quantization-ready batch-normalization methods.
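+
+A minimal sketch of instantiating and running one of these neuron models (here, the CUBA leaky integrator) is shown below. The constructor arguments (`threshold`, `current_decay`, `voltage_decay`) and the batch-channel-time input shape are assumptions for illustration, not a definitive API reference.
+```python
+import torch
+import lava.lib.dl.slayer as slayer
+
+# Assumed constructor arguments; exact names may differ.
+neuron = slayer.neuron.cuba.Neuron(
+    threshold=1.0,         # firing threshold
+    current_decay=0.5,     # decay of the synaptic current state
+    voltage_decay=0.5,     # decay of the membrane voltage state
+)
+
+x = torch.rand(8, 100, 200)   # (batch, neurons, time) weighted input
+spikes = neuron(x)            # output spike tensor of the same shape
+```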
+
+### Synapse (`slayer.synapse`)
+
+SLAYER supports dense, conv, and pool synaptic connections. Connection masking is possible for both real and complex synapses: `slayer.synapse.{complex}.{Dense, Conv, Pool}`.
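+
+A minimal usage sketch is given below; the `Dense` constructor arguments and the batch-neuron-time input shape are assumptions for illustration.
+```python
+import torch
+import lava.lib.dl.slayer as slayer
+
+# Assumed constructor arguments; exact names may differ.
+fc = slayer.synapse.Dense(in_neurons=128, out_neurons=64)
+
+x = torch.rand(8, 128, 200)   # (batch, in_neurons, time) spike tensor
+z = fc(x)                     # synaptic current of shape (8, 64, 200)
+```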
+
+### Axon (`slayer.axon`)
+
+* learnable axonal delay (`slayer.axon.Delay`)
+* learnable delta encoder (`slayer.axon.Delta`)
+
+### Dendrite (`slayer.dendrite`)
+
+* Sigma decoder (`slayer.dendrite.Sigma`)
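+
+The delta encoder and sigma decoder form a pair: the encoder transmits only the change of its input between time steps, and the decoder recovers the signal by accumulating those changes. The sketch below illustrates this idea in plain PyTorch; it is a conceptual illustration, not the library API, and a practical delta encoder typically also applies a threshold so that only significant changes generate events.
+```python
+import torch
+
+x = torch.rand(4, 16, 100)    # (batch, channels, time) signal
+
+# Delta encoding: per-time-step change of the signal (sparse if x varies slowly)
+delta = torch.diff(x, dim=-1, prepend=torch.zeros_like(x[..., :1]))
+
+# Sigma decoding: accumulate the changes to reconstruct the original signal
+sigma = torch.cumsum(delta, dim=-1)
+
+assert torch.allclose(sigma, x, atol=1e-5)    # reconstruction matches the input
+```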
+
+### Blocks (`slayer.block`)
+
+SLAYER provides easy encapsulation of neuron, synapse, axon, and dendrite classes for a variety of standard neuron-connection combinations:
+`slayer.block.{cuba, alif, rf, rf_iz, sigma_delta}.{input, output, dense, conv, pool, kwta, recurrent}`
+These blocks can easily be used to define a network and export it in PyTorch as well as our platform-independent hdf5 format.
+
+```python
+import h5py     # used for hdf5 export below
+import torch
+import lava.lib.dl.slayer as slayer
+
+# like any standard PyTorch network
+class Network(torch.nn.Module):
+ def __init__(self):
+ ...
+ self.blocks = torch.nn.ModuleList([# sequential network blocks
+ slayer.block.sigma_delta.Input(sdnn_params),
+ slayer.block.sigma_delta.Conv(sdnn_params, 3, 24, 3),
+ slayer.block.sigma_delta.Conv(sdnn_params, 24, 36, 3),
+ slayer.block.rf_iz.Conv(rf_params, 36, 64, 3, delay=True),
+ slayer.block.rf_iz.Conv(sdnn_cnn_params, 64, 64, 3, delay=True),
+ slayer.block.rf_iz.Flatten(),
+ slayer.block.alif.Dense(alif_params, 64*40, 100, delay=True),
+ slayer.block.cuba.Recurrent(cuba_params, 100, 50),
+ slayer.block.cuba.KWTA(cuba_params, 50, 50, num_winners=5)
+ ])
+
+ def forward(self, x):
+ for block in self.blocks:
+ # forward computation is as simple as calling the blocks in a loop
+ x = block(x)
+ return x
+
+ def export_hdf5(self, filename):
+ # network export to hdf5 format
+ h = h5py.File(filename, 'w')
+ layer = h.create_group('layer')
+ for i, b in enumerate(self.blocks):
+ b.export_hdf5(layer.create_group(f'{i}'))
+```
+
+
+
+
+### Fundamental Practices
+
+* Tensors are always assumed to be in the order `NCHWT` or `NCT`, where `N`: batch, `C`: channel, `H`: height (y), `W`: width (x), and `T`: time (see the sketch below).
+    * `NCHW` is the default PyTorch ordering.
+* Synapse values are maintained in a scaled-down range.
+* Neurons hold the shape of the layer, which is automatically identified at runtime.
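+
+A small illustration of the tensor convention in plain PyTorch (the concrete sizes are arbitrary):
+```python
+import torch
+
+# NCHWT: batch, channel, height, width, time --
+# e.g. a 2-sample batch of 3-channel 32x32 frames over 100 time steps
+frame_input = torch.rand(2, 3, 32, 32, 100)
+
+# NCT: batch, channel/neuron, time --
+# e.g. a 2-sample batch of 128 neurons over 100 time steps (after Flatten / for Dense)
+flat_input = torch.rand(2, 128, 100)
+```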