Update README; NumPy 1.24 forward compatibility; additional linter compatibility (#237)
Robert Muchsel authored Jun 29, 2023
1 parent 8bf55d3 commit d2bec85
Showing 10 changed files with 81 additions and 13 deletions.
3 changes: 2 additions & 1 deletion .gitignore
@@ -17,4 +17,5 @@
 **/__pycache__/
 /super-linter.log
 /super-linter.report
-/venv/
+/venv*/
+/typings/
66 changes: 64 additions & 2 deletions README.md
@@ -1,6 +1,6 @@
 # ADI MAX78000/MAX78002 Model Training and Synthesis
 
-June 14, 2023
+June 27, 2023
 
 ADI’s MAX78000/MAX78002 project is comprised of five repositories:
 
@@ -2681,7 +2681,69 @@
 Certain networks share weights between layers. The tools automatically deduplica…
 Example:
 `weight_source: conv1p3`
 
-##### Dropout and Batch Normalization
+#### Rolling Buffers
+
+Certain networks (such as TCN) require rolling data buffers. These are implemented in the YAML file (for a complete example, please see `networks/ai85-kinetics-actiontcn.yaml`).
+
+##### `data_buffer` (Global)
+
+The data buffer is allocated and named using the global `data_buffer` configuration key. `processors`, `dim` (1D or 2D), `channels`, and `offset` must be defined.
+
+Example:
+
+```yaml
+data_buffer:
+  - processors: 0xffffffff00000000
+    dim: 15
+    channels: 32
+    offset: 0x7fc4
+    name: tcn_buffer
+```
+
+##### `buffer_shift` (per layer)
+
+The buffer is shifted `n` places using `buffer_shift: n`. `in_offset` and `in_dim` are required.
+
+Example:
+
+```yaml
+- processors: 0xffffffff00000000
+  output_processors: 0xffffffff00000000
+  in_offset: 0x7FC8
+  in_channels: 32
+  in_dim: 14
+  in_sequences: tcn_buffer
+  out_offset: 0x7fc4
+  operation: passthrough
+  buffer_shift: 1
+  name: buffer_shift
+```
+
+##### `buffer_insert` (per layer)
+
+New data is added using `buffer_insert: n`.
+
+Example:
+
+```yaml
+- processors: 0xffffffffffffffff
+  output_processors: 0xffffffff00000000
+  in_offset: 0x5000
+  out_offset: 0x7FFC
+  operation: Conv2d
+  in_sequences: res4_out
+  buffer_insert: 1
+  kernel_size: 3x3
+  pad: 0
+  activate: ReLU
+  name: conv5
+```
+
+##### Buffer use
+
+The buffer contents are consumed via `in_sequences` (in the example, `in_sequences: tcn_buffer`). To use the buffer contents as input, `in_offset` and `in_dim` are required.
+
+#### Dropout and Batch Normalization
 * Dropout is only used during training, and corresponding YAML entries are not needed.
 * Batch normalization (“batchnorm”) is fused into the preceding layer’s weights and bias values (see [Batch Normalization](#batch-normalization)), and YAML entries are not needed.
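For illustration only (this sketch is not part of the commit): a layer that consumes the rolling buffer as described under “Buffer use” above, assuming the `data_buffer` definition from the diff. Only `in_sequences`, `in_offset`, `in_dim`, and `in_channels` follow the documented requirements; the operation, output offset, kernel settings, and layer name are hypothetical.

```yaml
# Hypothetical consumer layer; offsets/dimensions mirror the `data_buffer`
# example (offset 0x7fc4, dim 15, channels 32), everything else is illustrative.
- processors: 0xffffffff00000000
  in_offset: 0x7fc4         # the buffer's `offset`
  in_dim: 15                # the buffer's `dim`
  in_channels: 32           # the buffer's `channels`
  in_sequences: tcn_buffer  # reference the buffer by its `name`
  out_offset: 0x4000
  operation: Conv1d
  kernel_size: 3
  pad: 0
  activate: ReLU
  name: tcn_conv
```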
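Background for the batchnorm bullet above (not part of the diff): fusing batch normalization into the preceding layer folds the normalization statistics into that layer's weights and bias. A minimal sketch of the standard folding identity in PyTorch terms; the function name and shapes are illustrative and this is not this repository's own fuser implementation.

```python
import torch

def fuse_batchnorm(weight, bias, running_mean, running_var, gamma, beta, eps=1e-5):
    """Fold y = gamma * (conv(x) - mean) / sqrt(var + eps) + beta into conv.

    Per output channel: w' = w * gamma / sqrt(var + eps)
                        b' = (b - mean) * gamma / sqrt(var + eps) + beta
    """
    scale = gamma / torch.sqrt(running_var + eps)
    # Broadcast the per-channel scale over the remaining weight dimensions
    # (works for both Conv1d and Conv2d weight shapes).
    fused_weight = weight * scale.reshape(-1, *([1] * (weight.dim() - 1)))
    fused_bias = (bias - running_mean) * scale + beta
    return fused_weight, fused_bias
```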
Binary file modified README.pdf
6 changes: 4 additions & 2 deletions datasets/afsk.py
@@ -44,7 +44,7 @@ def __init__(self, root, train, transform=None):
             with open(os.path.join(self.processed_path, self.train1fn), 'rb') as fd:
                 onebits = np.fromfile(fd, dtype=np.uint8)
         else:
-            raise Exception('Unable to locate training data')
+            raise RuntimeError('Unable to locate training data')
 
         # Make available an equal amount from each classification
         numbitper = int(min([len(zerobits), len(onebits)]) / BYTES_PER_SAMPLE)
@@ -71,10 +71,12 @@ def __len__(self):
         return self.avail
 
     def __getitem__(self, idx):
+        assert self.data is not None and self.avail is not None
+
         # Index [0 avail) to byte offset
         offs = idx * BYTES_PER_SAMPLE
 
-        sampl = self.data[offs:offs + BYTES_PER_SAMPLE].astype(np.float)
+        sampl = self.data[offs:offs + BYTES_PER_SAMPLE].astype(np.float64)
 
         # min-max normalization (rescaling)
         _min = sampl.min()
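For context (not part of the diff): NumPy 1.24 removed the type aliases deprecated since NumPy 1.20, which is what these dataset changes address. A minimal sketch of the pattern, with illustrative array names:

```python
import numpy as np

raw = np.arange(8, dtype=np.uint8)  # stand-in for data read via np.fromfile

# `np.float` and `np.long` raise AttributeError on NumPy >= 1.24;
# the explicit dtypes below behave identically on old and new versions.
sample = raw.astype(np.float64)  # was: raw.astype(np.float)
label = raw.astype(np.int64)     # was: raw.astype(np.long)
```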
2 changes: 1 addition & 1 deletion datasets/aisegment.py
@@ -360,7 +360,7 @@ def __getitem__(self, index):
         if self.transform is not None:
             img = self.transform(img)
 
-        return img, lbl.astype(np.long)
+        return img, lbl.astype(np.int64)
 
 
 def AISegment_get_datasets(data, load_train=True, load_test=True, im_size=(80, 80),
6 changes: 3 additions & 3 deletions datasets/camvid.py
@@ -74,8 +74,8 @@ def __init__(self, root_dir, d_type, classes=None, download=True, transform=None
         lbl = np.zeros((lbl_rgb.shape[0], lbl_rgb.shape[1]), dtype=np.uint8)
 
         for label_idx, (_, mask) in enumerate(self.label_mask_dict.items()):
-            res = (lbl_rgb == mask)
-            res = (label_idx+1) * res.all(axis=2)
+            res = lbl_rgb == mask
+            res = (label_idx + 1) * res.all(axis=2)
             lbl += res.astype(np.uint8)
 
         y_start = 0
@@ -153,7 +153,7 @@ def __len__(self):
     def __getitem__(self, idx):
         if self.transform is not None:
             img = self.transform(self.img_list[idx])
-        return img, self.lbl_list[idx].astype(np.long)
+        return img, self.lbl_list[idx].astype(np.int64)
 
 
 def camvid_get_datasets_s80(data, load_train=True, load_test=True, num_classes=33):
2 changes: 1 addition & 1 deletion datasets/kinetics.py
@@ -388,7 +388,7 @@ def __getitem__(self, index):
 
         (imgs, lab, _) = self.dataset[index]
 
-        start_ind = np.random.randint(low=0, high=(len(imgs)-self.num_frames_model+1))
+        start_ind = np.random.randint(low=0, high=len(imgs) - self.num_frames_model + 1)
         images = imgs[start_ind:start_ind+self.num_frames_model]
 
         transforms_album = []
2 changes: 1 addition & 1 deletion datasets/msnoise.py
@@ -301,7 +301,7 @@ def __gen_datasets(self, exp_len=16384, row_len=128, overlap_ratio=0,
         record, fs = librosa.load(record_path, offset=0, sr=None)
         rec_len = np.size(record)
         max_start_time = \
-            ((rec_len / fs - 1) - (rec_len / fs % noise_time_step))
+            (rec_len / fs - 1) - (rec_len / fs % noise_time_step)
         for start_time in np.arange(0,
                                     int((max_start_time+noise_time_step)*fs),
                                     int(noise_time_step*fs)):
4 changes: 2 additions & 2 deletions test_qat.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 ###################################################################################################
 #
-# Copyright (C) 2020-2021 Maxim Integrated Products, Inc. All Rights Reserved.
+# Copyright (C) 2020-2023 Maxim Integrated Products, Inc. All Rights Reserved.
 #
 # Maxim Integrated Products, Inc. Default Copyright Notice:
 # https://www.maximintegrated.com/en/aboutus/legal/copyrights.html
@@ -21,7 +21,7 @@ def create_input_data(num_channels):
     '''
     Creates random data
     '''
-    inp = (2.0 * torch.rand(1, num_channels, 8, 8) - 1.0)  # pylint: disable=no-member
+    inp = 2.0 * torch.rand(1, num_channels, 8, 8) - 1.0  # pylint: disable=no-member
     inp_int = torch.clamp(torch.round(128 * inp), min=-128, max=127.)  # pylint: disable=no-member
     inp = inp_int / 128.
 
3 changes: 3 additions & 0 deletions train.py
@@ -7,6 +7,8 @@
 # https://www.maximintegrated.com/en/aboutus/legal/copyrights.html
 #
 ###################################################################################################
+# pyright: reportMissingModuleSource=false, reportGeneralTypeIssues=false
+# pyright: reportOptionalSubscript=false
 #
 # Portions Copyright (c) 2018 Intel Corporation
 #
@@ -401,6 +403,7 @@ def main():
         args.workers, args.validation_split, args.deterministic,
         args.effective_train_size, args.effective_valid_size, args.effective_test_size,
         test_only=args.evaluate, collate_fn=args.collate_fn, cpu=args.device == 'cpu')
+    assert train_loader is not None and val_loader is not None
 
     if args.sensitivity is not None:
         sensitivities = np.arange(args.sensitivity_range[0], args.sensitivity_range[1],
