diff --git a/.gitignore b/.gitignore
index a4abf29ab..c3e722ab8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -17,4 +17,5 @@
 **/__pycache__/
 /super-linter.log
 /super-linter.report
-/venv/
+/venv*/
+/typings/
diff --git a/README.md b/README.md
index dc7b13cb4..2236c3c24 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
 # ADI MAX78000/MAX78002 Model Training and Synthesis
 
-June 14, 2023
+June 27, 2023
 
 ADI’s MAX78000/MAX78002 project is comprised of five repositories:
 
@@ -2681,7 +2681,69 @@ Certain networks share weights between layers. The tools automatically deduplica
 
 Example: `weight_source: conv1p3`
 
-##### Dropout and Batch Normalization
+#### Rolling Buffers
+
+Certain networks (such as a TCN) require rolling data buffers. These are implemented in the YAML file (for a complete example, please see `networks/ai85-kinetics-actiontcn.yaml`).
+
+##### `data_buffer` (Global)
+
+The data buffer is allocated and named using the global `data_buffer` configuration key. `processors`, `dim` (1D or 2D), `channels`, and `offset` must be defined.
+
+Example:
+
+```yaml
+data_buffer:
+  - processors: 0xffffffff00000000
+    dim: 15
+    channels: 32
+    offset: 0x7fc4
+    name: tcn_buffer
+```
+
+##### `buffer_shift` (per layer)
+
+The buffer is shifted `n` places using `buffer_shift: n`. `in_offset` and `in_dim` are required.
+
+Example:
+
+```yaml
+  - processors: 0xffffffff00000000
+    output_processors: 0xffffffff00000000
+    in_offset: 0x7FC8
+    in_channels: 32
+    in_dim: 14
+    in_sequences: tcn_buffer
+    out_offset: 0x7fc4
+    operation: passthrough
+    buffer_shift: 1
+    name: buffer_shift
+```
+
+##### `buffer_insert` (per layer)
+
+New data is added using `buffer_insert: n`.
+
+Example:
+
+```yaml
+  - processors: 0xffffffffffffffff
+    output_processors: 0xffffffff00000000
+    in_offset: 0x5000
+    out_offset: 0x7FFC
+    operation: Conv2d
+    in_sequences: res4_out
+    buffer_insert: 1
+    kernel_size: 3x3
+    pad: 0
+    activate: ReLU
+    name: conv5
+```
+
+##### Buffer use
+
+The buffer is used with `in_sequences` (in the example, `in_sequences: tcn_buffer`). To use the buffer contents as input, `in_offset` and `in_dim` are required.
+
+#### Dropout and Batch Normalization
 
 * Dropout is only used during training, and corresponding YAML entries are not needed.
 * Batch normalization (“batchnorm”) is fused into the preceding layer’s weights and bias values (see [Batch Normalization](#batch-normalization)), and YAML entries are not needed.
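To round out the `Buffer use` description in the README hunk above, the sketch below shows what a layer that consumes the rolling buffer could look like. It is illustrative only: the input offset, dimension, and channel count are assumed to match the `tcn_buffer` example (`offset: 0x7fc4`, `dim: 15`, 32 channels), while the output offset, operation, kernel settings, and layer name are made-up placeholders rather than an excerpt from `networks/ai85-kinetics-actiontcn.yaml`.

```yaml
# Hypothetical consumer layer: reads the full rolling buffer as its input.
# in_offset/in_dim/in_channels mirror the data_buffer example; the rest is illustrative.
  - processors: 0xffffffff00000000
    in_sequences: tcn_buffer   # take input from the named buffer
    in_offset: 0x7fc4          # required; matches the buffer's offset
    in_dim: 15                 # required; matches the buffer's dim
    in_channels: 32            # matches the buffer's channels
    out_offset: 0x4000         # placeholder output location
    operation: Conv1d
    kernel_size: 3
    pad: 0
    activate: ReLU
    name: tcn_head
```

In the pattern described by the section, each inference first shifts the buffer (`buffer_shift`), then inserts the newest feature column (`buffer_insert`), and finally runs one or more layers such as this over the whole time window via `in_sequences`.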
diff --git a/README.pdf b/README.pdf
index bef75d64a..c67b56520 100644
Binary files a/README.pdf and b/README.pdf differ
diff --git a/datasets/afsk.py b/datasets/afsk.py
index 580fbd813..ea209a97b 100644
--- a/datasets/afsk.py
+++ b/datasets/afsk.py
@@ -44,7 +44,7 @@ def __init__(self, root, train, transform=None):
             with open(os.path.join(self.processed_path, self.train1fn), 'rb') as fd:
                 onebits = np.fromfile(fd, dtype=np.uint8)
         else:
-            raise Exception('Unable to locate training data')
+            raise RuntimeError('Unable to locate training data')
 
         # Make available an equal amount from each classification
         numbitper = int(min([len(zerobits), len(onebits)]) / BYTES_PER_SAMPLE)
@@ -71,10 +71,12 @@ def __len__(self):
         return self.avail
 
     def __getitem__(self, idx):
+        assert self.data is not None and self.avail is not None
+
         # Index [0 avail) to byte offset
        offs = idx * BYTES_PER_SAMPLE
 
-        sampl = self.data[offs:offs + BYTES_PER_SAMPLE].astype(np.float)
+        sampl = self.data[offs:offs + BYTES_PER_SAMPLE].astype(np.float64)
 
         # min-max normalization (rescaling)
         _min = sampl.min()
diff --git a/datasets/aisegment.py b/datasets/aisegment.py
index 72288c378..2380736c9 100644
--- a/datasets/aisegment.py
+++ b/datasets/aisegment.py
@@ -360,7 +360,7 @@ def __getitem__(self, index):
 
         if self.transform is not None:
             img = self.transform(img)
-        return img, lbl.astype(np.long)
+        return img, lbl.astype(np.int64)
 
 
 def AISegment_get_datasets(data, load_train=True, load_test=True, im_size=(80, 80),
diff --git a/datasets/camvid.py b/datasets/camvid.py
index 0ba3ec9da..ae69cfadf 100644
--- a/datasets/camvid.py
+++ b/datasets/camvid.py
@@ -74,8 +74,8 @@ def __init__(self, root_dir, d_type, classes=None, download=True, transform=None
             lbl = np.zeros((lbl_rgb.shape[0], lbl_rgb.shape[1]), dtype=np.uint8)
 
             for label_idx, (_, mask) in enumerate(self.label_mask_dict.items()):
-                res = (lbl_rgb == mask)
-                res = (label_idx+1) * res.all(axis=2)
+                res = lbl_rgb == mask
+                res = (label_idx + 1) * res.all(axis=2)
                 lbl += res.astype(np.uint8)
 
             y_start = 0
@@ -153,7 +153,7 @@ def __len__(self):
     def __getitem__(self, idx):
         if self.transform is not None:
             img = self.transform(self.img_list[idx])
-        return img, self.lbl_list[idx].astype(np.long)
+        return img, self.lbl_list[idx].astype(np.int64)
 
 
 def camvid_get_datasets_s80(data, load_train=True, load_test=True, num_classes=33):
diff --git a/datasets/kinetics.py b/datasets/kinetics.py
index bd8a92e4e..bba5ba8ed 100644
--- a/datasets/kinetics.py
+++ b/datasets/kinetics.py
@@ -388,7 +388,7 @@ def __getitem__(self, index):
 
         (imgs, lab, _) = self.dataset[index]
 
-        start_ind = np.random.randint(low=0, high=(len(imgs)-self.num_frames_model+1))
+        start_ind = np.random.randint(low=0, high=len(imgs) - self.num_frames_model + 1)
         images = imgs[start_ind:start_ind+self.num_frames_model]
 
         transforms_album = []
diff --git a/datasets/msnoise.py b/datasets/msnoise.py
index d983284ef..58e232c29 100644
--- a/datasets/msnoise.py
+++ b/datasets/msnoise.py
@@ -301,7 +301,7 @@ def __gen_datasets(self, exp_len=16384, row_len=128, overlap_ratio=0,
                 record, fs = librosa.load(record_path, offset=0, sr=None)
                 rec_len = np.size(record)
                 max_start_time = \
-                    ((rec_len / fs - 1) - (rec_len / fs % noise_time_step))
+                    (rec_len / fs - 1) - (rec_len / fs % noise_time_step)
 
                 for start_time in np.arange(0, int((max_start_time+noise_time_step)*fs),
                                             int(noise_time_step*fs)):
diff --git a/test_qat.py b/test_qat.py
index 5d5328705..7deda37f0 100755
--- a/test_qat.py
+++ b/test_qat.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 ###################################################################################################
 #
-# Copyright (C) 2020-2021 Maxim Integrated Products, Inc. All Rights Reserved.
+# Copyright (C) 2020-2023 Maxim Integrated Products, Inc. All Rights Reserved.
 #
 # Maxim Integrated Products, Inc. Default Copyright Notice:
 # https://www.maximintegrated.com/en/aboutus/legal/copyrights.html
@@ -21,7 +21,7 @@ def create_input_data(num_channels):
     '''
     Creates random data
     '''
-    inp = (2.0 * torch.rand(1, num_channels, 8, 8) - 1.0)  # pylint: disable=no-member
+    inp = 2.0 * torch.rand(1, num_channels, 8, 8) - 1.0  # pylint: disable=no-member
     inp_int = torch.clamp(torch.round(128 * inp), min=-128, max=127.)  # pylint: disable=no-member
     inp = inp_int / 128.
 
diff --git a/train.py b/train.py
index 4d508700c..e1ca09c58 100644
--- a/train.py
+++ b/train.py
@@ -7,6 +7,8 @@
 # https://www.maximintegrated.com/en/aboutus/legal/copyrights.html
 #
 ###################################################################################################
+# pyright: reportMissingModuleSource=false, reportGeneralTypeIssues=false
+# pyright: reportOptionalSubscript=false
 #
 # Portions Copyright (c) 2018 Intel Corporation
 #
@@ -401,6 +403,7 @@ def main():
         args.workers, args.validation_split, args.deterministic,
         args.effective_train_size, args.effective_valid_size, args.effective_test_size,
         test_only=args.evaluate, collate_fn=args.collate_fn, cpu=args.device == 'cpu')
+    assert train_loader is not None and val_loader is not None
 
     if args.sensitivity is not None:
         sensitivities = np.arange(args.sensitivity_range[0], args.sensitivity_range[1],