diff --git a/export.py b/export.py
index 5baaf8a94e99..55908c075f95 100644
--- a/export.py
+++ b/export.py
@@ -180,7 +180,7 @@ def export_openvino(model, file, half, prefix=colorstr('OpenVINO:')):
         cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}"
         subprocess.check_output(cmd.split())  # export

-        with open(Path(f) / 'meta.yaml', 'w') as g:
+        with open(Path(f) / file.with_suffix('.yaml'), 'w') as g:
             yaml.dump({'stride': int(max(model.stride)), 'names': model.names}, g)  # add metadata.yaml

         LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
diff --git a/models/common.py b/models/common.py
index 9f6ca0eb09bf..547a4c946a9f 100644
--- a/models/common.py
+++ b/models/common.py
@@ -323,9 +323,12 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False,
         super().__init__()
         w = str(weights[0] if isinstance(weights, list) else weights)
         pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = self.model_type(w)  # get backend
-        stride, names = 32, [f'class{i}' for i in range(1000)]  # assign defaults
         w = attempt_download(w)  # download if not local
         fp16 &= (pt or jit or onnx or engine) and device.type != 'cpu'  # FP16
+        stride, names = 32, [f'class{i}' for i in range(1000)]  # assign defaults
+        if data:  # assign class names (optional)
+            with open(data, errors='ignore') as f:
+                names = yaml.safe_load(f)['names']

         if pt:  # PyTorch
             model = attempt_load(weights if isinstance(weights, list) else w, map_location=device)
@@ -365,7 +368,9 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False,
             network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))
             executable_network = ie.compile_model(model=network, device_name="CPU")
             output_layer = next(iter(executable_network.outputs))
-            self._load_metadata(w.parent / 'meta.yaml')  # load metadata
+            meta = w.with_suffix('.yaml')
+            if meta.exists():
+                stride, names = self._load_metadata(meta)  # load metadata
         elif engine:  # TensorRT
             LOGGER.info(f'Loading {w} for TensorRT inference...')
             import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
@@ -431,11 +436,7 @@ def wrap_frozen_graph(gd, inputs, outputs):
             output_details = interpreter.get_output_details()  # outputs
         elif tfjs:
             raise Exception('ERROR: YOLOv5 TF.js inference is not supported')
-
         self.__dict__.update(locals())  # assign all variables to self
-        if not hasattr(self, 'names') and data:  # assign class names (optional)
-            with open(data, errors='ignore') as f:
-                names = yaml.safe_load(f)['names']

     def forward(self, im, augment=False, visualize=False, val=False):
         # YOLOv5 MultiBackend inference
@@ -495,13 +496,6 @@ def forward(self, im, augment=False, visualize=False, val=False):
             y = torch.tensor(y, device=self.device)
         return (y, []) if val else y

-    def _load_metadata(self, f='path/to/meta.yaml'):
-        # Load metadata from meta.yaml if it exists
-        if Path(f).is_file():
-            with open(f, errors='ignore') as f:
-                for k, v in yaml.safe_load(f).items():
-                    setattr(self, k, v)  # assign stride, names
-
     def warmup(self, imgsz=(1, 3, 640, 640)):
         # Warmup model by running inference once
         warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb
@@ -522,6 +516,13 @@ def model_type(p='path/to/model.pt'):
         tflite &= not edgetpu  # *.tflite
         return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs

+    @staticmethod
+    def _load_metadata(f='path/to/meta.yaml'):
+        # Load metadata from meta.yaml if it exists
+        with open(f, errors='ignore') as f:
+            d = yaml.safe_load(f)
+            return d['stride'], d['names']  # assign stride, names
+

 class AutoShape(nn.Module):
     # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
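
Below is a minimal, self-contained sketch of the metadata round-trip this patch sets up: the exporter now writes the sidecar as <model>.yaml next to the OpenVINO .xml/.bin pair instead of a fixed meta.yaml, and DetectMultiBackend resolves it from the weights path. The paths ('yolov5s.pt', 'yolov5s_openvino_model/') and the two-class names list are illustrative assumptions, not part of the diff.

    # Round-trip sketch: export writes <model>.yaml beside the .xml/.bin,
    # inference reads it back. Paths and names are assumed for illustration.
    from pathlib import Path

    import yaml

    file = Path('yolov5s.pt')           # source weights (assumed)
    f = Path('yolov5s_openvino_model')  # export directory (assumed)
    f.mkdir(exist_ok=True)

    # export side: sidecar named after the model, not a fixed 'meta.yaml'
    with open(f / file.with_suffix('.yaml'), 'w') as g:
        yaml.dump({'stride': 32, 'names': ['person', 'bicycle']}, g)

    # inference side: DetectMultiBackend derives the sidecar from the .xml path
    w = f / file.with_suffix('.xml')    # yolov5s_openvino_model/yolov5s.xml
    meta = w.with_suffix('.yaml')
    if meta.exists():
        with open(meta, errors='ignore') as g:
            d = yaml.safe_load(g)
        stride, names = d['stride'], d['names']  # what _load_metadata() returns

Making _load_metadata a @staticmethod that returns an explicit (stride, names) tuple also replaces the old setattr loop, which wrote arbitrary YAML keys onto self.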