From 4d05472d2b50108c0fcfe9208d32cb067a6e21b0 Mon Sep 17 00:00:00 2001 From: Michel Promonet Date: Mon, 15 Aug 2022 23:42:25 +0200 Subject: [PATCH 001/326] Allow multiple-model serving from Flask REST API (#8973) * allow to serve multiple models using flask restapi * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup * Update restapi.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/flask_rest_api/restapi.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/utils/flask_rest_api/restapi.py b/utils/flask_rest_api/restapi.py index 08036dd64490..8482435c861e 100644 --- a/utils/flask_rest_api/restapi.py +++ b/utils/flask_rest_api/restapi.py @@ -1,6 +1,6 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ -Run a Flask REST API exposing a YOLOv5s model +Run a Flask REST API exposing one or more YOLOv5s models """ import argparse @@ -11,12 +11,13 @@ from PIL import Image app = Flask(__name__) +models = {} -DETECTION_URL = "/v1/object-detection/yolov5s" +DETECTION_URL = "/v1/object-detection/" @app.route(DETECTION_URL, methods=["POST"]) -def predict(): +def predict(model): if request.method != "POST": return @@ -30,17 +31,18 @@ def predict(): im_bytes = im_file.read() im = Image.open(io.BytesIO(im_bytes)) - results = model(im, size=640) # reduce size=320 for faster inference - return results.pandas().xyxy[0].to_json(orient="records") + if model in models: + results = models[model](im, size=640) # reduce size=320 for faster inference + return results.pandas().xyxy[0].to_json(orient="records") if __name__ == "__main__": parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model") parser.add_argument("--port", default=5000, type=int, help="port number") + parser.add_argument('--model', nargs='+', default=['yolov5s'], help='model(s) to run, i.e. --model yolov5n yolov5s') opt = parser.parse_args() - # Fix known issue urllib.error.HTTPError 403: rate limit exceeded https://github.com/ultralytics/yolov5/pull/7210 - torch.hub._validate_not_a_forked_repo = lambda a, b, c: True + for m in opt.model: + models[m] = torch.hub.load("ultralytics/yolov5", m, force_reload=True, skip_validation=True) - model = torch.hub.load("ultralytics/yolov5", "yolov5s", force_reload=True) # force_reload to recache app.run(host="0.0.0.0", port=opt.port) # debug=True causes Restarting with stat From 1f5a28711defd98e353bef7655775a3c07c364c7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 16 Aug 2022 13:33:59 +0200 Subject: [PATCH 002/326] Created using Colaboratory --- tutorial.ipynb | 51 +++++++++++++++++++++++++------------------------- 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 540754b357c5..da893c3fe5ec 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1014,23 +1014,9 @@ "source": [ "# Appendix\n", "\n", - "Optional extras below. Unit tests validate repo functionality and should be run on any PRs submitted.\n" + "Additional content below for PyTorch Hub, CI, reproducing results, profiling speeds, VOC training, classification training and TensorRT example." 
] }, - { - "cell_type": "code", - "metadata": { - "id": "mcKoSIK2WSzj" - }, - "source": [ - "# Reproduce\n", - "for x in 'yolov5n', 'yolov5s', 'yolov5m', 'yolov5l', 'yolov5x':\n", - " !python val.py --weights {x}.pt --data coco.yaml --img 640 --task speed # speed\n", - " !python val.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP" - ], - "execution_count": null, - "outputs": [] - }, { "cell_type": "code", "metadata": { @@ -1080,6 +1066,20 @@ "execution_count": null, "outputs": [] }, + { + "cell_type": "code", + "metadata": { + "id": "mcKoSIK2WSzj" + }, + "source": [ + "# Reproduce\n", + "for x in (f'yolov5{x}' for x in 'nsmlx'):\n", + " !python val.py --weights {x}.pt --data coco.yaml --img 640 --task speed # speed\n", + " !python val.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP" + ], + "execution_count": null, + "outputs": [] + }, { "cell_type": "code", "metadata": { @@ -1099,26 +1099,27 @@ { "cell_type": "code", "metadata": { - "id": "RVRSOhEvUdb5" + "id": "BSgFCAcMbk1R" }, "source": [ - "# Evolve\n", - "!python train.py --img 640 --batch 64 --epochs 100 --data coco128.yaml --weights yolov5s.pt --cache --noautoanchor --evolve\n", - "!d=runs/train/evolve && cp evolve.* $d && zip -r evolve.zip $d && gsutil mv evolve.zip gs://bucket # upload results (optional)" + "# VOC\n", + "for b, m in zip([64, 64, 64, 32, 16], [f'yolov5{x}' for x in 'nsmlx']): # batch, model\n", + " !python train.py --batch {b} --weights {m}.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.VOC.yaml --project VOC --name {m} --cache" ], "execution_count": null, "outputs": [] }, { "cell_type": "code", - "metadata": { - "id": "BSgFCAcMbk1R" - }, "source": [ - "# VOC\n", - "for b, m in zip([64, 64, 64, 32, 16], ['yolov5n', 'yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']): # batch, model\n", - " !python train.py --batch {b} --weights {m}.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.VOC.yaml --project VOC --name {m} --cache" + "# Classification\n", + "for m in *(f'yolov5{x}.pt' for x in 'nsmlx'), 'resnet50.pt', 'efficientnet_b0.pt':\n", + " for d in 'mnist', 'fashion-mnist', 'cifar10', 'cifar100', 'imagenette160', 'imagenette320', 'imagenette', 'imagewoof160', 'imagewoof320', 'imagewoof':\n", + " !python classify/train.py --model {m} --data {d} --epochs 10 --project YOLOv5-cls --name {m}-{d}" ], + "metadata": { + "id": "UWGH7H6yakVl" + }, "execution_count": null, "outputs": [] }, From d5f543a1dfa6b0c91d91664b6b35f7f4406e9726 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 16 Aug 2022 13:36:37 +0200 Subject: [PATCH 003/326] Created using Colaboratory --- tutorial.ipynb | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index da893c3fe5ec..c39faab4b415 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1023,19 +1023,19 @@ "id": "GMusP4OAxFu6" }, "source": [ - "# PyTorch Hub\n", "import torch\n", "\n", - "# Model\n", - "model = torch.hub.load('ultralytics/yolov5', 'yolov5s')\n", + "# PyTorch Hub Model\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5n - yolov5x6, custom\n", "\n", "# Images\n", - "dir = 'https://ultralytics.com/images/'\n", - "imgs = [dir + f for f in ('zidane.jpg', 'bus.jpg')] # batch of images\n", + "img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list\n", "\n", "# Inference\n", - "results = model(imgs)\n", - "results.print() # or .show(), .save()" + "results = model(img)\n", + "\n", + "# Results\n", + 
"results.print() # or .show(), .save(), .crop(), .pandas(), etc." ], "execution_count": null, "outputs": [] From 464c2c67138dba780deb35a12baf99700dfa5742 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 16 Aug 2022 16:49:32 +0200 Subject: [PATCH 004/326] Colab and Kaggle loggers removeHandler (#8985) Resolve duplicate outputs in colab. Also optimizes Colab and Kaggle environment checks. Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 68 +++++++++++++++++++----------------------------- 1 file changed, 27 insertions(+), 41 deletions(-) diff --git a/utils/general.py b/utils/general.py index 65130ca57762..2a3ce37cd853 100755 --- a/utils/general.py +++ b/utils/general.py @@ -56,13 +56,35 @@ os.environ['OMP_NUM_THREADS'] = '1' if platform.system() == 'darwin' else str(NUM_THREADS) # OpenMP (PyTorch and SciPy) +def is_ascii(s=''): + # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7) + s = str(s) # convert list, tuple, None, etc. to str + return len(s.encode().decode('ascii', 'ignore')) == len(s) + + +def is_chinese(s='人工智能'): + # Is string composed of any Chinese characters? + return bool(re.search('[\u4e00-\u9fff]', str(s))) + + +def is_colab(): + # Is environment a Google Colab instance? + return 'COLAB_GPU' in os.environ + + def is_kaggle(): # Is environment a Kaggle Notebook? - try: - assert os.environ.get('PWD') == '/kaggle/working' - assert os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com' + return os.environ.get('PWD') == '/kaggle/working' and os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com' + + +def is_docker() -> bool: + """Check if the process runs inside a docker container.""" + if Path("/.dockerenv").exists(): return True - except AssertionError: + try: # check if docker is in control groups + with open("/proc/self/cgroup") as file: + return any("docker" in line for line in file) + except OSError: return False @@ -82,7 +104,7 @@ def is_writeable(dir, test=False): def set_logging(name=None, verbose=VERBOSE): # Sets level and returns logger - if is_kaggle(): + if is_kaggle() or is_colab(): for h in logging.root.handlers: logging.root.removeHandler(h) # remove all handlers associated with the root logger object rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings @@ -228,42 +250,6 @@ def get_latest_run(search_dir='.'): return max(last_list, key=os.path.getctime) if last_list else '' -def is_docker() -> bool: - """Check if the process runs inside a docker container.""" - if Path("/.dockerenv").exists(): - return True - try: # check if docker is in control groups - with open("/proc/self/cgroup") as file: - return any("docker" in line for line in file) - except OSError: - return False - - -def is_colab(): - # Is environment a Google Colab instance? - try: - import google.colab - return True - except ImportError: - return False - - -def is_pip(): - # Is file in a pip package? - return 'site-packages' in Path(__file__).resolve().parts - - -def is_ascii(s=''): - # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7) - s = str(s) # convert list, tuple, None, etc. to str - return len(s.encode().decode('ascii', 'ignore')) == len(s) - - -def is_chinese(s='人工智能'): - # Is string composed of any Chinese characters? 
- return bool(re.search('[\u4e00-\u9fff]', str(s))) - - def emojis(str=''): # Return platform-dependent emoji-safe version of string return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str From d1dfcab60493e80441cb00be33cfd23aabc26f6b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 16 Aug 2022 22:22:52 +0200 Subject: [PATCH 005/326] Created using Colaboratory --- tutorial.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index c39faab4b415..61641bab1833 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1113,7 +1113,7 @@ "cell_type": "code", "source": [ "# Classification\n", - "for m in *(f'yolov5{x}.pt' for x in 'nsmlx'), 'resnet50.pt', 'efficientnet_b0.pt':\n", + "for m in [*(f'yolov5{x}.pt' for x in 'nsmlx'), 'resnet50.pt', 'efficientnet_b0.pt']:\n", " for d in 'mnist', 'fashion-mnist', 'cifar10', 'cifar100', 'imagenette160', 'imagenette320', 'imagenette', 'imagewoof160', 'imagewoof320', 'imagewoof':\n", " !python classify/train.py --model {m} --data {d} --epochs 10 --project YOLOv5-cls --name {m}-{d}" ], From fd004f56485d44c9c65b37c47d0e5f6165e1d944 Mon Sep 17 00:00:00 2001 From: Yana Date: Wed, 17 Aug 2022 05:58:30 +0700 Subject: [PATCH 006/326] Fix bug with resume (#8912) * Fix bug with resume * restore del on resume=false * Update train.py Co-authored-by: Glenn Jocher --- train.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/train.py b/train.py index 12af2a305572..d24ac57df23d 100644 --- a/train.py +++ b/train.py @@ -167,7 +167,8 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Resume best_fitness, start_epoch = 0.0, 0 if pretrained: - best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume) + if resume: + best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume) del ckpt, csd # DP mode From 3456fe65c652fca855687d876bdc873c8f85f798 Mon Sep 17 00:00:00 2001 From: Victor Sonck Date: Wed, 17 Aug 2022 11:51:07 +0200 Subject: [PATCH 007/326] Fix torchvision dependency for ClearML logging (#8993) * Replace torchvision annotator with native local Annotator * Add colored bounding boxes and rewrite for efficiency * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix pep8 issue * Update clearml_utils.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update clearml_utils.py * Reduced conf_threshold to 0.25 to match defaults The 0.25 default conf threshold value is used a default throughout YOLOv5 (i.e. 
in detect.py) and also in other tools like CoreML Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/loggers/clearml/clearml_utils.py | 32 +++++++++++++++----------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index fb9889172562..52320c090ddd 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -3,20 +3,22 @@ import re from pathlib import Path +import numpy as np import yaml -from torchvision.transforms import ToPILImage + +from utils.plots import Annotator, colors try: import clearml from clearml import Dataset, Task - from torchvision.utils import draw_bounding_boxes # WARNING: requires torchvision>=0.9.0 - assert hasattr(clearml, '__version__') # verify package import not local dir except (ImportError, AssertionError): clearml = None def construct_dataset(clearml_info_string): + """Load in a clearml dataset and fill the internal data_dict with its contents. + """ dataset_id = clearml_info_string.replace('clearml://', '') dataset = Dataset.get(dataset_id=dataset_id) dataset_root_path = Path(dataset.get_local_copy()) @@ -120,9 +122,9 @@ def log_debug_samples(self, files, title='Debug Samples'): local_path=str(f), iteration=iteration) - def log_image_with_boxes(self, image_path, boxes, class_names, image): + def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_threshold=0.25): """ - Draw the bounding boxes on a single image and report the result as a ClearML debug sample + Draw the bounding boxes on a single image and report the result as a ClearML debug sample. arguments: image_path (PosixPath) the path the original image file @@ -133,16 +135,20 @@ def log_image_with_boxes(self, image_path, boxes, class_names, image): if len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch and self.current_epoch >= 0: # Log every bbox_interval times and deduplicate for any intermittend extra eval runs if self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images: - converter = ToPILImage() - labels = [] - for conf, class_nr in zip(boxes[:, 4], boxes[:, 5]): + im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2)) + annotator = Annotator(im=im, pil=True) + for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])): + color = colors(i) + class_name = class_names[int(class_nr)] confidence = round(float(conf) * 100, 2) - labels.append(f"{class_name}: {confidence}%") - annotated_image = converter( - draw_bounding_boxes(image=image.mul(255).clamp(0, 255).byte().cpu(), - boxes=boxes[:, :4], - labels=labels)) + label = f"{class_name}: {confidence}%" + + if confidence > conf_threshold: + annotator.rectangle(box.cpu().numpy(), outline=color) + annotator.box_label(box.cpu().numpy(), label=label, color=color) + + annotated_image = annotator.result() self.task.get_logger().report_image(title='Bounding Boxes', series=image_path.name, iteration=self.current_epoch, From d3ea0df8b9f923685ce5f2555c303b8eddbf83fd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 17 Aug 2022 11:59:01 +0200 Subject: [PATCH 008/326] New YOLOv5 Classification Models (#8956) * Update * Logger step fix: Increment step with epochs (#8654) * enhance * revert * allow training from scratch * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see 
https://pre-commit.ci * Update --img argument from train.py single line * fix image size from 640 to 128 * suport custom dataloader and augmentation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * format * Update dataloaders.py * Single line return, single line comment, remove unused argument * address PR comments * fix spelling * don't augment eval set * use fstring * update augmentations.py * new maning convention for transforms * reverse if statement, inline ops * reverse if statement, inline ops * updates * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update dataloaders * Remove additional if statement * Remove is_train as redundant * Cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup2 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update classifier.py * Update augmentations.py * fix: imshow clip warning * update * Revert ToTensorV2 removal * Update classifier.py * Update normalize values, revert uint8 * normalize image using cv2 * remove dedundant comment * Update classifier.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * replace print with logger * commit steps * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Allow logging models from GenericLogger (#8676) * enhance * revert * allow training from scratch * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update --img argument from train.py single line * fix image size from 640 to 128 * suport custom dataloader and augmentation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * format * Update dataloaders.py * Single line return, single line comment, remove unused argument * address PR comments * fix spelling * don't augment eval set * use fstring * update augmentations.py * new maning convention for transforms * reverse if statement, inline ops * reverse if statement, inline ops * updates * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update dataloaders * Remove additional if statement * Remove is_train as redundant * Cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup2 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update classifier.py * Update augmentations.py * fix: imshow clip warning * update * Revert ToTensorV2 removal * Update classifier.py * Update normalize values, revert uint8 * normalize image using cv2 * remove dedundant comment * Update classifier.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * replace print with logger * commit steps * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * support final model logging * update * 
update * update * update * remove curses * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update classifier.py * Update __init__.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher * Update * Update * Update * Update * Update dataset download * Update dataset download * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Pass imgsz to classify_transforms() * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Cos scheduler * Cos scheduler * Remove unused args * Update * Add seed * Add seed * Update * Update * Add run(), main() * Merge master * Merge master * Update * Update * Update * Update * Update * Update * Update * Create YOLOv5 BaseModel class (#8829) * Create BaseModel * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * Hub load device fix * Update Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * Add experiment * Merge master * Attach names * weight decay = 1e-4 * weight decay = 5e-5 * update smart_optimizer console printout * fashion-mnist fix * Merge master * Update Table * Update Table * Remove destroy process group * add kwargs to forward() * fuse fix for resnet50 * nc, names fix for resnet50 * nc, names fix for resnet50 * ONNX CPU inference fix * revert * cuda * if augment or visualize * if augment or visualize * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * New smart_inference_mode() * Update README * Refactor into /classify dir * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * reset defaults * reset defaults * fix gpu predict * warmup * ema half fix * spacing * remove data * remove cache * remove denormalize * save run settings * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * verbose false on initial plots * new save_yaml() function * Update ci-testing.yml * Path(data) CI fix * Separate classification CI * fix val * fix val * fix val * smartCrossEntropyLoss * skip validation on hub load * autodownload with working dir root * str(data) * Dataset usage example * im_show normalize * im_show normalize * add imagenet simple names to multibackend * Add validation speeds * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * 24-space names * Update bash scripts * Update permissions * Add bash script arguments * remove verbose * TRT data fix * names generator fix * optimize if names * update usage * Add local loading * Verbose=False * update names printing * Add Usage examples * Add Usage examples * Add Usage examples * Add Usage examples * named_children * reshape_classifier_outputs * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * update * fix CI * fix incorrect class substitution * fix incorrect class substitution * remove denormalize * ravel fix * cleanup * update opt file printing * update opt file printing * update defaults * add opt 
to checkpoint * Add warning * Add comment * plot half bug fix * Use NotImplementedError * fix export shape report * Fix TRT load * cleanup CI * profile comment * CI fix * Add cls models * avoid inplace error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix usage examples * Update README * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README * Update README * Update README * Update README * Update README * Update README * Update README * Update README * Update README * Update README * Update README * Update README * Update README * Update README * Update README * Update README Co-authored-by: Ayush Chaurasia Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/README_cn.md | 109 +++++++++-- .github/workflows/ci-testing.yml | 32 +-- README.md | 85 +++++++- classify/predict.py | 109 +++++++++++ classify/train.py | 325 +++++++++++++++++++++++++++++++ classify/val.py | 158 +++++++++++++++ data/ImageNet.yaml | 156 +++++++++++++++ data/scripts/download_weights.sh | 11 +- data/scripts/get_coco.sh | 8 +- data/scripts/get_coco128.sh | 2 +- data/scripts/get_imagenet.sh | 51 +++++ export.py | 4 +- models/common.py | 55 +++--- models/experimental.py | 11 +- models/yolo.py | 145 ++++++++------ train.py | 10 +- utils/augmentations.py | 67 ++++++- utils/dataloaders.py | 68 ++++++- utils/general.py | 20 +- utils/loggers/__init__.py | 85 +++++++- utils/plots.py | 29 +++ utils/torch_utils.py | 59 +++++- 22 files changed, 1437 insertions(+), 162 deletions(-) create mode 100644 classify/predict.py create mode 100644 classify/train.py create mode 100644 classify/val.py create mode 100644 data/ImageNet.yaml create mode 100755 data/scripts/get_imagenet.sh diff --git a/.github/README_cn.md b/.github/README_cn.md index 7b56462905d7..86b502df61f7 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -171,26 +171,23 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12 ##
如何与第三方集成
-|Weights and Biases|Roboflow ⭐ 新|
-|:-:|:-:|
-|通过 [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) 自动跟踪和可视化你在云端的所有YOLOv5训练运行状态。|标记并将您的自定义数据集直接导出到YOLOv5,以便用 [Roboflow](https://roboflow.com/?ref=ultralytics) 进行训练。 |
-
-
 ## 为什么选择 YOLOv5
@@ -239,6 +236,84 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi
+
+## Classification ⭐ NEW
+
+YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation, prediction and export! We've made training classifier models super simple. Click below to get started.
+
+ Classification Checkpoints (click to expand)
+
+
+We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4xA100 instance, and we trained ResNet and EfficientNet models alongside them with the same default training settings for comparison. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) for easy reproducibility.
+
+| Model | size<br>(pixels) | acc<br>top1 | acc<br>top5 | Training<br>90 epochs<br>4xA100 (hours) | Speed<br>ONNX CPU<br>(ms) | Speed<br>TensorRT V100<br>(ms) | params<br>(M) | FLOPs<br>@224 (B) |
+|----------------------------------------------------------------------------------------------------|-----------------------|------------------|------------------|----------------------------------------------|--------------------------------|-------------------------------------|--------------------|------------------------|
+| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** |
+| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 |
+| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 |
+| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 |
+| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 |
+| |
+| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 |
+| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 |
+| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 |
+| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 |
+| |
+| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 |
+| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 |
+| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 |
+| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
+
+
+ Table Notes (click to expand)
+
+- All checkpoints are trained to 90 epochs with the SGD optimizer (lr0=0.001) at image size 224 and all default settings. Runs logged to https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2.
+- **Accuracy** values are for single-model single-scale on the [ImageNet-1k](https://www.image-net.org/index.php) dataset.<br>Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224`
+- **Speed** averaged over 100 inference images using a Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM instance.<br>Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1`
+- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.<br>Reproduce by `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224`
+
+
+ +
+ Classification Usage Examples (click to expand)
+
+### Train
+YOLOv5 classification training supports auto-download of MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof, and ImageNet datasets with the `--data` argument. To start training on MNIST, for example, use `--data mnist`. A custom-dataset layout sketch follows these examples.
+
+```bash
+# Single-GPU
+python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128
+
+# Multi-GPU DDP
+python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3
+```
+
+### Val
+Validate the accuracy of a pretrained model. To validate YOLOv5s-cls accuracy on ImageNet:
+```bash
+bash data/scripts/get_imagenet.sh --val  # download ImageNet val split (6.3G, 50000 images)
+python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 224
+```
+
+### Predict
+Run a classification prediction on an image.
+```bash
+python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg
+```
+```python
+model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt')  # load from PyTorch Hub
+```
+
+### Export
+Export a group of trained YOLOv5-cls, ResNet and EfficientNet models to ONNX and TensorRT.
+```bash
+python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224
+```
+
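+For a custom dataset, `classify/train.py` (added later in this patch) expects an ImageNet-style folder layout: the class count is inferred from the sub-folders of `train/`, and evaluation uses `test/`, falling back to `val/` when no `test/` split exists. A minimal sketch, with hypothetical dataset and class names:
+
+```bash
+# 'custom', 'cats' and 'dogs' are illustrative names
+mkdir -p ../datasets/custom/train/{cats,dogs} ../datasets/custom/test/{cats,dogs}
+# place images in each class folder, then point --data at the dataset directory
+python classify/train.py --model yolov5s-cls.pt --data ../datasets/custom --epochs 10 --img 224
+```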
+ + ##
贡献
我们重视您的意见! 我们希望给大家提供尽可能的简单和透明的方式对 YOLOv5 做出贡献。开始之前请先点击并查看我们的 [贡献指南](CONTRIBUTING.md),填写[YOLOv5调查问卷](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 来向我们发送您的经验反馈。真诚感谢我们所有的贡献者! diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 31d38ead530f..aa797c44d487 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -5,9 +5,9 @@ name: YOLOv5 CI on: push: - branches: [master] + branches: [ master ] pull_request: - branches: [master] + branches: [ master ] schedule: - cron: '0 0 * * *' # runs at 00:00 UTC every day @@ -16,9 +16,9 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-latest] - python-version: ['3.9'] # requires python<=3.9 - model: [yolov5n] + os: [ ubuntu-latest ] + python-version: [ '3.9' ] # requires python<=3.9 + model: [ yolov5n ] steps: - uses: actions/checkout@v3 - uses: actions/setup-python@v4 @@ -47,9 +47,9 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-latest, windows-latest] - python-version: ['3.10'] - model: [yolov5n] + os: [ ubuntu-latest, macos-latest, windows-latest ] + python-version: [ '3.10' ] + model: [ yolov5n ] include: - os: ubuntu-latest python-version: '3.7' # '3.6.8' min @@ -87,7 +87,7 @@ jobs: else pip install -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cpu fi - shell: bash # required for Windows compatibility + shell: bash # for Windows compatibility - name: Check environment run: | python -c "import utils; utils.notebook_init()" @@ -100,8 +100,8 @@ jobs: python --version pip --version pip list - - name: Run tests - shell: bash + - name: Test detection + shell: bash # for Windows compatibility run: | # export PYTHONPATH="$PWD" # to run '$ python *.py' files in subdirectories m=${{ matrix.model }} # official weights @@ -123,3 +123,13 @@ jobs: model = torch.hub.load('.', 'custom', path=path, source='local') print(model('data/images/bus.jpg')) EOF + - name: Test classification + shell: bash # for Windows compatibility + run: | + m=${{ matrix.model }}-cls.pt # official weights + b=runs/train-cls/exp/weights/best.pt # best.pt checkpoint + python classify/train.py --imgsz 32 --model $m --data mnist2560 --epochs 1 # train + python classify/val.py --imgsz 32 --weights $b --data ../datasets/mnist2560 # val + python classify/predict.py --imgsz 32 --weights $b --source ../datasets/mnist2560/test/7/60.png # predict + python classify/predict.py --imgsz 32 --weights $m --source data/images/bus.jpg # predict + python export.py --weights $b --img 64 --imgsz 224 --include torchscript # export diff --git a/README.md b/README.md index 62c7ed4f53e6..b368d1d6e264 100644 --- a/README.md +++ b/README.md @@ -201,14 +201,6 @@ Get started in seconds with our verified environments. Click each icon below for |:-:|:-:|:-:|:-:| |Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) - ##
Why YOLOv5
@@ -254,6 +246,83 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi
+
+## Classification ⭐ NEW
+
+YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation, prediction and export! We've made training classifier models super simple. Click below to get started.
+
+ Classification Checkpoints (click to expand)
+
+
+We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4xA100 instance, and we trained ResNet and EfficientNet models alongside them with the same default training settings for comparison. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) for easy reproducibility.
+
+| Model | size<br>(pixels) | acc<br>top1 | acc<br>top5 | Training<br>90 epochs<br>4xA100 (hours) | Speed<br>ONNX CPU<br>(ms) | Speed<br>TensorRT V100<br>(ms) | params<br>(M) | FLOPs<br>@224 (B) |
+|----------------------------------------------------------------------------------------------------|-----------------------|------------------|------------------|----------------------------------------------|--------------------------------|-------------------------------------|--------------------|------------------------|
+| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** |
+| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 |
+| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 |
+| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 |
+| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 |
+| |
+| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 |
+| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 |
+| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 |
+| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 |
+| |
+| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 |
+| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 |
+| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 |
+| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
+
+
+ Table Notes (click to expand)
+
+- All checkpoints are trained to 90 epochs with the SGD optimizer (lr0=0.001) at image size 224 and all default settings. Runs logged to https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2.
+- **Accuracy** values are for single-model single-scale on the [ImageNet-1k](https://www.image-net.org/index.php) dataset.<br>Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224`
+- **Speed** averaged over 100 inference images using a Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM instance.<br>Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1`
+- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.<br>Reproduce by `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224`
+
+
+ +
+ Classification Usage Examples (click to expand)
+
+### Train
+YOLOv5 classification training supports auto-download of MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof, and ImageNet datasets with the `--data` argument. To start training on MNIST, for example, use `--data mnist`.
+
+```bash
+# Single-GPU
+python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128
+
+# Multi-GPU DDP
+python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3
+```
+
+### Val
+Validate the accuracy of a pretrained model. To validate YOLOv5s-cls accuracy on ImageNet:
+```bash
+bash data/scripts/get_imagenet.sh --val  # download ImageNet val split (6.3G, 50000 images)
+python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 224
+```
+
+### Predict
+Run a classification prediction on an image. An expanded sketch of these steps follows these examples.
+```bash
+python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg
+```
+```python
+model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt')  # load from PyTorch Hub
+```
+
+### Export
+Export a group of trained YOLOv5-cls, ResNet and EfficientNet models to ONNX and TensorRT.
+```bash
+python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224
+```
+
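+As a rough sketch of what `classify/predict.py` (added later in this patch) does internally, assuming a repo-root working directory, the `yolov5s-cls.pt` checkpoint and an illustrative local image:
+
+```python
+import cv2
+import torch.nn.functional as F
+
+from models.common import DetectMultiBackend
+from utils.augmentations import classify_transforms
+
+model = DetectMultiBackend('yolov5s-cls.pt')  # load classification checkpoint
+im = cv2.cvtColor(cv2.imread('data/images/bus.jpg'), cv2.COLOR_BGR2RGB)  # BGR to RGB
+im = classify_transforms(224)(im).unsqueeze(0).float()  # resize, normalize, add batch dim
+p = F.softmax(model(im), dim=1)  # class probabilities
+i = p.argsort(1, descending=True)[:, :5].squeeze()  # top-5 class indices
+print([(model.names[int(j)], round(float(p[0, j]), 3)) for j in i])
+```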
+ + ##
Contribute
We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors! diff --git a/classify/predict.py b/classify/predict.py new file mode 100644 index 000000000000..419830d43952 --- /dev/null +++ b/classify/predict.py @@ -0,0 +1,109 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Run classification inference on images + +Usage: + $ python classify/predict.py --weights yolov5s-cls.pt --source im.jpg +""" + +import argparse +import os +import sys +from pathlib import Path + +import cv2 +import torch.nn.functional as F + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from classify.train import imshow_cls +from models.common import DetectMultiBackend +from utils.augmentations import classify_transforms +from utils.general import LOGGER, check_requirements, colorstr, increment_path, print_args +from utils.torch_utils import select_device, smart_inference_mode, time_sync + + +@smart_inference_mode() +def run( + weights=ROOT / 'yolov5s-cls.pt', # model.pt path(s) + source=ROOT / 'data/images/bus.jpg', # file/dir/URL/glob, 0 for webcam + imgsz=224, # inference size + device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu + half=False, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + show=True, + project=ROOT / 'runs/predict-cls', # save to project/name + name='exp', # save to project/name + exist_ok=False, # existing project/name ok, do not increment +): + file = str(source) + seen, dt = 1, [0.0, 0.0, 0.0] + device = select_device(device) + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + save_dir.mkdir(parents=True, exist_ok=True) # make dir + + # Transforms + transforms = classify_transforms(imgsz) + + # Load model + model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half) + model.warmup(imgsz=(1, 3, imgsz, imgsz)) # warmup + + # Image + t1 = time_sync() + im = cv2.cvtColor(cv2.imread(file), cv2.COLOR_BGR2RGB) + im = transforms(im).unsqueeze(0).to(device) + im = im.half() if model.fp16 else im.float() + t2 = time_sync() + dt[0] += t2 - t1 + + # Inference + results = model(im) + t3 = time_sync() + dt[1] += t3 - t2 + + p = F.softmax(results, dim=1) # probabilities + i = p.argsort(1, descending=True)[:, :5].squeeze() # top 5 indices + dt[2] += time_sync() - t3 + LOGGER.info(f"image 1/1 {file}: {imgsz}x{imgsz} {', '.join(f'{model.names[j]} {p[0, j]:.2f}' for j in i)}") + + # Print results + t = tuple(x / seen * 1E3 for x in dt) # speeds per image + shape = (1, 3, imgsz, imgsz) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t) + if show: + imshow_cls(im, f=save_dir / Path(file).name, verbose=True) + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") + return p + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model path(s)') + parser.add_argument('--source', type=str, default=ROOT / 'data/images/bus.jpg', help='file') + parser.add_argument('--imgsz', '--img', 
'--img-size', type=int, default=224, help='train, val image size (pixels)') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + parser.add_argument('--project', default=ROOT / 'runs/predict-cls', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + opt = parser.parse_args() + print_args(vars(opt)) + return opt + + +def main(opt): + check_requirements(exclude=('tensorboard', 'thop')) + run(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/classify/train.py b/classify/train.py new file mode 100644 index 000000000000..f2b465567446 --- /dev/null +++ b/classify/train.py @@ -0,0 +1,325 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Train a YOLOv5 classifier model on a classification dataset +Datasets: --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/custom/dataset' + +Usage: + $ python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 128 + $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 +""" + +import argparse +import os +import subprocess +import sys +import time +from copy import deepcopy +from datetime import datetime +from pathlib import Path + +import torch +import torch.distributed as dist +import torch.hub as hub +import torch.optim.lr_scheduler as lr_scheduler +import torchvision +from torch.cuda import amp +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from classify import val as validate +from models.experimental import attempt_load +from models.yolo import ClassificationModel, DetectionModel +from utils.dataloaders import create_classification_dataloader +from utils.general import (DATASETS_DIR, LOGGER, WorkingDirectory, check_git_status, check_requirements, colorstr, + download, increment_path, init_seeds, print_args, yaml_save) +from utils.loggers import GenericLogger +from utils.plots import imshow_cls +from utils.torch_utils import (ModelEMA, model_info, reshape_classifier_output, select_device, smart_DDP, + smart_optimizer, smartCrossEntropyLoss, torch_distributed_zero_first) + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + + +def train(opt, device): + init_seeds(opt.seed + 1 + RANK, deterministic=True) + save_dir, data, bs, epochs, nw, imgsz, pretrained = \ + opt.save_dir, Path(opt.data), opt.batch_size, opt.epochs, min(os.cpu_count() - 1, opt.workers), \ + opt.imgsz, str(opt.pretrained).lower() == 'true' + cuda = device.type != 'cpu' + + # Directories + wdir = save_dir / 'weights' + wdir.mkdir(parents=True, exist_ok=True) # make dir + last, best = wdir / 'last.pt', wdir / 'best.pt' + + # Save run settings + yaml_save(save_dir / 'opt.yaml', vars(opt)) + + # Logger + logger = GenericLogger(opt=opt, console_logger=LOGGER) if RANK in {-1, 0} 
else None + + # Download Dataset + with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT): + data_dir = data if data.is_dir() else (DATASETS_DIR / data) + if not data_dir.is_dir(): + LOGGER.info(f'\nDataset not found ⚠️, missing path {data_dir}, attempting download...') + t = time.time() + if str(data) == 'imagenet': + subprocess.run(f"bash {ROOT / 'data/scripts/get_imagenet.sh'}", shell=True, check=True) + else: + url = f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip' + download(url, dir=data_dir.parent) + s = f"Dataset download success ✅ ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\n" + LOGGER.info(s) + + # Dataloaders + nc = len([x for x in (data_dir / 'train').glob('*') if x.is_dir()]) # number of classes + trainloader = create_classification_dataloader(path=data_dir / 'train', + imgsz=imgsz, + batch_size=bs // WORLD_SIZE, + augment=True, + cache=opt.cache, + rank=LOCAL_RANK, + workers=nw) + + test_dir = data_dir / 'test' if (data_dir / 'test').exists() else data_dir / 'val' # data/test or data/val + if RANK in {-1, 0}: + testloader = create_classification_dataloader(path=test_dir, + imgsz=imgsz, + batch_size=bs // WORLD_SIZE * 2, + augment=False, + cache=opt.cache, + rank=-1, + workers=nw) + + # Model + with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT): + if Path(opt.model).is_file() or opt.model.endswith('.pt'): + model = attempt_load(opt.model, device='cpu', fuse=False) + elif opt.model in torchvision.models.__dict__: # TorchVision models i.e. resnet50, efficientnet_b0 + model = torchvision.models.__dict__[opt.model](weights='IMAGENET1K_V1' if pretrained else None) + else: + m = hub.list('ultralytics/yolov5') # + hub.list('pytorch/vision') # models + raise ModuleNotFoundError(f'--model {opt.model} not found. Available models are: \n' + '\n'.join(m)) + if isinstance(model, DetectionModel): + LOGGER.warning("WARNING: pass YOLOv5 classifier model with '-cls' suffix, i.e. 
'--model yolov5s-cls.pt'") + model = ClassificationModel(model=model, nc=nc, cutoff=opt.cutoff or 10) # convert to classification model + reshape_classifier_output(model, nc) # update class count + for p in model.parameters(): + p.requires_grad = True # for training + for m in model.modules(): + if not pretrained and hasattr(m, 'reset_parameters'): + m.reset_parameters() + if isinstance(m, torch.nn.Dropout) and opt.dropout is not None: + m.p = opt.dropout # set dropout + model = model.to(device) + names = trainloader.dataset.classes # class names + model.names = names # attach class names + + # Info + if RANK in {-1, 0}: + model_info(model) + if opt.verbose: + LOGGER.info(model) + images, labels = next(iter(trainloader)) + file = imshow_cls(images[:25], labels[:25], names=names, f=save_dir / 'train_images.jpg') + logger.log_images(file, name='Train Examples') + logger.log_graph(model, imgsz) # log model + + # Optimizer + optimizer = smart_optimizer(model, opt.optimizer, opt.lr0, momentum=0.9, decay=5e-5) + + # Scheduler + lrf = 0.01 # final lr (fraction of lr0) + # lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf # cosine + lf = lambda x: (1 - x / epochs) * (1 - lrf) + lrf # linear + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) + # scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=lr0, total_steps=epochs, pct_start=0.1, + # final_div_factor=1 / 25 / lrf) + + # EMA + ema = ModelEMA(model) if RANK in {-1, 0} else None + + # DDP mode + if cuda and RANK != -1: + model = smart_DDP(model) + + # Train + t0 = time.time() + criterion = smartCrossEntropyLoss(label_smoothing=opt.label_smoothing) # loss function + best_fitness = 0.0 + scaler = amp.GradScaler(enabled=cuda) + val = test_dir.stem # 'val' or 'test' + LOGGER.info(f'Image sizes {imgsz} train, {imgsz} test\n' + f'Using {nw * WORLD_SIZE} dataloader workers\n' + f"Logging results to {colorstr('bold', save_dir)}\n" + f'Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n' + f"{'Epoch':>10}{'GPU_mem':>10}{'train_loss':>12}{f'{val}_loss':>12}{'top1_acc':>12}{'top5_acc':>12}") + for epoch in range(epochs): # loop over the dataset multiple times + tloss, vloss, fitness = 0.0, 0.0, 0.0 # train loss, val loss, fitness + model.train() + if RANK != -1: + trainloader.sampler.set_epoch(epoch) + pbar = enumerate(trainloader) + if RANK in {-1, 0}: + pbar = tqdm(enumerate(trainloader), total=len(trainloader), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') + for i, (images, labels) in pbar: # progress bar + images, labels = images.to(device, non_blocking=True), labels.to(device) + + # Forward + with amp.autocast(enabled=cuda): # stability issues when enabled + loss = criterion(model(images), labels) + + # Backward + scaler.scale(loss).backward() + + # Optimize + scaler.unscale_(optimizer) # unscale gradients + torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients + scaler.step(optimizer) + scaler.update() + optimizer.zero_grad() + if ema: + ema.update(model) + + if RANK in {-1, 0}: + # Print + tloss = (tloss * i + loss.item()) / (i + 1) # update mean losses + mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) + pbar.desc = f"{f'{epoch + 1}/{epochs}':>10}{mem:>10}{tloss:>12.3g}" + ' ' * 36 + + # Test + if i == len(pbar) - 1: # last batch + top1, top5, vloss = validate.run(model=ema.ema, + dataloader=testloader, + criterion=criterion, + pbar=pbar) # test accuracy, loss + fitness = top1 # define fitness 
as top1 accuracy + + # Scheduler + scheduler.step() + + # Log metrics + if RANK in {-1, 0}: + # Best fitness + if fitness > best_fitness: + best_fitness = fitness + + # Log + metrics = { + "train/loss": tloss, + f"{val}/loss": vloss, + "metrics/accuracy_top1": top1, + "metrics/accuracy_top5": top5, + "lr/0": optimizer.param_groups[0]['lr']} # learning rate + logger.log_metrics(metrics, epoch) + + # Save model + final_epoch = epoch + 1 == epochs + if (not opt.nosave) or final_epoch: + ckpt = { + 'epoch': epoch, + 'best_fitness': best_fitness, + 'model': deepcopy(ema.ema).half(), # deepcopy(de_parallel(model)).half(), + 'ema': None, # deepcopy(ema.ema).half(), + 'updates': ema.updates, + 'optimizer': None, # optimizer.state_dict(), + 'opt': vars(opt), + 'date': datetime.now().isoformat()} + + # Save last, best and delete + torch.save(ckpt, last) + if best_fitness == fitness: + torch.save(ckpt, best) + del ckpt + + # Train complete + if RANK in {-1, 0} and final_epoch: + LOGGER.info(f'\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)' + f"\nResults saved to {colorstr('bold', save_dir)}" + f"\nPredict: python classify/predict.py --weights {best} --source im.jpg" + f"\nValidate: python classify/val.py --weights {best} --data {data_dir}" + f"\nExport: python export.py --weights {best} --include onnx" + f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')" + f"\nVisualize: https://netron.app\n") + + # Plot examples + images, labels = (x[:25] for x in next(iter(testloader))) # first 25 images and labels + pred = torch.max(ema.ema((images.half() if cuda else images.float()).to(device)), 1)[1] + file = imshow_cls(images, labels, pred, names, verbose=False, f=save_dir / 'test_images.jpg') + + # Log results + meta = {"epochs": epochs, "top1_acc": best_fitness, "date": datetime.now().isoformat()} + logger.log_images(file, name='Test Examples (true-predicted)', epoch=epoch) + logger.log_model(best, epochs, metadata=meta) + + +def parse_opt(known=False): + parser = argparse.ArgumentParser() + parser.add_argument('--model', type=str, default='yolov5s-cls.pt', help='initial weights path') + parser.add_argument('--data', type=str, default='mnist', help='cifar10, cifar100, mnist, imagenet, etc.') + parser.add_argument('--epochs', type=int, default=10) + parser.add_argument('--batch-size', type=int, default=64, help='total batch size for all GPUs') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=128, help='train, val image size (pixels)') + parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--project', default=ROOT / 'runs/train-cls', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--pretrained', nargs='?', const=True, default=True, help='start from i.e. 
--pretrained False') + parser.add_argument('--optimizer', choices=['SGD', 'Adam', 'AdamW', 'RMSProp'], default='Adam', help='optimizer') + parser.add_argument('--lr0', type=float, default=0.001, help='initial learning rate') + parser.add_argument('--label-smoothing', type=float, default=0.1, help='Label smoothing epsilon') + parser.add_argument('--cutoff', type=int, default=None, help='Model layer cutoff index for Classify() head') + parser.add_argument('--dropout', type=float, default=None, help='Dropout (fraction)') + parser.add_argument('--verbose', action='store_true', help='Verbose mode') + parser.add_argument('--seed', type=int, default=0, help='Global training seed') + parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') + return parser.parse_known_args()[0] if known else parser.parse_args() + + +def main(opt): + # Checks + if RANK in {-1, 0}: + print_args(vars(opt)) + check_git_status() + check_requirements() + + # DDP mode + device = select_device(opt.device, batch_size=opt.batch_size) + if LOCAL_RANK != -1: + assert opt.batch_size != -1, 'AutoBatch is coming soon for classification, please pass a valid --batch-size' + assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE' + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + torch.cuda.set_device(LOCAL_RANK) + device = torch.device('cuda', LOCAL_RANK) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + + # Parameters + opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run + + # Train + train(opt, device) + + +def run(**kwargs): + # Usage: from yolov5 import classify; classify.train.run(data=mnist, imgsz=320, model='yolov5m') + opt = parse_opt(True) + for k, v in kwargs.items(): + setattr(opt, k, v) + main(opt) + return opt + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/classify/val.py b/classify/val.py new file mode 100644 index 000000000000..0930ba8c9c51 --- /dev/null +++ b/classify/val.py @@ -0,0 +1,158 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Validate a classification model on a dataset + +Usage: + $ python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet +""" + +import argparse +import os +import sys +from pathlib import Path + +import torch +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.common import DetectMultiBackend +from utils.dataloaders import create_classification_dataloader +from utils.general import LOGGER, check_img_size, check_requirements, colorstr, increment_path, print_args +from utils.torch_utils import select_device, smart_inference_mode, time_sync + + +@smart_inference_mode() +def run( + data=ROOT / '../datasets/mnist', # dataset dir + weights=ROOT / 'yolov5s-cls.pt', # model.pt path(s) + batch_size=128, # batch size + imgsz=224, # inference size (pixels) + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + workers=8, # max dataloader workers (per RANK in DDP mode) + verbose=False, # verbose output + project=ROOT / 'runs/val-cls', # save to project/name + name='exp', # save to project/name + exist_ok=False, # existing project/name ok, do not increment + half=True, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + model=None, + dataloader=None, + criterion=None, + pbar=None, +): + # Initialize/load model and set device + training = model is not None + if training: # called by train.py + device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model + half &= device.type != 'cpu' # half precision only supported on CUDA + model.half() if half else model.float() + else: # called directly + device = select_device(device, batch_size=batch_size) + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + save_dir.mkdir(parents=True, exist_ok=True) # make dir + + # Load model + model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half) + stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine + imgsz = check_img_size(imgsz, s=stride) # check image size + half = model.fp16 # FP16 supported on limited backends with CUDA + if engine: + batch_size = model.batch_size + else: + device = model.device + if not (pt or jit): + batch_size = 1 # export.py models default to batch-size 1 + LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') + + # Dataloader + data = Path(data) + test_dir = data / 'test' if (data / 'test').exists() else data / 'val' # data/test or data/val + dataloader = create_classification_dataloader(path=test_dir, + imgsz=imgsz, + batch_size=batch_size, + augment=False, + rank=-1, + workers=workers) + + model.eval() + pred, targets, loss, dt = [], [], 0, [0.0, 0.0, 0.0] + n = len(dataloader) # number of batches + action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing' + desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}" + bar = tqdm(dataloader, desc, n, not training, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}', position=0) + with torch.cuda.amp.autocast(enabled=device.type != 'cpu'): + for images, labels in bar: + t1 = time_sync() + images, labels = images.to(device, non_blocking=True), labels.to(device) + t2 = time_sync() + dt[0] += t2 - t1 + + y = model(images) + t3 = time_sync() + dt[1] += t3 - t2 + + pred.append(y.argsort(1, descending=True)[:, :5]) + targets.append(labels) + if criterion: + loss += criterion(y, labels) + dt[2] += time_sync() - t3 + + loss /= n + pred, targets = torch.cat(pred), torch.cat(targets) + correct = (targets[:, None] == pred).float() + acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1) # (top1, top5) accuracy + top1, top5 = acc.mean(0).tolist() + + if pbar: + pbar.desc = f"{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}" + if verbose: # all classes + LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}") + LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}") + for i, c in enumerate(model.names): + aci = acc[targets == i] + top1i, top5i = aci.mean(0).tolist() + LOGGER.info(f"{c:>24}{aci.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}") + + # Print results + t = tuple(x / len(dataloader.dataset.samples) * 1E3 for x in dt) # speeds per image + shape = (1, 3, imgsz, imgsz) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms 
post-process per image at shape {shape}' % t) + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") + + return top1, top5, loss + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default=ROOT / '../datasets/mnist', help='dataset path') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model.pt path(s)') + parser.add_argument('--batch-size', type=int, default=128, help='batch size') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='inference size (pixels)') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--verbose', nargs='?', const=True, default=True, help='verbose output') + parser.add_argument('--project', default=ROOT / 'runs/val-cls', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + opt = parser.parse_args() + print_args(vars(opt)) + return opt + + +def main(opt): + check_requirements(exclude=('tensorboard', 'thop')) + run(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/data/ImageNet.yaml b/data/ImageNet.yaml new file mode 100644 index 000000000000..9f89b4268aff --- /dev/null +++ b/data/ImageNet.yaml @@ -0,0 +1,156 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University +# Simplified class names from https://github.com/anishathalye/imagenet-simple-labels +# Example usage: python classify/train.py --data imagenet +# parent +# ├── yolov5 +# └── datasets +# └── imagenet ← downloads here (144 GB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
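The accuracy bookkeeping in classify/val.py above reduces to a top-5 membership test on the sorted logits. A self-contained sketch of that computation, using synthetic logits and labels purely for illustration:

```python
import torch

logits = torch.randn(8, 1000)  # synthetic model outputs (batch 8, 1000 classes)
targets = torch.randint(0, 1000, (8,))  # synthetic ground-truth labels
pred = logits.argsort(1, descending=True)[:, :5]  # top-5 class indices, as in val.py
correct = (targets[:, None] == pred).float()  # (8, 5) hit matrix
acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1)  # (top1, top5) per image
top1, top5 = acc.mean(0).tolist()  # top1 <= top5 by construction
print(f'top1 {top1:.3f}, top5 {top5:.3f}')
```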
+path: ../datasets/imagenet # dataset root dir +train: train # train images (relative to 'path') 1281167 images +val: val # val images (relative to 'path') 50000 images +test: # test images (optional) + +# Classes +nc: 1000 # number of classes +names: ['tench', 'goldfish', 'great white shark', 'tiger shark', 'hammerhead shark', 'electric ray', 'stingray', 'cock', + 'hen', 'ostrich', 'brambling', 'goldfinch', 'house finch', 'junco', 'indigo bunting', 'American robin', + 'bulbul', 'jay', 'magpie', 'chickadee', 'American dipper', 'kite', 'bald eagle', 'vulture', 'great grey owl', + 'fire salamander', 'smooth newt', 'newt', 'spotted salamander', 'axolotl', 'American bullfrog', 'tree frog', + 'tailed frog', 'loggerhead sea turtle', 'leatherback sea turtle', 'mud turtle', 'terrapin', 'box turtle', + 'banded gecko', 'green iguana', 'Carolina anole', 'desert grassland whiptail lizard', 'agama', + 'frilled-necked lizard', 'alligator lizard', 'Gila monster', 'European green lizard', 'chameleon', + 'Komodo dragon', 'Nile crocodile', 'American alligator', 'triceratops', 'worm snake', 'ring-necked snake', + 'eastern hog-nosed snake', 'smooth green snake', 'kingsnake', 'garter snake', 'water snake', 'vine snake', + 'night snake', 'boa constrictor', 'African rock python', 'Indian cobra', 'green mamba', 'sea snake', + 'Saharan horned viper', 'eastern diamondback rattlesnake', 'sidewinder', 'trilobite', 'harvestman', 'scorpion', + 'yellow garden spider', 'barn spider', 'European garden spider', 'southern black widow', 'tarantula', + 'wolf spider', 'tick', 'centipede', 'black grouse', 'ptarmigan', 'ruffed grouse', 'prairie grouse', 'peacock', + 'quail', 'partridge', 'grey parrot', 'macaw', 'sulphur-crested cockatoo', 'lorikeet', 'coucal', 'bee eater', + 'hornbill', 'hummingbird', 'jacamar', 'toucan', 'duck', 'red-breasted merganser', 'goose', 'black swan', + 'tusker', 'echidna', 'platypus', 'wallaby', 'koala', 'wombat', 'jellyfish', 'sea anemone', 'brain coral', + 'flatworm', 'nematode', 'conch', 'snail', 'slug', 'sea slug', 'chiton', 'chambered nautilus', 'Dungeness crab', + 'rock crab', 'fiddler crab', 'red king crab', 'American lobster', 'spiny lobster', 'crayfish', 'hermit crab', + 'isopod', 'white stork', 'black stork', 'spoonbill', 'flamingo', 'little blue heron', 'great egret', 'bittern', + 'crane (bird)', 'limpkin', 'common gallinule', 'American coot', 'bustard', 'ruddy turnstone', 'dunlin', + 'common redshank', 'dowitcher', 'oystercatcher', 'pelican', 'king penguin', 'albatross', 'grey whale', + 'killer whale', 'dugong', 'sea lion', 'Chihuahua', 'Japanese Chin', 'Maltese', 'Pekingese', 'Shih Tzu', + 'King Charles Spaniel', 'Papillon', 'toy terrier', 'Rhodesian Ridgeback', 'Afghan Hound', 'Basset Hound', + 'Beagle', 'Bloodhound', 'Bluetick Coonhound', 'Black and Tan Coonhound', 'Treeing Walker Coonhound', + 'English foxhound', 'Redbone Coonhound', 'borzoi', 'Irish Wolfhound', 'Italian Greyhound', 'Whippet', + 'Ibizan Hound', 'Norwegian Elkhound', 'Otterhound', 'Saluki', 'Scottish Deerhound', 'Weimaraner', + 'Staffordshire Bull Terrier', 'American Staffordshire Terrier', 'Bedlington Terrier', 'Border Terrier', + 'Kerry Blue Terrier', 'Irish Terrier', 'Norfolk Terrier', 'Norwich Terrier', 'Yorkshire Terrier', + 'Wire Fox Terrier', 'Lakeland Terrier', 'Sealyham Terrier', 'Airedale Terrier', 'Cairn Terrier', + 'Australian Terrier', 'Dandie Dinmont Terrier', 'Boston Terrier', 'Miniature Schnauzer', 'Giant Schnauzer', + 'Standard Schnauzer', 'Scottish Terrier', 'Tibetan Terrier', 'Australian Silky 
Terrier', + 'Soft-coated Wheaten Terrier', 'West Highland White Terrier', 'Lhasa Apso', 'Flat-Coated Retriever', + 'Curly-coated Retriever', 'Golden Retriever', 'Labrador Retriever', 'Chesapeake Bay Retriever', + 'German Shorthaired Pointer', 'Vizsla', 'English Setter', 'Irish Setter', 'Gordon Setter', 'Brittany', + 'Clumber Spaniel', 'English Springer Spaniel', 'Welsh Springer Spaniel', 'Cocker Spaniels', 'Sussex Spaniel', + 'Irish Water Spaniel', 'Kuvasz', 'Schipperke', 'Groenendael', 'Malinois', 'Briard', 'Australian Kelpie', + 'Komondor', 'Old English Sheepdog', 'Shetland Sheepdog', 'collie', 'Border Collie', 'Bouvier des Flandres', + 'Rottweiler', 'German Shepherd Dog', 'Dobermann', 'Miniature Pinscher', 'Greater Swiss Mountain Dog', + 'Bernese Mountain Dog', 'Appenzeller Sennenhund', 'Entlebucher Sennenhund', 'Boxer', 'Bullmastiff', + 'Tibetan Mastiff', 'French Bulldog', 'Great Dane', 'St. Bernard', 'husky', 'Alaskan Malamute', 'Siberian Husky', + 'Dalmatian', 'Affenpinscher', 'Basenji', 'pug', 'Leonberger', 'Newfoundland', 'Pyrenean Mountain Dog', + 'Samoyed', 'Pomeranian', 'Chow Chow', 'Keeshond', 'Griffon Bruxellois', 'Pembroke Welsh Corgi', + 'Cardigan Welsh Corgi', 'Toy Poodle', 'Miniature Poodle', 'Standard Poodle', 'Mexican hairless dog', + 'grey wolf', 'Alaskan tundra wolf', 'red wolf', 'coyote', 'dingo', 'dhole', 'African wild dog', 'hyena', + 'red fox', 'kit fox', 'Arctic fox', 'grey fox', 'tabby cat', 'tiger cat', 'Persian cat', 'Siamese cat', + 'Egyptian Mau', 'cougar', 'lynx', 'leopard', 'snow leopard', 'jaguar', 'lion', 'tiger', 'cheetah', 'brown bear', + 'American black bear', 'polar bear', 'sloth bear', 'mongoose', 'meerkat', 'tiger beetle', 'ladybug', + 'ground beetle', 'longhorn beetle', 'leaf beetle', 'dung beetle', 'rhinoceros beetle', 'weevil', 'fly', 'bee', + 'ant', 'grasshopper', 'cricket', 'stick insect', 'cockroach', 'mantis', 'cicada', 'leafhopper', 'lacewing', + 'dragonfly', 'damselfly', 'red admiral', 'ringlet', 'monarch butterfly', 'small white', 'sulphur butterfly', + 'gossamer-winged butterfly', 'starfish', 'sea urchin', 'sea cucumber', 'cottontail rabbit', 'hare', + 'Angora rabbit', 'hamster', 'porcupine', 'fox squirrel', 'marmot', 'beaver', 'guinea pig', 'common sorrel', + 'zebra', 'pig', 'wild boar', 'warthog', 'hippopotamus', 'ox', 'water buffalo', 'bison', 'ram', 'bighorn sheep', + 'Alpine ibex', 'hartebeest', 'impala', 'gazelle', 'dromedary', 'llama', 'weasel', 'mink', 'European polecat', + 'black-footed ferret', 'otter', 'skunk', 'badger', 'armadillo', 'three-toed sloth', 'orangutan', 'gorilla', + 'chimpanzee', 'gibbon', 'siamang', 'guenon', 'patas monkey', 'baboon', 'macaque', 'langur', + 'black-and-white colobus', 'proboscis monkey', 'marmoset', 'white-headed capuchin', 'howler monkey', 'titi', + "Geoffroy's spider monkey", 'common squirrel monkey', 'ring-tailed lemur', 'indri', 'Asian elephant', + 'African bush elephant', 'red panda', 'giant panda', 'snoek', 'eel', 'coho salmon', 'rock beauty', 'clownfish', + 'sturgeon', 'garfish', 'lionfish', 'pufferfish', 'abacus', 'abaya', 'academic gown', 'accordion', + 'acoustic guitar', 'aircraft carrier', 'airliner', 'airship', 'altar', 'ambulance', 'amphibious vehicle', + 'analog clock', 'apiary', 'apron', 'waste container', 'assault rifle', 'backpack', 'bakery', 'balance beam', + 'balloon', 'ballpoint pen', 'Band-Aid', 'banjo', 'baluster', 'barbell', 'barber chair', 'barbershop', 'barn', + 'barometer', 'barrel', 'wheelbarrow', 'baseball', 'basketball', 'bassinet', 'bassoon', 'swimming cap', + 'bath 
towel', 'bathtub', 'station wagon', 'lighthouse', 'beaker', 'military cap', 'beer bottle', 'beer glass', + 'bell-cot', 'bib', 'tandem bicycle', 'bikini', 'ring binder', 'binoculars', 'birdhouse', 'boathouse', + 'bobsleigh', 'bolo tie', 'poke bonnet', 'bookcase', 'bookstore', 'bottle cap', 'bow', 'bow tie', 'brass', 'bra', + 'breakwater', 'breastplate', 'broom', 'bucket', 'buckle', 'bulletproof vest', 'high-speed train', + 'butcher shop', 'taxicab', 'cauldron', 'candle', 'cannon', 'canoe', 'can opener', 'cardigan', 'car mirror', + 'carousel', 'tool kit', 'carton', 'car wheel', 'automated teller machine', 'cassette', 'cassette player', + 'castle', 'catamaran', 'CD player', 'cello', 'mobile phone', 'chain', 'chain-link fence', 'chain mail', + 'chainsaw', 'chest', 'chiffonier', 'chime', 'china cabinet', 'Christmas stocking', 'church', 'movie theater', + 'cleaver', 'cliff dwelling', 'cloak', 'clogs', 'cocktail shaker', 'coffee mug', 'coffeemaker', 'coil', + 'combination lock', 'computer keyboard', 'confectionery store', 'container ship', 'convertible', 'corkscrew', + 'cornet', 'cowboy boot', 'cowboy hat', 'cradle', 'crane (machine)', 'crash helmet', 'crate', 'infant bed', + 'Crock Pot', 'croquet ball', 'crutch', 'cuirass', 'dam', 'desk', 'desktop computer', 'rotary dial telephone', + 'diaper', 'digital clock', 'digital watch', 'dining table', 'dishcloth', 'dishwasher', 'disc brake', 'dock', + 'dog sled', 'dome', 'doormat', 'drilling rig', 'drum', 'drumstick', 'dumbbell', 'Dutch oven', 'electric fan', + 'electric guitar', 'electric locomotive', 'entertainment center', 'envelope', 'espresso machine', 'face powder', + 'feather boa', 'filing cabinet', 'fireboat', 'fire engine', 'fire screen sheet', 'flagpole', 'flute', + 'folding chair', 'football helmet', 'forklift', 'fountain', 'fountain pen', 'four-poster bed', 'freight car', + 'French horn', 'frying pan', 'fur coat', 'garbage truck', 'gas mask', 'gas pump', 'goblet', 'go-kart', + 'golf ball', 'golf cart', 'gondola', 'gong', 'gown', 'grand piano', 'greenhouse', 'grille', 'grocery store', + 'guillotine', 'barrette', 'hair spray', 'half-track', 'hammer', 'hamper', 'hair dryer', 'hand-held computer', + 'handkerchief', 'hard disk drive', 'harmonica', 'harp', 'harvester', 'hatchet', 'holster', 'home theater', + 'honeycomb', 'hook', 'hoop skirt', 'horizontal bar', 'horse-drawn vehicle', 'hourglass', 'iPod', 'clothes iron', + "jack-o'-lantern", 'jeans', 'jeep', 'T-shirt', 'jigsaw puzzle', 'pulled rickshaw', 'joystick', 'kimono', + 'knee pad', 'knot', 'lab coat', 'ladle', 'lampshade', 'laptop computer', 'lawn mower', 'lens cap', + 'paper knife', 'library', 'lifeboat', 'lighter', 'limousine', 'ocean liner', 'lipstick', 'slip-on shoe', + 'lotion', 'speaker', 'loupe', 'sawmill', 'magnetic compass', 'mail bag', 'mailbox', 'tights', 'tank suit', + 'manhole cover', 'maraca', 'marimba', 'mask', 'match', 'maypole', 'maze', 'measuring cup', 'medicine chest', + 'megalith', 'microphone', 'microwave oven', 'military uniform', 'milk can', 'minibus', 'miniskirt', 'minivan', + 'missile', 'mitten', 'mixing bowl', 'mobile home', 'Model T', 'modem', 'monastery', 'monitor', 'moped', + 'mortar', 'square academic cap', 'mosque', 'mosquito net', 'scooter', 'mountain bike', 'tent', 'computer mouse', + 'mousetrap', 'moving van', 'muzzle', 'nail', 'neck brace', 'necklace', 'nipple', 'notebook computer', 'obelisk', + 'oboe', 'ocarina', 'odometer', 'oil filter', 'organ', 'oscilloscope', 'overskirt', 'bullock cart', + 'oxygen mask', 'packet', 'paddle', 'paddle wheel', 'padlock', 
'paintbrush', 'pajamas', 'palace', 'pan flute', + 'paper towel', 'parachute', 'parallel bars', 'park bench', 'parking meter', 'passenger car', 'patio', + 'payphone', 'pedestal', 'pencil case', 'pencil sharpener', 'perfume', 'Petri dish', 'photocopier', 'plectrum', + 'Pickelhaube', 'picket fence', 'pickup truck', 'pier', 'piggy bank', 'pill bottle', 'pillow', 'ping-pong ball', + 'pinwheel', 'pirate ship', 'pitcher', 'hand plane', 'planetarium', 'plastic bag', 'plate rack', 'plow', + 'plunger', 'Polaroid camera', 'pole', 'police van', 'poncho', 'billiard table', 'soda bottle', 'pot', + "potter's wheel", 'power drill', 'prayer rug', 'printer', 'prison', 'projectile', 'projector', 'hockey puck', + 'punching bag', 'purse', 'quill', 'quilt', 'race car', 'racket', 'radiator', 'radio', 'radio telescope', + 'rain barrel', 'recreational vehicle', 'reel', 'reflex camera', 'refrigerator', 'remote control', 'restaurant', + 'revolver', 'rifle', 'rocking chair', 'rotisserie', 'eraser', 'rugby ball', 'ruler', 'running shoe', 'safe', + 'safety pin', 'salt shaker', 'sandal', 'sarong', 'saxophone', 'scabbard', 'weighing scale', 'school bus', + 'schooner', 'scoreboard', 'CRT screen', 'screw', 'screwdriver', 'seat belt', 'sewing machine', 'shield', + 'shoe store', 'shoji', 'shopping basket', 'shopping cart', 'shovel', 'shower cap', 'shower curtain', 'ski', + 'ski mask', 'sleeping bag', 'slide rule', 'sliding door', 'slot machine', 'snorkel', 'snowmobile', 'snowplow', + 'soap dispenser', 'soccer ball', 'sock', 'solar thermal collector', 'sombrero', 'soup bowl', 'space bar', + 'space heater', 'space shuttle', 'spatula', 'motorboat', 'spider web', 'spindle', 'sports car', 'spotlight', + 'stage', 'steam locomotive', 'through arch bridge', 'steel drum', 'stethoscope', 'scarf', 'stone wall', + 'stopwatch', 'stove', 'strainer', 'tram', 'stretcher', 'couch', 'stupa', 'submarine', 'suit', 'sundial', + 'sunglass', 'sunglasses', 'sunscreen', 'suspension bridge', 'mop', 'sweatshirt', 'swimsuit', 'swing', 'switch', + 'syringe', 'table lamp', 'tank', 'tape player', 'teapot', 'teddy bear', 'television', 'tennis ball', + 'thatched roof', 'front curtain', 'thimble', 'threshing machine', 'throne', 'tile roof', 'toaster', + 'tobacco shop', 'toilet seat', 'torch', 'totem pole', 'tow truck', 'toy store', 'tractor', 'semi-trailer truck', + 'tray', 'trench coat', 'tricycle', 'trimaran', 'tripod', 'triumphal arch', 'trolleybus', 'trombone', 'tub', + 'turnstile', 'typewriter keyboard', 'umbrella', 'unicycle', 'upright piano', 'vacuum cleaner', 'vase', 'vault', + 'velvet', 'vending machine', 'vestment', 'viaduct', 'violin', 'volleyball', 'waffle iron', 'wall clock', + 'wallet', 'wardrobe', 'military aircraft', 'sink', 'washing machine', 'water bottle', 'water jug', + 'water tower', 'whiskey jug', 'whistle', 'wig', 'window screen', 'window shade', 'Windsor tie', 'wine bottle', + 'wing', 'wok', 'wooden spoon', 'wool', 'split-rail fence', 'shipwreck', 'yawl', 'yurt', 'website', 'comic book', + 'crossword', 'traffic sign', 'traffic light', 'dust jacket', 'menu', 'plate', 'guacamole', 'consomme', + 'hot pot', 'trifle', 'ice cream', 'ice pop', 'baguette', 'bagel', 'pretzel', 'cheeseburger', 'hot dog', + 'mashed potato', 'cabbage', 'broccoli', 'cauliflower', 'zucchini', 'spaghetti squash', 'acorn squash', + 'butternut squash', 'cucumber', 'artichoke', 'bell pepper', 'cardoon', 'mushroom', 'Granny Smith', 'strawberry', + 'orange', 'lemon', 'fig', 'pineapple', 'banana', 'jackfruit', 'custard apple', 'pomegranate', 'hay', + 'carbonara', 
'chocolate syrup', 'dough', 'meatloaf', 'pizza', 'pot pie', 'burrito', 'red wine', 'espresso', + 'cup', 'eggnog', 'alp', 'bubble', 'cliff', 'coral reef', 'geyser', 'lakeshore', 'promontory', 'shoal', + 'seashore', 'valley', 'volcano', 'baseball player', 'bridegroom', 'scuba diver', 'rapeseed', 'daisy', + "yellow lady's slipper", 'corn', 'acorn', 'rose hip', 'horse chestnut seed', 'coral fungus', 'agaric', + 'gyromitra', 'stinkhorn mushroom', 'earth star', 'hen-of-the-woods', 'bolete', 'ear', + 'toilet paper'] # class names + +# Download script/URL (optional) +download: data/scripts/get_imagenet.sh diff --git a/data/scripts/download_weights.sh b/data/scripts/download_weights.sh index e9fa65394178..a4f3becfdbeb 100755 --- a/data/scripts/download_weights.sh +++ b/data/scripts/download_weights.sh @@ -1,7 +1,7 @@ #!/bin/bash # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Download latest models from https://github.com/ultralytics/yolov5/releases -# Example usage: bash path/to/download_weights.sh +# Example usage: bash data/scripts/download_weights.sh # parent # └── yolov5 # ├── yolov5s.pt ← downloads here @@ -11,10 +11,11 @@ python - <=7.0.0 + if device.type == 'cpu': + device = torch.device('cuda:0') Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) logger = trt.Logger(trt.Logger.INFO) with open(w, 'rb') as f, trt.Runtime(logger) as runtime: @@ -398,8 +396,8 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, if dtype == np.float16: fp16 = True shape = tuple(context.get_binding_shape(index)) - data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device) - bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr())) + im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) + bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size elif coreml: # CoreML @@ -445,9 +443,16 @@ def wrap_frozen_graph(gd, inputs, outputs): input_details = interpreter.get_input_details() # inputs output_details = interpreter.get_output_details() # outputs elif tfjs: - raise Exception('ERROR: YOLOv5 TF.js inference is not supported') + raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported') else: - raise Exception(f'ERROR: {w} is not a supported format') + raise NotImplementedError(f'ERROR: {w} is not a supported format') + + # class names + if 'names' not in locals(): + names = yaml_load(data)['names'] if data else [f'class{i}' for i in range(999)] + if names[0] == 'n01440764' and len(names) == 1000: # ImageNet + names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names + self.__dict__.update(locals()) # assign all variables to self def forward(self, im, augment=False, visualize=False, val=False): @@ -457,7 +462,9 @@ def forward(self, im, augment=False, visualize=False, val=False): im = im.half() # to FP16 if self.pt: # PyTorch - y = self.model(im, augment=augment, visualize=visualize)[0] + y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im) + if isinstance(y, tuple): + y = y[0] elif self.jit: # TorchScript y = self.model(im)[0] elif self.dnn: # ONNX OpenCV DNN @@ -526,7 +533,7 @@ def warmup(self, imgsz=(1, 3, 640, 640)): self.forward(im) # warmup @staticmethod - def model_type(p='path/to/model.pt'): + def _model_type(p='path/to/model.pt'): # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx from export import export_formats suffixes = list(export_formats().Suffix) + ['.xml'] # export suffixes @@ -540,8 +547,7 @@ def model_type(p='path/to/model.pt'): @staticmethod def _load_metadata(f='path/to/meta.yaml'): # Load metadata from meta.yaml if it exists - with open(f, errors='ignore') as f: - d = yaml.safe_load(f) + d = yaml_load(f) return d['stride'], d['names'] # assign stride, names @@ -753,10 +759,13 @@ class Classify(nn.Module): # Classification head, i.e. x(b,c1,20,20) to x(b,c2) def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups super().__init__() - self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1) - self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1) - self.flat = nn.Flatten() + c_ = 1280 # efficientnet_b0 size + self.conv = Conv(c1, c_, k, s, autopad(k, p), g) + self.pool = nn.AdaptiveAvgPool2d(1) # to x(b,c_,1,1) + self.drop = nn.Dropout(p=0.0, inplace=True) + self.linear = nn.Linear(c_, c2) # to x(b,c2) def forward(self, x): - z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list - return self.flat(self.conv(z)) # flatten to x(b,c2) + if isinstance(x, list): + x = torch.cat(x, 1) + return self.linear(self.drop(self.pool(self.conv(x)).flatten(1))) diff --git a/models/experimental.py b/models/experimental.py index 0317c7526c99..cb32d01ba46a 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -79,7 +79,9 @@ def attempt_load(weights, device=None, inplace=True, fuse=True): for w in weights if isinstance(weights, list) else [weights]: ckpt = torch.load(attempt_download(w), map_location='cpu') # load ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model - model.append(ckpt.fuse().eval() if fuse else ckpt.eval()) # fused or un-fused model in eval mode + if not hasattr(ckpt, 'stride'): + ckpt.stride = torch.tensor([32.]) # compatibility update for ResNet etc. 
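For the reworked Classify() head shown above (a conv to the fixed 1280-channel efficientnet_b0 width, global average pool, dropout, then a linear layer), a quick shape sanity-check can be run in isolation. This sketch assumes the YOLOv5 repository root is on sys.path; the channel counts and input size are arbitrary:

```python
import torch
from models.common import Classify  # assumes the YOLOv5 repo root is on sys.path

head = Classify(c1=512, c2=1000)  # e.g. 512 backbone channels -> 1000 classes
x = torch.rand(2, 512, 20, 20)  # hypothetical BCHW feature map
print(head(x).shape)  # torch.Size([2, 1000])
print(head([x[:, :256], x[:, 256:]]).shape)  # list inputs are concatenated on dim 1 first
```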
+ model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval()) # model in eval mode # Compatibility updates for m in model.modules(): @@ -92,11 +94,14 @@ def attempt_load(weights, device=None, inplace=True, fuse=True): elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'): m.recompute_scale_factor = None # torch 1.11.0 compatibility + # Return model if len(model) == 1: - return model[-1] # return model + return model[-1] + + # Return detection ensemble print(f'Ensemble created with {weights}\n') for k in 'names', 'nc', 'yaml': setattr(model, k, getattr(model[0], k)) model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}' - return model # return ensemble + return model diff --git a/models/yolo.py b/models/yolo.py index 307b74844ca0..df4209726e0d 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -90,8 +90,64 @@ def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version return grid, anchor_grid -class Model(nn.Module): - # YOLOv5 model +class BaseModel(nn.Module): + # YOLOv5 base model + def forward(self, x, profile=False, visualize=False): + return self._forward_once(x, profile, visualize) # single-scale inference, train + + def _forward_once(self, x, profile=False, visualize=False): + y, dt = [], [] # outputs + for m in self.model: + if m.f != -1: # if not from previous layer + x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers + if profile: + self._profile_one_layer(m, x, dt) + x = m(x) # run + y.append(x if m.i in self.save else None) # save output + if visualize: + feature_visualization(x, m.type, m.i, save_dir=visualize) + return x + + def _profile_one_layer(self, m, x, dt): + c = m == self.model[-1] # is final layer, copy input as inplace fix + o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs + t = time_sync() + for _ in range(10): + m(x.copy() if c else x) + dt.append((time_sync() - t) * 100) + if m == self.model[0]: + LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module") + LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') + if c: + LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total") + + def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers + LOGGER.info('Fusing layers... 
') + for m in self.model.modules(): + if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'): + m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv + delattr(m, 'bn') # remove batchnorm + m.forward = m.forward_fuse # update forward + self.info() + return self + + def info(self, verbose=False, img_size=640): # print model information + model_info(self, verbose, img_size) + + def _apply(self, fn): + # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers + self = super()._apply(fn) + m = self.model[-1] # Detect() + if isinstance(m, Detect): + m.stride = fn(m.stride) + m.grid = list(map(fn, m.grid)) + if isinstance(m.anchor_grid, list): + m.anchor_grid = list(map(fn, m.anchor_grid)) + return self + + +class DetectionModel(BaseModel): + # YOLOv5 detection model def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes super().__init__() if isinstance(cfg, dict): @@ -149,19 +205,6 @@ def _forward_augment(self, x): y = self._clip_augmented(y) # clip augmented tails return torch.cat(y, 1), None # augmented inference, train - def _forward_once(self, x, profile=False, visualize=False): - y, dt = [], [] # outputs - for m in self.model: - if m.f != -1: # if not from previous layer - x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers - if profile: - self._profile_one_layer(m, x, dt) - x = m(x) # run - y.append(x if m.i in self.save else None) # save output - if visualize: - feature_visualization(x, m.type, m.i, save_dir=visualize) - return x - def _descale_pred(self, p, flips, scale, img_size): # de-scale predictions following augmented inference (inverse operation) if self.inplace: @@ -190,19 +233,6 @@ def _clip_augmented(self, y): y[-1] = y[-1][:, i:] # small return y - def _profile_one_layer(self, m, x, dt): - c = isinstance(m, Detect) # is final layer, copy input as inplace fix - o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs - t = time_sync() - for _ in range(10): - m(x.copy() if c else x) - dt.append((time_sync() - t) * 100) - if m == self.model[0]: - LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module") - LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') - if c: - LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total") - def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency # https://arxiv.org/abs/1708.02002 section 3.3 # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. @@ -213,41 +243,34 @@ def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is b[:, 5:] += math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum()) # cls mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) - def _print_biases(self): - m = self.model[-1] # Detect() module - for mi in m.m: # from - b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) - LOGGER.info( - ('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) - # def _print_weights(self): - # for m in self.model.modules(): - # if type(m) is Bottleneck: - # LOGGER.info('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights +Model = DetectionModel # retain YOLOv5 'Model' class for backwards compatibility - def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers - LOGGER.info('Fusing layers... 
') - for m in self.model.modules(): - if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'): - m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv - delattr(m, 'bn') # remove batchnorm - m.forward = m.forward_fuse # update forward - self.info() - return self - def info(self, verbose=False, img_size=640): # print model information - model_info(self, verbose, img_size) - - def _apply(self, fn): - # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers - self = super()._apply(fn) - m = self.model[-1] # Detect() - if isinstance(m, Detect): - m.stride = fn(m.stride) - m.grid = list(map(fn, m.grid)) - if isinstance(m.anchor_grid, list): - m.anchor_grid = list(map(fn, m.anchor_grid)) - return self +class ClassificationModel(BaseModel): + # YOLOv5 classification model + def __init__(self, cfg=None, model=None, nc=1000, cutoff=10): # yaml, model, number of classes, cutoff index + super().__init__() + self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg) + + def _from_detection_model(self, model, nc=1000, cutoff=10): + # Create a YOLOv5 classification model from a YOLOv5 detection model + if isinstance(model, DetectMultiBackend): + model = model.model # unwrap DetectMultiBackend + model.model = model.model[:cutoff] # backbone + m = model.model[-1] # last layer + ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels # ch into module + c = Classify(ch, nc) # Classify() + c.i, c.f, c.type = m.i, m.f, 'models.common.Classify' # index, from, type + model.model[-1] = c # replace + self.model = model.model + self.stride = model.stride + self.save = [] + self.nc = nc + + def _from_yaml(self, cfg): + # Create a YOLOv5 classification model from a *.yaml file + self.model = None def parse_model(d, ch): # model_dict, input_channels(3) @@ -321,7 +344,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) # Options if opt.line_profile: # profile layer by layer - _ = model(im, profile=True) + model(im, profile=True) elif opt.profile: # profile forward-backward results = profile(input=im, ops=[model], n=3) diff --git a/train.py b/train.py index d24ac57df23d..bbb26cdeafeb 100644 --- a/train.py +++ b/train.py @@ -47,7 +47,7 @@ from utils.general import (LOGGER, check_amp, check_dataset, check_file, check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, - one_cycle, print_args, print_mutation, strip_optimizer) + one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) from utils.loggers import Loggers from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.loss import ComputeLoss @@ -81,10 +81,8 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Save run settings if not evolve: - with open(save_dir / 'hyp.yaml', 'w') as f: - yaml.safe_dump(hyp, f, sort_keys=False) - with open(save_dir / 'opt.yaml', 'w') as f: - yaml.safe_dump(vars(opt), f, sort_keys=False) + yaml_save(save_dir / 'hyp.yaml', hyp) + yaml_save(save_dir / 'opt.yaml', vars(opt)) # Loggers data_dict = None @@ -484,7 +482,7 @@ def main(opt, callbacks=Callbacks()): if RANK in {-1, 0}: print_args(vars(opt)) check_git_status() - check_requirements(exclude=['thop']) + check_requirements() # Resume if opt.resume and not (check_wandb_resume(opt) or opt.evolve): # resume from specified or most recent last.pt diff --git a/utils/augmentations.py 
b/utils/augmentations.py
index 3f764c06ae3b..a55fefa68a76 100644
--- a/utils/augmentations.py
+++ b/utils/augmentations.py
@@ -8,15 +8,21 @@
 import cv2
 import numpy as np
+import torchvision.transforms as T
+import torchvision.transforms.functional as TF
 
 from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box
 from utils.metrics import bbox_ioa
 
+IMAGENET_MEAN = 0.485, 0.456, 0.406  # RGB mean
+IMAGENET_STD = 0.229, 0.224, 0.225  # RGB standard deviation
+
 
 class Albumentations:
     # YOLOv5 Albumentations class (optional, only used if package is installed)
     def __init__(self):
         self.transform = None
+        prefix = colorstr('albumentations: ')
         try:
             import albumentations as A
             check_version(A.__version__, '1.0.3', hard=True)  # version requirement
@@ -31,11 +37,11 @@ def __init__(self):
                 A.ImageCompression(quality_lower=75, p=0.0)]  # transforms
             self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))
 
-            LOGGER.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p))
+            LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))
         except ImportError:  # package not installed, skip
             pass
         except Exception as e:
-            LOGGER.info(colorstr('albumentations: ') + f'{e}')
+            LOGGER.info(f'{prefix}{e}')
 
     def __call__(self, im, labels, p=1.0):
         if self.transform and random.random() < p:
@@ -44,6 +50,18 @@ def __call__(self, im, labels, p=1.0):
         return im, labels
 
 
+def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False):
+    # Normalize RGB images x per ImageNet stats in BCHW format, i.e. = (x - mean) / std
+    return TF.normalize(x, mean, std, inplace=inplace)
+
+
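normalize() above and denormalize() just below are mutually inverse per-channel affine maps, so a round trip should recover the input to within float precision. A minimal sketch, with the denormalize loop inlined and a random batch and tolerance chosen purely for illustration:

```python
import torch
import torchvision.transforms.functional as TF

IMAGENET_MEAN = 0.485, 0.456, 0.406  # RGB mean, as defined above
IMAGENET_STD = 0.229, 0.224, 0.225  # RGB standard deviation, as defined above

x = torch.rand(4, 3, 224, 224)  # hypothetical BCHW batch in [0, 1]
y = TF.normalize(x.clone(), IMAGENET_MEAN, IMAGENET_STD)  # what normalize() does
for i in range(3):  # what denormalize() does, inlined
    y[:, i] = y[:, i] * IMAGENET_STD[i] + IMAGENET_MEAN[i]
assert torch.allclose(x, y, atol=1e-5)  # round trip recovers the input
```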
+def denormalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD):
+    # Denormalize RGB images x per ImageNet stats in BCHW format, i.e. = x * std + mean
+    for i in range(3):
+        x[:, i] = x[:, i] * std[i] + mean[i]
+    return x
+
+
 def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):
     # HSV color-space augmentation
     if hgain or sgain or vgain:
@@ -282,3 +300,48 @@ def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):
     w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
     ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))  # aspect ratio
     return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr)  # candidates
+
+
+def classify_albumentations(augment=True,
+                            size=224,
+                            scale=(0.08, 1.0),
+                            hflip=0.5,
+                            vflip=0.0,
+                            jitter=0.4,
+                            mean=IMAGENET_MEAN,
+                            std=IMAGENET_STD,
+                            auto_aug=False):
+    # YOLOv5 classification Albumentations (optional, only used if package is installed)
+    prefix = colorstr('albumentations: ')
+    try:
+        import albumentations as A
+        from albumentations.pytorch import ToTensorV2
+        check_version(A.__version__, '1.0.3', hard=True)  # version requirement
+        if augment:  # Resize and crop
+            T = [A.RandomResizedCrop(height=size, width=size, scale=scale)]
+            if auto_aug:
+                # TODO: implement AugMix, AutoAug & RandAug in albumentation
+                LOGGER.info(f'{prefix}auto augmentations are currently not supported')
+            else:
+                if hflip > 0:
+                    T += [A.HorizontalFlip(p=hflip)]
+                if vflip > 0:
+                    T += [A.VerticalFlip(p=vflip)]
+                if jitter > 0:
+                    color_jitter = (float(jitter),) * 3  # repeat value for brightness, contrast, saturation, 0 hue
+                    T += [A.ColorJitter(*color_jitter, 0)]
+        else:  # Use fixed crop for eval set (reproducibility)
+            T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)]
+        T += [A.Normalize(mean=mean, std=std), ToTensorV2()]  # Normalize and convert to Tensor
+        LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))
+        return A.Compose(T)
+
+    except ImportError:  # package not installed, skip
+        pass
+    except Exception as e:
+        LOGGER.info(f'{prefix}{e}')
+
+
+def classify_transforms(size=224):
+    # Transforms to apply if albumentations not installed
+    return T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])
diff --git a/utils/dataloaders.py b/utils/dataloaders.py
index 00f6413df7ad..2c04040bf25d 100755
--- a/utils/dataloaders.py
+++ b/utils/dataloaders.py
@@ -22,12 +22,14 @@
 import numpy as np
 import torch
 import torch.nn.functional as F
+import torchvision
 import yaml
 from PIL import ExifTags, Image, ImageOps
 from torch.utils.data import DataLoader, Dataset, dataloader, distributed
 from tqdm import tqdm
 
-from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective
+from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste,
+                                 letterbox, mixup, random_perspective)
 from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str,
                            cv2, is_colab, is_kaggle, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn)
 from utils.torch_utils import torch_distributed_zero_first
@@ -870,7 +872,7 @@ def flatten_recursive(path=DATASETS_DIR / 'coco128'):
 def extract_boxes(path=DATASETS_DIR / 'coco128'):  # from utils.dataloaders import *; extract_boxes()
     # Convert detection dataset into classification dataset, with one directory per class
     path = Path(path)  # images dir
-    shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None  # remove existing
+    shutil.rmtree(path / 
'classification').is_dir() else None # remove existing files = list(path.rglob('*.*')) n = len(files) # number of files for im_file in tqdm(files, total=n): @@ -1090,3 +1092,65 @@ def process_images(self): pass print(f'Done. All images saved to {self.im_dir}') return self.im_dir + + +# Classification dataloaders ------------------------------------------------------------------------------------------- +class ClassificationDataset(torchvision.datasets.ImageFolder): + """ + YOLOv5 Classification Dataset. + Arguments + root: Dataset path + transform: torchvision transforms, used by default + album_transform: Albumentations transforms, used if installed + """ + + def __init__(self, root, augment, imgsz, cache=False): + super().__init__(root=root) + self.torch_transforms = classify_transforms(imgsz) + self.album_transforms = classify_albumentations(augment, imgsz) if augment else None + self.cache_ram = cache is True or cache == 'ram' + self.cache_disk = cache == 'disk' + self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples] # file, index, npy, im + + def __getitem__(self, i): + f, j, fn, im = self.samples[i] # filename, index, filename.with_suffix('.npy'), image + if self.album_transforms: + if self.cache_ram and im is None: + im = self.samples[i][3] = cv2.imread(f) + elif self.cache_disk: + if not fn.exists(): # load npy + np.save(fn.as_posix(), cv2.imread(f)) + im = np.load(fn) + else: # read image + im = cv2.imread(f) # BGR + sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))["image"] + else: + sample = self.torch_transforms(self.loader(f)) + return sample, j + + +def create_classification_dataloader(path, + imgsz=224, + batch_size=16, + augment=True, + cache=False, + rank=-1, + workers=8, + shuffle=True): + # Returns Dataloader object to be used with YOLOv5 Classifier + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = ClassificationDataset(root=path, imgsz=imgsz, augment=augment, cache=cache) + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + generator = torch.Generator() + generator.manual_seed(0) + return InfiniteDataLoader(dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=True, + worker_init_fn=seed_worker, + generator=generator) # or DataLoader(persistent_workers=True) diff --git a/utils/general.py b/utils/general.py index 2a3ce37cd853..1c525c45f649 100755 --- a/utils/general.py +++ b/utils/general.py @@ -217,7 +217,11 @@ def print_args(args: Optional[dict] = None, show_file=True, show_fcn=False): if args is None: # get args automatically args, _, _, frm = inspect.getargvalues(x) args = {k: v for k, v in frm.items() if k in args} - s = (f'{Path(file).stem}: ' if show_file else '') + (f'{fcn}: ' if show_fcn else '') + try: + file = Path(file).resolve().relative_to(ROOT).with_suffix('') + except ValueError: + file = Path(file).stem + s = (f'{file}: ' if show_file else '') + (f'{fcn}: ' if show_fcn else '') LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items())) @@ -345,7 +349,7 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals @try_except def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=()): - # Check installed 
dependencies meet requirements (pass *.txt file or list of packages) + # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages) prefix = colorstr('red', 'bold', 'requirements:') check_python() # check python version if isinstance(requirements, (str, Path)): # requirements.txt file @@ -549,6 +553,18 @@ def amp_allclose(model, im): return False +def yaml_load(file='data.yaml'): + # Single-line safe yaml loading + with open(file, errors='ignore') as f: + return yaml.safe_load(f) + + +def yaml_save(file='data.yaml', data={}): + # Single-line safe yaml saving + with open(file, 'w') as f: + yaml.safe_dump({k: str(v) if isinstance(v, Path) else v for k, v in data.items()}, f, sort_keys=False) + + def url2file(url): # Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 0f3eceafd0db..8ec846f8cfac 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -5,6 +5,7 @@ import os import warnings +from pathlib import Path import pkg_resources as pkg import torch @@ -76,7 +77,7 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, self.logger.info(s) if not clearml: prefix = colorstr('ClearML: ') - s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 runs in ClearML" + s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML" self.logger.info(s) # TensorBoard @@ -121,11 +122,8 @@ def on_train_batch_end(self, ni, model, imgs, targets, paths, plots): # Callback runs on train batch end # ni: number integrated batches (since train start) if plots: - if ni == 0: - if self.tb and not self.opt.sync_bn: # --sync known issue https://github.com/ultralytics/yolov5/issues/3754 - with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress jit trace warning - self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) + if ni == 0 and not self.opt.sync_bn and self.tb: + log_tensorboard_graph(self.tb, model, imgsz=list(imgs.shape[2:4])) if ni < 3: f = self.save_dir / f'train_batch{ni}.jpg' # filename plot_images(imgs, targets, paths, f) @@ -233,3 +231,78 @@ def on_params_update(self, params): # params: A dict containing {param: value} pairs if self.wandb: self.wandb.wandb_run.config.update(params, allow_val_change=True) + + +class GenericLogger: + """ + YOLOv5 General purpose logger for non-task specific logging + Usage: from utils.loggers import GenericLogger; logger = GenericLogger(...) 
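Standalone usage of this GenericLogger might look like the sketch below. The SimpleNamespace is a hypothetical stand-in for the real parse_opt() namespace, and only the TensorBoard backend is exercised:

```python
from pathlib import Path
from types import SimpleNamespace

from utils.general import LOGGER  # assumes the YOLOv5 repo root is on sys.path
from utils.loggers import GenericLogger

opt = SimpleNamespace(save_dir=Path('runs/demo/exp'), project='runs/demo', name='exp')  # stand-in args
logger = GenericLogger(opt, console_logger=LOGGER, include=('tb',))  # TensorBoard only, no wandb
logger.log_metrics({'train/loss': 0.42, 'metrics/accuracy_top1': 0.91}, epoch=0)
```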
+ Arguments + opt: Run arguments + console_logger: Console logger + include: loggers to include + """ + + def __init__(self, opt, console_logger, include=('tb', 'wandb')): + # init default loggers + self.save_dir = opt.save_dir + self.include = include + self.console_logger = console_logger + if 'tb' in self.include: + prefix = colorstr('TensorBoard: ') + self.console_logger.info( + f"{prefix}Start with 'tensorboard --logdir {self.save_dir.parent}', view at http://localhost:6006/") + self.tb = SummaryWriter(str(self.save_dir)) + + if wandb and 'wandb' in self.include: + self.wandb = wandb.init(project="YOLOv5-Classifier" if opt.project == "runs/train" else opt.project, + name=None if opt.name == "exp" else opt.name, + config=opt) + else: + self.wandb = None + + def log_metrics(self, metrics_dict, epoch): + # Log metrics dictionary to all loggers + if self.tb: + for k, v in metrics_dict.items(): + self.tb.add_scalar(k, v, epoch) + + if self.wandb: + self.wandb.log(metrics_dict, step=epoch) + + def log_images(self, files, name='Images', epoch=0): + # Log images to all loggers + files = [Path(f) for f in (files if isinstance(files, (tuple, list)) else [files])] # to Path + files = [f for f in files if f.exists()] # filter by exists + + if self.tb: + for f in files: + self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') + + if self.wandb: + self.wandb.log({name: [wandb.Image(str(f), caption=f.name) for f in files]}, step=epoch) + + def log_graph(self, model, imgsz=(640, 640)): + # Log model graph to all loggers + if self.tb: + log_tensorboard_graph(self.tb, model, imgsz) + + def log_model(self, model_path, epoch=0, metadata={}): + # Log model to all loggers + if self.wandb: + art = wandb.Artifact(name=f"run_{wandb.run.id}_model", type="model", metadata=metadata) + art.add_file(str(model_path)) + wandb.log_artifact(art) + + +def log_tensorboard_graph(tb, model, imgsz=(640, 640)): + # Log model graph to TensorBoard + try: + p = next(model.parameters()) # for device, type + imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz # expand + im = torch.zeros((1, 3, *imgsz)).to(p.device).type_as(p) # input image + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress jit trace warning + tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), []) + except Exception: + print('WARNING: TensorBoard graph visualization failure') diff --git a/utils/plots.py b/utils/plots.py index d050f5d36aba..7417308c4d82 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -388,6 +388,35 @@ def plot_labels(labels, names=(), save_dir=Path('')): plt.close() +def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f=Path('images.jpg')): + # Show classification image grid with labels (optional) and predictions (optional) + from utils.augmentations import denormalize + + names = names or [f'class{i}' for i in range(1000)] + blocks = torch.chunk(denormalize(im.clone()).cpu().float(), len(im), + dim=0) # select batch index 0, block by channels + n = min(len(blocks), nmax) # number of plots + m = min(8, round(n ** 0.5)) # 8 x 8 default + fig, ax = plt.subplots(math.ceil(n / m), m) # 8 rows x n/8 cols + ax = ax.ravel() if m > 1 else [ax] + # plt.subplots_adjust(wspace=0.05, hspace=0.05) + for i in range(n): + ax[i].imshow(blocks[i].squeeze().permute((1, 2, 0)).numpy().clip(0.0, 1.0)) + ax[i].axis('off') + if labels is not None: + s = names[labels[i]] + (f'—{names[pred[i]]}' if pred is not None else '') + ax[i].set_title(s, fontsize=8, 
verticalalignment='top') + plt.savefig(f, dpi=300, bbox_inches='tight') + plt.close() + if verbose: + LOGGER.info(f"Saving {f}") + if labels is not None: + LOGGER.info('True: ' + ' '.join(f'{names[i]:3s}' for i in labels[:nmax])) + if pred is not None: + LOGGER.info('Predicted:' + ' '.join(f'{names[i]:3s}' for i in pred[:nmax])) + return f + + def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve() # Plot evolve.csv hyp evolution results evolve_csv = Path(evolve_csv) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 1ceb0aa346e9..1cdbe20f8670 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -42,6 +42,16 @@ def decorate(fn): return decorate +def smartCrossEntropyLoss(label_smoothing=0.0): + # Returns nn.CrossEntropyLoss with label smoothing enabled for torch>=1.10.0 + if check_version(torch.__version__, '1.10.0'): + return nn.CrossEntropyLoss(label_smoothing=label_smoothing) # loss function + else: + if label_smoothing > 0: + LOGGER.warning(f'WARNING: label smoothing {label_smoothing} requires torch>=1.10.0') + return nn.CrossEntropyLoss() # loss function + + def smart_DDP(model): # Model DDP creation with checks assert not check_version(torch.__version__, '1.12.0', pinned=True), \ @@ -53,6 +63,28 @@ def smart_DDP(model): return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK) +def reshape_classifier_output(model, n=1000): + # Update a TorchVision classification model to class count 'n' if required + from models.common import Classify + name, m = list((model.model if hasattr(model, 'model') else model).named_children())[-1] # last module + if isinstance(m, Classify): # YOLOv5 Classify() head + if m.linear.out_features != n: + m.linear = nn.Linear(m.linear.in_features, n) + elif isinstance(m, nn.Linear): # ResNet, EfficientNet + if m.out_features != n: + setattr(model, name, nn.Linear(m.in_features, n)) + elif isinstance(m, nn.Sequential): + types = [type(x) for x in m] + if nn.Linear in types: + i = types.index(nn.Linear) # nn.Linear index + if m[i].out_features != n: + m[i] = nn.Linear(m[i].in_features, n) + elif nn.Conv2d in types: + i = types.index(nn.Conv2d) # nn.Conv2d index + if m[i].out_channels != n: + m[i] = nn.Conv2d(m[i].in_channels, n, m[i].kernel_size, m[i].stride, bias=m[i].bias) + + @contextmanager def torch_distributed_zero_first(local_rank: int): # Decorator to make all processes in distributed training wait for each local_master to do something @@ -117,14 +149,13 @@ def time_sync(): def profile(input, ops, n=10, device=None): - # YOLOv5 speed/memory/FLOPs profiler - # - # Usage: - # input = torch.randn(16, 3, 640, 640) - # m1 = lambda x: x * torch.sigmoid(x) - # m2 = nn.SiLU() - # profile(input, [m1, m2], n=100) # profile over 100 iterations - + """ YOLOv5 speed/memory/FLOPs profiler + Usage: + input = torch.randn(16, 3, 640, 640) + m1 = lambda x: x * torch.sigmoid(x) + m2 = nn.SiLU() + profile(input, [m1, m2], n=100) # profile over 100 iterations + """ results = [] if not isinstance(device, torch.device): device = select_device(device) @@ -313,6 +344,18 @@ def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e-5): return optimizer +def smart_hub_load(repo='ultralytics/yolov5', model='yolov5s', **kwargs): + # YOLOv5 torch.hub.load() wrapper with smart error/issue handling + if check_version(torch.__version__, '1.9.1'): + kwargs['skip_validation'] = True # validation causes GitHub API rate limit errors + if check_version(torch.__version__, '1.12.0'): + kwargs['trust_repo'] = True 
# argument required starting in torch 1.12
+    try:
+        return torch.hub.load(repo, model, **kwargs)
+    except Exception:
+        return torch.hub.load(repo, model, force_reload=True, **kwargs)
+
+
 def smart_resume(ckpt, optimizer, ema=None, weights='yolov5s.pt', epochs=300, resume=True):
     # Resume training from a partially trained checkpoint
     best_fitness = 0.0

From e61756910758f59406255269921e55992ca0b64b Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 17 Aug 2022 15:33:37 +0200
Subject: [PATCH 009/326] Improve classification comments (#8997)

---
 .github/README_cn.md | 10 +++++-----
 README.md            | 10 +++++-----
 classify/predict.py  |  2 +-
 classify/train.py    |  4 +++-
 classify/val.py      |  3 ++-
 5 files changed, 16 insertions(+), 13 deletions(-)

diff --git a/.github/README_cn.md b/.github/README_cn.md
index 86b502df61f7..816adf6b0449 100644
--- a/.github/README_cn.md
+++ b/.github/README_cn.md
@@ -269,7 +269,7 @@ We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4x
Table Notes (click to expand) -- All checkpoints are trained to 90 epochs with SGD optimizer with lr0=0.001 at image size 224 and all default settings. Runs logged to https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2. +- All checkpoints are trained to 90 epochs with SGD optimizer with `lr0=0.001` and `weight_decay=5e-5` at image size 224 and all default settings.
Runs logged to https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2 - **Accuracy** values are for single-model single-scale on [ImageNet-1k](https://www.image-net.org/index.php) dataset.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224` - **Speed** averaged over 100 inference images using a Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM instance.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` - **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
Reproduce by `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224`

@@ -291,14 +291,14 @@ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/trai
 ```
 
 ### Val
-Validate accuracy on a pretrained model. To validate YOLOv5s-cls accuracy on ImageNet.
+Validate YOLOv5m-cls accuracy on ImageNet-1k dataset:
 ```bash
 bash data/scripts/get_imagenet.sh --val  # download ImageNet val split (6.3G, 50000 images)
-python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 224
+python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224  # validate
 ```
 
 ### Predict
-Run a classification prediction on an image.
+Use pretrained YOLOv5s-cls.pt to predict bus.jpg:
 ```bash
 python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg
 ```
 ```python
 model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt')  # load from PyTorch Hub
 ```
 
 ### Export
-Export a group of trained YOLOv5-cls, ResNet and EfficientNet models to ONNX and TensorRT.
+Export a group of trained YOLOv5s-cls, ResNet and EfficientNet models to ONNX and TensorRT:
 ```bash
 python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224
 ```
diff --git a/README.md b/README.md
index b368d1d6e264..7335394402da 100644
--- a/README.md
+++ b/README.md
@@ -278,7 +278,7 @@ We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4x
Table Notes (click to expand) -- All checkpoints are trained to 90 epochs with SGD optimizer with lr0=0.001 at image size 224 and all default settings. Runs logged to https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2. +- All checkpoints are trained to 90 epochs with SGD optimizer with `lr0=0.001` and `weight_decay=5e-5` at image size 224 and all default settings.
Runs logged to https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2 - **Accuracy** values are for single-model single-scale on [ImageNet-1k](https://www.image-net.org/index.php) dataset.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224` - **Speed** averaged over 100 inference images using a Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM instance.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` - **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
Reproduce by `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` @@ -300,14 +300,14 @@ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/trai ``` ### Val -Validate accuracy on a pretrained model. To validate YOLOv5s-cls accuracy on ImageNet. +Validate YOLOv5m-cls accuracy on ImageNet-1k dataset: ```bash bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) -python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 224 +python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate ``` ### Predict -Run a classification prediction on an image. +Use pretrained YOLOv5s-cls.pt to predict bus.jpg: ```bash python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg ``` @@ -316,7 +316,7 @@ model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load ``` ### Export -Export a group of trained YOLOv5-cls, ResNet and EfficientNet models to ONNX and TensorRT. +Export a group of trained YOLOv5s-cls, ResNet and EfficientNet models to ONNX and TensorRT: ```bash python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224 ``` diff --git a/classify/predict.py b/classify/predict.py index 419830d43952..4247e3c8e7fa 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -3,7 +3,7 @@ Run classification inference on images Usage: - $ python classify/predict.py --weights yolov5s-cls.pt --source im.jpg + $ python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg """ import argparse diff --git a/classify/train.py b/classify/train.py index f2b465567446..b85f14236039 100644 --- a/classify/train.py +++ b/classify/train.py @@ -2,8 +2,10 @@ """ Train a YOLOv5 classifier model on a classification dataset Datasets: --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/custom/dataset' +YOLOv5-cls models: --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt +Torchvision models: --model resnet50, efficientnet_b0, etc. 
See https://pytorch.org/vision/stable/models.html -Usage: +Usage - Single-GPU and Multi-GPU DDP $ python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 128 $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 """ diff --git a/classify/val.py b/classify/val.py index 0930ba8c9c51..9d965d9f1fdc 100644 --- a/classify/val.py +++ b/classify/val.py @@ -3,7 +3,8 @@ Validate a classification model on a dataset Usage: - $ python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet + $ bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) + $ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate """ import argparse From 7c9486e16f6a2c35bf5cfca892898a11a81009fc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 17 Aug 2022 15:48:17 +0200 Subject: [PATCH 010/326] Update `attempt_download(release='v6.2')` (#8998) * Update attempt_download(release='v6.2') Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 20 ++++++++++---------- utils/downloads.py | 8 ++++---- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 7335394402da..89e4f1199cde 100644 --- a/README.md +++ b/README.md @@ -224,17 +224,17 @@ Get started in seconds with our verified environments. Click each icon below for | Model | size
(pixels) | mAPval
0.5:0.95 | mAPval
0.5 | Speed
CPU b1
(ms) | Speed
V100 b1
(ms) | Speed
V100 b32
(ms) | params
(M) | FLOPs
@640 (B) | |------------------------------------------------------------------------------------------------------|-----------------------|-------------------------|--------------------|------------------------------|-------------------------------|--------------------------------|--------------------|------------------------| -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | | | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5x6.pt)
+ [TTA][TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+ [TTA][TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- |
Table Notes (click to expand) diff --git a/utils/downloads.py b/utils/downloads.py index 9d4780ad28b1..c4d4a85c38ae 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -54,14 +54,14 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): LOGGER.info('') -def attempt_download(file, repo='ultralytics/yolov5', release='v6.1'): - # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v6.1', etc. +def attempt_download(file, repo='ultralytics/yolov5', release='v6.2'): + # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v6.2', etc. from utils.general import LOGGER def github_assets(repository, version='latest'): - # Return GitHub repo tag (i.e. 'v6.1') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...]) + # Return GitHub repo tag (i.e. 'v6.2') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...]) if version != 'latest': - version = f'tags/{version}' # i.e. tags/v6.1 + version = f'tags/{version}' # i.e. tags/v6.2 response = requests.get(f'https://api.github.com/repos/{repository}/releases/{version}').json() # github api return response['tag_name'], [x['name'] for x in response['assets']] # tag, assets From fe809b8dad5236d86d5acbe047b5e0e6895b2b8a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 17 Aug 2022 16:18:06 +0200 Subject: [PATCH 011/326] Created using Colaboratory --- tutorial.ipynb | 304 ++++++++++++++++++++++++------------------------- 1 file changed, 152 insertions(+), 152 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 61641bab1833..1438924e4112 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -17,7 +17,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "c31d2039ccf74c22b67841f4877d1186": { + "57c562894aed45cd9a107d0455e3e3f4": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", @@ -32,14 +32,14 @@ "_view_name": "HBoxView", "box_style": "", "children": [ - "IPY_MODEL_d4bba1727c714d94ad58a72bffa07c4c", - "IPY_MODEL_9aeff9f1780b45f892422fdc96e56913", - "IPY_MODEL_bf55a7c71d074d3fa88b10b997820825" + "IPY_MODEL_040d53c6cc924350bcb656cd21a7c713", + "IPY_MODEL_e029890942a74c098408ce5a9a566d51", + "IPY_MODEL_8fb991c03e434566a4297b6ab9446f89" ], - "layout": "IPY_MODEL_d8b66044e2fb4f5b916696834d880c81" + "layout": "IPY_MODEL_a9a376923a7742d89fb335db709c7a7e" } }, - "d4bba1727c714d94ad58a72bffa07c4c": { + "040d53c6cc924350bcb656cd21a7c713": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -54,13 +54,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_102e1deda239436fa72751c58202fa0f", + "layout": "IPY_MODEL_8b4276ac834c4735bf60ee9b761b9962", "placeholder": "​", - "style": "IPY_MODEL_4fd4431ced6c42368e18424912b877e4", + "style": "IPY_MODEL_52cc8da75b724198856617247541cb1e", "value": "100%" } }, - "9aeff9f1780b45f892422fdc96e56913": { + "e029890942a74c098408ce5a9a566d51": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", @@ -76,15 +76,15 @@ "bar_style": "success", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_cdd709c4f40941bea1b2053523c9fac8", + "layout": "IPY_MODEL_b6652f46480243c4adf60e6440043d6f", "max": 818322941, "min": 0, "orientation": "horizontal", - "style": "IPY_MODEL_a1ef2d8de2b741c78ca5d938e2ddbcdf", + "style": "IPY_MODEL_e502754177ff4ea8abf82a6e9ac77a4a", "value": 
818322941 } }, - "bf55a7c71d074d3fa88b10b997820825": { + "8fb991c03e434566a4297b6ab9446f89": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -99,13 +99,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_0dbce99bb6184238842cbec0587d564a", + "layout": "IPY_MODEL_447398becdb04836b5ffb5915318db07", "placeholder": "​", - "style": "IPY_MODEL_91ff5f93f2a24c5790ab29e347965946", - "value": " 780M/780M [01:10<00:00, 10.5MB/s]" + "style": "IPY_MODEL_2fddcb27ad4a4caa81ff51111f8d0ed6", + "value": " 780M/780M [01:17<00:00, 12.3MB/s]" } }, - "d8b66044e2fb4f5b916696834d880c81": { + "a9a376923a7742d89fb335db709c7a7e": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -157,7 +157,7 @@ "width": null } }, - "102e1deda239436fa72751c58202fa0f": { + "8b4276ac834c4735bf60ee9b761b9962": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -209,7 +209,7 @@ "width": null } }, - "4fd4431ced6c42368e18424912b877e4": { + "52cc8da75b724198856617247541cb1e": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -224,7 +224,7 @@ "description_width": "" } }, - "cdd709c4f40941bea1b2053523c9fac8": { + "b6652f46480243c4adf60e6440043d6f": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -276,7 +276,7 @@ "width": null } }, - "a1ef2d8de2b741c78ca5d938e2ddbcdf": { + "e502754177ff4ea8abf82a6e9ac77a4a": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", @@ -292,7 +292,7 @@ "description_width": "" } }, - "0dbce99bb6184238842cbec0587d564a": { + "447398becdb04836b5ffb5915318db07": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -344,7 +344,7 @@ "width": null } }, - "91ff5f93f2a24c5790ab29e347965946": { + "2fddcb27ad4a4caa81ff51111f8d0ed6": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -404,7 +404,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "185d0979-edcd-4860-e6fb-b8a27dbf5096" + "outputId": "e0f693e4-413b-4cc8-ae7e-91537da370b0" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", @@ -415,13 +415,13 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.1-370-g20f1b7e Python-3.7.13 torch-1.12.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n" + "YOLOv5 🚀 v6.2-2-g7c9486e Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n" ] }, { @@ -461,29 +461,29 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "4b13989f-32a4-4ef0-b403-06ff3aac255c" + "outputId": "941d625b-01a1-4f1b-dfd2-d9ef1c945715" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", - "#display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" + "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": null, + "execution_count": 2, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, 
imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False\n", - "YOLOv5 🚀 v6.1-370-g20f1b7e Python-3.7.13 torch-1.12.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-2-g7c9486e Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 53.9MB/s]\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt to yolov5s.pt...\n", + "100% 14.1M/14.1M [00:00<00:00, 50.5MB/s]\n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.016s)\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.021s)\n", - "Speed: 0.6ms pre-process, 18.6ms inference, 25.0ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.014s)\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.020s)\n", + "Speed: 0.6ms pre-process, 17.0ms inference, 20.2ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" ] } @@ -527,27 +527,27 @@ "base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": [ - "c31d2039ccf74c22b67841f4877d1186", - "d4bba1727c714d94ad58a72bffa07c4c", - "9aeff9f1780b45f892422fdc96e56913", - "bf55a7c71d074d3fa88b10b997820825", - "d8b66044e2fb4f5b916696834d880c81", - "102e1deda239436fa72751c58202fa0f", - "4fd4431ced6c42368e18424912b877e4", - "cdd709c4f40941bea1b2053523c9fac8", - "a1ef2d8de2b741c78ca5d938e2ddbcdf", - "0dbce99bb6184238842cbec0587d564a", - "91ff5f93f2a24c5790ab29e347965946" + "57c562894aed45cd9a107d0455e3e3f4", + "040d53c6cc924350bcb656cd21a7c713", + "e029890942a74c098408ce5a9a566d51", + "8fb991c03e434566a4297b6ab9446f89", + "a9a376923a7742d89fb335db709c7a7e", + "8b4276ac834c4735bf60ee9b761b9962", + "52cc8da75b724198856617247541cb1e", + "b6652f46480243c4adf60e6440043d6f", + "e502754177ff4ea8abf82a6e9ac77a4a", + "447398becdb04836b5ffb5915318db07", + "2fddcb27ad4a4caa81ff51111f8d0ed6" ] }, - "outputId": "a9004b06-37a6-41ed-a1f2-ac956f3963b3" + "outputId": "d593b41a-55e7-48a5-e285-5df449edc8c0" }, "source": [ "# Download COCO val\n", "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" ], - "execution_count": null, + "execution_count": 3, "outputs": [ { "output_type": "display_data", @@ -558,7 +558,7 @@ "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, - "model_id": "c31d2039ccf74c22b67841f4877d1186" + "model_id": "57c562894aed45cd9a107d0455e3e3f4" } }, "metadata": {} @@ -572,48 +572,48 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "c0f29758-4ec8-4def-893d-0efd6ed5b7f4" + "outputId": "701132a6-9ca8-4e1f-c89f-5d38893a6fc4" }, "source": [ "# Run YOLOv5x on COCO val\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], - "execution_count": null, + "execution_count": 4, 
"outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.1-370-g20f1b7e Python-3.7.13 torch-1.12.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-2-g7c9486e Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5x.pt to yolov5x.pt...\n", - "100% 166M/166M [00:35<00:00, 4.97MB/s]\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt to yolov5x.pt...\n", + "100% 166M/166M [00:11<00:00, 15.1MB/s]\n", "\n", "Fusing layers... \n", "YOLOv5x summary: 444 layers, 86705005 parameters, 0 gradients\n", "Downloading https://ultralytics.com/assets/Arial.ttf to /root/.config/Ultralytics/Arial.ttf...\n", - "100% 755k/755k [00:00<00:00, 49.4MB/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 10716.86it/s]\n", + "100% 755k/755k [00:00<00:00, 48.6MB/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 10889.87it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:08<00:00, 2.28it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:05<00:00, 2.38it/s]\n", " all 5000 36335 0.743 0.625 0.683 0.504\n", - "Speed: 0.1ms pre-process, 4.6ms inference, 1.2ms NMS per image at shape (32, 3, 640, 640)\n", + "Speed: 0.1ms pre-process, 4.7ms inference, 1.0ms NMS per image at shape (32, 3, 640, 640)\n", "\n", "Evaluating pycocotools mAP... 
saving runs/val/exp/yolov5x_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.41s)\n", + "Done (t=0.39s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=5.64s)\n", + "DONE (t=5.53s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=72.86s).\n", + "DONE (t=73.01s).\n", "Accumulating evaluation results...\n", - "DONE (t=14.20s).\n", + "DONE (t=15.27s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.506\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.549\n", @@ -745,13 +745,13 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "bce1b4bd-1a14-4c07-aebd-6c11e91ad24b" + "outputId": "50a9318f-d438-41d5-db95-928f1842c057" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": null, + "execution_count": 5, "outputs": [ { "output_type": "stream", @@ -759,17 +759,17 @@ "text": [ "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.1-370-g20f1b7e Python-3.7.13 torch-1.12.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-2-g7c9486e Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", "\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases\n", - "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 runs in ClearML\n", + "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML\n", "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", - "100% 6.66M/6.66M [00:00<00:00, 75.2MB/s]\n", - "Dataset download success ✅ (0.7s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.66M/6.66M [00:00<00:00, 12.4MB/s]\n", + "Dataset download success ✅ (1.3s), saved to 
\u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -802,12 +802,12 @@ "Transferred 349/349 items from yolov5s.pt\n", "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n", - "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(always_apply=False, p=0.01, blur_limit=(3, 7)), MedianBlur(always_apply=False, p=0.01, blur_limit=(3, 7)), ToGray(always_apply=False, p=0.01), CLAHE(always_apply=False, p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 7926.40it/s]\n", + "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 8516.89it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 975.81it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 1043.44it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00 Date: Wed, 17 Aug 2022 17:50:32 +0200 Subject: [PATCH 012/326] Update README_cn.md (#9001) Includes v6.2 updates Signed-off-by: KieraMengru0907 <108015280+KieraMengru0907@users.noreply.github.com> Signed-off-by: KieraMengru0907 <108015280+KieraMengru0907@users.noreply.github.com> --- .github/README_cn.md | 65 +++++++++++++++++++++++--------------------- 1 file changed, 34 insertions(+), 31 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index 816adf6b0449..46aafd86ec9b 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -130,19 +130,22 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12
教程 -- [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) 🚀 推荐 -- [获得最佳训练效果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results) ☘️ 推荐 -- [使用 Weights & Biases 记录实验](https://github.com/ultralytics/yolov5/issues/1289) 🌟 新 -- [Roboflow:数据集、标签和主动学习](https://github.com/ultralytics/yolov5/issues/4975) 🌟 新 +- [训练自定义数据集](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) 🚀 推荐 +- [获得最佳训练效果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results) ☘️ + 推荐 - [多GPU训练](https://github.com/ultralytics/yolov5/issues/475) -- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) ⭐ 新 -- [TFLite, ONNX, CoreML, TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251) 🚀 +- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 🌟 新 +- [TFLite, ONNX, CoreML, TensorRT 输出](https://github.com/ultralytics/yolov5/issues/251) 🚀 - [测试时数据增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) - [模型集成](https://github.com/ultralytics/yolov5/issues/318) - [模型剪枝/稀疏性](https://github.com/ultralytics/yolov5/issues/304) - [超参数进化](https://github.com/ultralytics/yolov5/issues/607) -- [带有冻结层的迁移学习](https://github.com/ultralytics/yolov5/issues/1314) ⭐ 新 -- [架构概要](https://github.com/ultralytics/yolov5/issues/6998) ⭐ 新 +- [带有冻结层的迁移学习](https://github.com/ultralytics/yolov5/issues/1314) +- [架构概要](https://github.com/ultralytics/yolov5/issues/6998) 🌟 新 +- [使用Weights & Biases 记录实验](https://github.com/ultralytics/yolov5/issues/1289) +- [Roboflow:数据集,标签和主动学习](https://github.com/ultralytics/yolov5/issues/4975) 🌟 新 +- [使用ClearML 记录实验](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) 🌟 新 +- [Deci 平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform) 🌟 新
@@ -186,7 +189,7 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12 |Deci ⭐ NEW|ClearML ⭐ NEW|Roboflow|Weights & Biases |:-:|:-:|:-:|:-:| -|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) +|在[Deci](https://bit.ly/yolov5-deci-platform)一键自动编译和量化YOLOv5以提高推理性能|使用[ClearML](https://cutt.ly/yolov5-readme-clearml) (开源!)自动追踪,可视化,以及远程训练YOLOv5|标记并将您的自定义数据直接导出到YOLOv5后,用[Roboflow](https://roboflow.com/?ref=ultralytics)进行训练 |通过[Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)自动跟踪以及可视化你在云端所有的YOLOv5训练运行情况 ##
为什么选择 YOLOv5
@@ -209,7 +212,7 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12 ### 预训练检查点 -| Model | size
(pixels) | mAPval
0.5:0.95 | mAPval
0.5 | Speed
CPU b1
(ms) | Speed
V100 b1
(ms) | Speed
V100 b32
(ms) | params
(M) | FLOPs
@640 (B) | +| 模型 | 规模
(像素) | mAP验证
0.5:0.95 | mAP验证
0.5 | 速度
CPU b1
(ms) | 速度
V100 b1
(ms) | 速度
V100 b32
(ms) | 参数
(M) | 浮点运算
@640 (B) | |------------------------------------------------------------------------------------------------------|-----------------------|-------------------------|--------------------|------------------------------|-------------------------------|--------------------------------|--------------------|------------------------| | [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | | [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | @@ -237,18 +240,18 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12
-##
Classification ⭐ NEW
+##
分类 ⭐ 新
-YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation, prediction and export! We've made training classifier models super simple. Click below to get started. +YOLOv5发布的[v6.2版本](https://github.com/ultralytics/yolov5/releases) 支持训练,验证,预测和输出分类模型!这使得训练分类器模型非常简单。点击下面开始尝试!
- Classification Checkpoints (click to expand) + 分类检查点 (点击展开)
-We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4xA100 instance, and we trained ResNet and EfficientNet models alongside with the same default training settings to compare. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) for easy reproducibility. +我们在ImageNet上使用了4xA100的实例训练YOLOv5-cls分类模型90个epochs,并以相同的默认设置同时训练了ResNet和EfficientNet模型来进行比较。我们将所有的模型导出到ONNX FP32进行CPU速度测试,又导出到TensorRT FP16进行GPU速度测试。最后,为了方便重现,我们在[Google Colab Pro](https://colab.research.google.com/signup)上进行了所有的速度测试。 -| Model | size
(pixels) | acc
top1 | acc
top5 | Training
90 epochs
4xA100 (hours) | Speed
ONNX CPU
(ms) | Speed
TensorRT V100
(ms) | params
(M) | FLOPs
@224 (B) | +| 模型 | 规模
(像素) | 准确度
第一 | 准确度
前五 | 训练
90 epochs
4xA100 (小时) | 速度
ONNX CPU
(ms) | 速度
TensorRT V100
(ms) | 参数
(M) | 浮点运算
@224 (B) | |----------------------------------------------------------------------------------------------------|-----------------------|------------------|------------------|----------------------------------------------|--------------------------------|-------------------------------------|--------------------|------------------------| | [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | | [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | @@ -267,38 +270,38 @@ We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4x | [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
- Table Notes (click to expand) + 表格注释 (点击扩展) -- All checkpoints are trained to 90 epochs with SGD optimizer with `lr0=0.001` and `weight_decay=5e-5` at image size 224 and all default settings.
Runs logged to https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2 -- **Accuracy** values are for single-model single-scale on [ImageNet-1k](https://www.image-net.org/index.php) dataset.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224` -- **Speed** averaged over 100 inference images using a Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM instance.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` -- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
Reproduce by `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` +- 所有检查点都被SGD优化器训练到90 epochs, `lr0=0.001` 和 `weight_decay=5e-5`, 图像大小为224,全为默认设置。
运行数据记录于 https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2。 +- **准确度** 值为[ImageNet-1k](https://www.image-net.org/index.php)数据集上的单模型单尺度。
通过`python classify/val.py --data ../datasets/imagenet --img 224`进行复制。 +- 使用Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM实例得出的100张推理图像的平均**速度**。
通过 `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1`进行复制。 +- 用`export.py`**导出**到FP32的ONNX和FP16的TensorRT。
通过 `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224`进行复制。
- Classification Usage Examples (click to expand)
+ 分类使用实例 (点击展开)

-### Train
-YOLOv5 classification training supports auto-download of MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof, and ImageNet datasets with the `--data` argument. To start training on MNIST for example use `--data mnist`.
+### 训练
+YOLOv5分类训练支持自动下载MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof和ImageNet数据集,并使用`--data`参数。例如,在MNIST上使用`--data mnist`开始训练。

 ```bash
-# Single-GPU
+# 单GPU
 python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128

-# Multi-GPU DDP
+# 多GPU DDP
 python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3
 ```

-### Val
-Validate YOLOv5m-cls accuracy on ImageNet-1k dataset:
+### 验证
+在ImageNet-1k数据集上验证YOLOv5m-cls的准确性:
 ```bash
 bash data/scripts/get_imagenet.sh --val  # download ImageNet val split (6.3G, 50000 images)
 python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224  # validate
 ```

-### Predict
-Use pretrained YOLOv5s-cls.pt to predict bus.jpg:
+### 预测
+用预训练的YOLOv5s-cls.pt预测bus.jpg:
 ```bash
 python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg
 ```
 ```python
 model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt')  # load from PyTorch Hub
 ```

-### Export
-Export a group of trained YOLOv5s-cls, ResNet and EfficientNet models to ONNX and TensorRT:
+### 导出
+导出一组训练好的YOLOv5s-cls, ResNet和EfficientNet模型到ONNX和TensorRT:
 ```bash
 python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224
 ```

From e83b422a69bbd69628687b2dc50102c08877505c Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 17 Aug 2022 17:52:53 +0200
Subject: [PATCH 013/326] Update dataset `names` from array to dictionary (#9000)

* Migrate dataset names to dictionary

* fix check

* backwards compat

* predict fix

* val fix

* Keep dataset stats behavior identical

Signed-off-by: Glenn Jocher

Signed-off-by: Glenn Jocher
---
 classify/predict.py       |    2 +-
 data/Argoverse.yaml       |   11 +-
 data/GlobalWheat2020.yaml |    4 +-
 data/ImageNet.yaml        | 1138 ++++++++++++++++++++++++++++++++-----
 data/Objects365.yaml      |  408 +++++++++++--
 data/SKU-110K.yaml        |    4 +-
 data/VOC.yaml             |   24 +-
 data/VisDrone.yaml        |   13 +-
 data/coco.yaml            |   91 ++-
 data/coco128.yaml         |   91 ++-
 data/xView.yaml           |   71 ++-
 models/common.py          |    2 +-
 utils/dataloaders.py      |    2 +-
 utils/general.py          |    8 +-
 val.py                    |    4 +-
 15 files changed, 1646 insertions(+), 227 deletions(-)

diff --git a/classify/predict.py b/classify/predict.py
index 4247e3c8e7fa..87379e42159b 100644
--- a/classify/predict.py
+++ b/classify/predict.py
@@ -71,7 +71,7 @@ def run(
         p = F.softmax(results, dim=1)  # probabilities
         i = p.argsort(1, descending=True)[:, :5].squeeze()  # top 5 indices
         dt[2] += time_sync() - t3
-        LOGGER.info(f"image 1/1 {file}: {imgsz}x{imgsz} {', '.join(f'{model.names[j]} {p[0, j]:.2f}' for j in i)}")
+        LOGGER.info(f"image 1/1 {file}: {imgsz}x{imgsz} {', '.join(f'{model.names[j]} {p[0, j]:.2f}' for j in i.tolist())}")

     # Print results
     t = tuple(x / seen * 1E3 for x in dt)  # speeds per image
diff --git a/data/Argoverse.yaml b/data/Argoverse.yaml
index 9d21296e3291..e3e9ba161ed0 100644
--- a/data/Argoverse.yaml
+++ b/data/Argoverse.yaml
@@ -14,8 +14,15 @@ val: Argoverse-1.1/images/val/  # val images (relative to 'path') 15062 images
 test:
Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview # Classes -nc: 8 # number of classes -names: ['person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign'] # class names +names: + 0: person + 1: bicycle + 2: car + 3: motorcycle + 4: bus + 5: truck + 6: traffic_light + 7: stop_sign # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index 4c43693f1d82..01812d031bc5 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -26,8 +26,8 @@ test: # test images (optional) 1276 images - images/uq_1 # Classes -nc: 1 # number of classes -names: ['wheat_head'] # class names +names: + 0: wheat_head # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/data/ImageNet.yaml b/data/ImageNet.yaml index 9f89b4268aff..14f12950605f 100644 --- a/data/ImageNet.yaml +++ b/data/ImageNet.yaml @@ -15,142 +15,1008 @@ val: val # val images (relative to 'path') 50000 images test: # test images (optional) # Classes -nc: 1000 # number of classes -names: ['tench', 'goldfish', 'great white shark', 'tiger shark', 'hammerhead shark', 'electric ray', 'stingray', 'cock', - 'hen', 'ostrich', 'brambling', 'goldfinch', 'house finch', 'junco', 'indigo bunting', 'American robin', - 'bulbul', 'jay', 'magpie', 'chickadee', 'American dipper', 'kite', 'bald eagle', 'vulture', 'great grey owl', - 'fire salamander', 'smooth newt', 'newt', 'spotted salamander', 'axolotl', 'American bullfrog', 'tree frog', - 'tailed frog', 'loggerhead sea turtle', 'leatherback sea turtle', 'mud turtle', 'terrapin', 'box turtle', - 'banded gecko', 'green iguana', 'Carolina anole', 'desert grassland whiptail lizard', 'agama', - 'frilled-necked lizard', 'alligator lizard', 'Gila monster', 'European green lizard', 'chameleon', - 'Komodo dragon', 'Nile crocodile', 'American alligator', 'triceratops', 'worm snake', 'ring-necked snake', - 'eastern hog-nosed snake', 'smooth green snake', 'kingsnake', 'garter snake', 'water snake', 'vine snake', - 'night snake', 'boa constrictor', 'African rock python', 'Indian cobra', 'green mamba', 'sea snake', - 'Saharan horned viper', 'eastern diamondback rattlesnake', 'sidewinder', 'trilobite', 'harvestman', 'scorpion', - 'yellow garden spider', 'barn spider', 'European garden spider', 'southern black widow', 'tarantula', - 'wolf spider', 'tick', 'centipede', 'black grouse', 'ptarmigan', 'ruffed grouse', 'prairie grouse', 'peacock', - 'quail', 'partridge', 'grey parrot', 'macaw', 'sulphur-crested cockatoo', 'lorikeet', 'coucal', 'bee eater', - 'hornbill', 'hummingbird', 'jacamar', 'toucan', 'duck', 'red-breasted merganser', 'goose', 'black swan', - 'tusker', 'echidna', 'platypus', 'wallaby', 'koala', 'wombat', 'jellyfish', 'sea anemone', 'brain coral', - 'flatworm', 'nematode', 'conch', 'snail', 'slug', 'sea slug', 'chiton', 'chambered nautilus', 'Dungeness crab', - 'rock crab', 'fiddler crab', 'red king crab', 'American lobster', 'spiny lobster', 'crayfish', 'hermit crab', - 'isopod', 'white stork', 'black stork', 'spoonbill', 'flamingo', 'little blue heron', 'great egret', 'bittern', - 'crane (bird)', 'limpkin', 'common gallinule', 'American coot', 'bustard', 'ruddy turnstone', 'dunlin', - 'common redshank', 'dowitcher', 'oystercatcher', 'pelican', 'king penguin', 'albatross', 'grey whale', - 'killer whale', 
'dugong', 'sea lion', 'Chihuahua', 'Japanese Chin', 'Maltese', 'Pekingese', 'Shih Tzu', - 'King Charles Spaniel', 'Papillon', 'toy terrier', 'Rhodesian Ridgeback', 'Afghan Hound', 'Basset Hound', - 'Beagle', 'Bloodhound', 'Bluetick Coonhound', 'Black and Tan Coonhound', 'Treeing Walker Coonhound', - 'English foxhound', 'Redbone Coonhound', 'borzoi', 'Irish Wolfhound', 'Italian Greyhound', 'Whippet', - 'Ibizan Hound', 'Norwegian Elkhound', 'Otterhound', 'Saluki', 'Scottish Deerhound', 'Weimaraner', - 'Staffordshire Bull Terrier', 'American Staffordshire Terrier', 'Bedlington Terrier', 'Border Terrier', - 'Kerry Blue Terrier', 'Irish Terrier', 'Norfolk Terrier', 'Norwich Terrier', 'Yorkshire Terrier', - 'Wire Fox Terrier', 'Lakeland Terrier', 'Sealyham Terrier', 'Airedale Terrier', 'Cairn Terrier', - 'Australian Terrier', 'Dandie Dinmont Terrier', 'Boston Terrier', 'Miniature Schnauzer', 'Giant Schnauzer', - 'Standard Schnauzer', 'Scottish Terrier', 'Tibetan Terrier', 'Australian Silky Terrier', - 'Soft-coated Wheaten Terrier', 'West Highland White Terrier', 'Lhasa Apso', 'Flat-Coated Retriever', - 'Curly-coated Retriever', 'Golden Retriever', 'Labrador Retriever', 'Chesapeake Bay Retriever', - 'German Shorthaired Pointer', 'Vizsla', 'English Setter', 'Irish Setter', 'Gordon Setter', 'Brittany', - 'Clumber Spaniel', 'English Springer Spaniel', 'Welsh Springer Spaniel', 'Cocker Spaniels', 'Sussex Spaniel', - 'Irish Water Spaniel', 'Kuvasz', 'Schipperke', 'Groenendael', 'Malinois', 'Briard', 'Australian Kelpie', - 'Komondor', 'Old English Sheepdog', 'Shetland Sheepdog', 'collie', 'Border Collie', 'Bouvier des Flandres', - 'Rottweiler', 'German Shepherd Dog', 'Dobermann', 'Miniature Pinscher', 'Greater Swiss Mountain Dog', - 'Bernese Mountain Dog', 'Appenzeller Sennenhund', 'Entlebucher Sennenhund', 'Boxer', 'Bullmastiff', - 'Tibetan Mastiff', 'French Bulldog', 'Great Dane', 'St. 
Bernard', 'husky', 'Alaskan Malamute', 'Siberian Husky', - 'Dalmatian', 'Affenpinscher', 'Basenji', 'pug', 'Leonberger', 'Newfoundland', 'Pyrenean Mountain Dog', - 'Samoyed', 'Pomeranian', 'Chow Chow', 'Keeshond', 'Griffon Bruxellois', 'Pembroke Welsh Corgi', - 'Cardigan Welsh Corgi', 'Toy Poodle', 'Miniature Poodle', 'Standard Poodle', 'Mexican hairless dog', - 'grey wolf', 'Alaskan tundra wolf', 'red wolf', 'coyote', 'dingo', 'dhole', 'African wild dog', 'hyena', - 'red fox', 'kit fox', 'Arctic fox', 'grey fox', 'tabby cat', 'tiger cat', 'Persian cat', 'Siamese cat', - 'Egyptian Mau', 'cougar', 'lynx', 'leopard', 'snow leopard', 'jaguar', 'lion', 'tiger', 'cheetah', 'brown bear', - 'American black bear', 'polar bear', 'sloth bear', 'mongoose', 'meerkat', 'tiger beetle', 'ladybug', - 'ground beetle', 'longhorn beetle', 'leaf beetle', 'dung beetle', 'rhinoceros beetle', 'weevil', 'fly', 'bee', - 'ant', 'grasshopper', 'cricket', 'stick insect', 'cockroach', 'mantis', 'cicada', 'leafhopper', 'lacewing', - 'dragonfly', 'damselfly', 'red admiral', 'ringlet', 'monarch butterfly', 'small white', 'sulphur butterfly', - 'gossamer-winged butterfly', 'starfish', 'sea urchin', 'sea cucumber', 'cottontail rabbit', 'hare', - 'Angora rabbit', 'hamster', 'porcupine', 'fox squirrel', 'marmot', 'beaver', 'guinea pig', 'common sorrel', - 'zebra', 'pig', 'wild boar', 'warthog', 'hippopotamus', 'ox', 'water buffalo', 'bison', 'ram', 'bighorn sheep', - 'Alpine ibex', 'hartebeest', 'impala', 'gazelle', 'dromedary', 'llama', 'weasel', 'mink', 'European polecat', - 'black-footed ferret', 'otter', 'skunk', 'badger', 'armadillo', 'three-toed sloth', 'orangutan', 'gorilla', - 'chimpanzee', 'gibbon', 'siamang', 'guenon', 'patas monkey', 'baboon', 'macaque', 'langur', - 'black-and-white colobus', 'proboscis monkey', 'marmoset', 'white-headed capuchin', 'howler monkey', 'titi', - "Geoffroy's spider monkey", 'common squirrel monkey', 'ring-tailed lemur', 'indri', 'Asian elephant', - 'African bush elephant', 'red panda', 'giant panda', 'snoek', 'eel', 'coho salmon', 'rock beauty', 'clownfish', - 'sturgeon', 'garfish', 'lionfish', 'pufferfish', 'abacus', 'abaya', 'academic gown', 'accordion', - 'acoustic guitar', 'aircraft carrier', 'airliner', 'airship', 'altar', 'ambulance', 'amphibious vehicle', - 'analog clock', 'apiary', 'apron', 'waste container', 'assault rifle', 'backpack', 'bakery', 'balance beam', - 'balloon', 'ballpoint pen', 'Band-Aid', 'banjo', 'baluster', 'barbell', 'barber chair', 'barbershop', 'barn', - 'barometer', 'barrel', 'wheelbarrow', 'baseball', 'basketball', 'bassinet', 'bassoon', 'swimming cap', - 'bath towel', 'bathtub', 'station wagon', 'lighthouse', 'beaker', 'military cap', 'beer bottle', 'beer glass', - 'bell-cot', 'bib', 'tandem bicycle', 'bikini', 'ring binder', 'binoculars', 'birdhouse', 'boathouse', - 'bobsleigh', 'bolo tie', 'poke bonnet', 'bookcase', 'bookstore', 'bottle cap', 'bow', 'bow tie', 'brass', 'bra', - 'breakwater', 'breastplate', 'broom', 'bucket', 'buckle', 'bulletproof vest', 'high-speed train', - 'butcher shop', 'taxicab', 'cauldron', 'candle', 'cannon', 'canoe', 'can opener', 'cardigan', 'car mirror', - 'carousel', 'tool kit', 'carton', 'car wheel', 'automated teller machine', 'cassette', 'cassette player', - 'castle', 'catamaran', 'CD player', 'cello', 'mobile phone', 'chain', 'chain-link fence', 'chain mail', - 'chainsaw', 'chest', 'chiffonier', 'chime', 'china cabinet', 'Christmas stocking', 'church', 'movie theater', - 'cleaver', 'cliff dwelling', 'cloak', 'clogs', 
'cocktail shaker', 'coffee mug', 'coffeemaker', 'coil', - 'combination lock', 'computer keyboard', 'confectionery store', 'container ship', 'convertible', 'corkscrew', - 'cornet', 'cowboy boot', 'cowboy hat', 'cradle', 'crane (machine)', 'crash helmet', 'crate', 'infant bed', - 'Crock Pot', 'croquet ball', 'crutch', 'cuirass', 'dam', 'desk', 'desktop computer', 'rotary dial telephone', - 'diaper', 'digital clock', 'digital watch', 'dining table', 'dishcloth', 'dishwasher', 'disc brake', 'dock', - 'dog sled', 'dome', 'doormat', 'drilling rig', 'drum', 'drumstick', 'dumbbell', 'Dutch oven', 'electric fan', - 'electric guitar', 'electric locomotive', 'entertainment center', 'envelope', 'espresso machine', 'face powder', - 'feather boa', 'filing cabinet', 'fireboat', 'fire engine', 'fire screen sheet', 'flagpole', 'flute', - 'folding chair', 'football helmet', 'forklift', 'fountain', 'fountain pen', 'four-poster bed', 'freight car', - 'French horn', 'frying pan', 'fur coat', 'garbage truck', 'gas mask', 'gas pump', 'goblet', 'go-kart', - 'golf ball', 'golf cart', 'gondola', 'gong', 'gown', 'grand piano', 'greenhouse', 'grille', 'grocery store', - 'guillotine', 'barrette', 'hair spray', 'half-track', 'hammer', 'hamper', 'hair dryer', 'hand-held computer', - 'handkerchief', 'hard disk drive', 'harmonica', 'harp', 'harvester', 'hatchet', 'holster', 'home theater', - 'honeycomb', 'hook', 'hoop skirt', 'horizontal bar', 'horse-drawn vehicle', 'hourglass', 'iPod', 'clothes iron', - "jack-o'-lantern", 'jeans', 'jeep', 'T-shirt', 'jigsaw puzzle', 'pulled rickshaw', 'joystick', 'kimono', - 'knee pad', 'knot', 'lab coat', 'ladle', 'lampshade', 'laptop computer', 'lawn mower', 'lens cap', - 'paper knife', 'library', 'lifeboat', 'lighter', 'limousine', 'ocean liner', 'lipstick', 'slip-on shoe', - 'lotion', 'speaker', 'loupe', 'sawmill', 'magnetic compass', 'mail bag', 'mailbox', 'tights', 'tank suit', - 'manhole cover', 'maraca', 'marimba', 'mask', 'match', 'maypole', 'maze', 'measuring cup', 'medicine chest', - 'megalith', 'microphone', 'microwave oven', 'military uniform', 'milk can', 'minibus', 'miniskirt', 'minivan', - 'missile', 'mitten', 'mixing bowl', 'mobile home', 'Model T', 'modem', 'monastery', 'monitor', 'moped', - 'mortar', 'square academic cap', 'mosque', 'mosquito net', 'scooter', 'mountain bike', 'tent', 'computer mouse', - 'mousetrap', 'moving van', 'muzzle', 'nail', 'neck brace', 'necklace', 'nipple', 'notebook computer', 'obelisk', - 'oboe', 'ocarina', 'odometer', 'oil filter', 'organ', 'oscilloscope', 'overskirt', 'bullock cart', - 'oxygen mask', 'packet', 'paddle', 'paddle wheel', 'padlock', 'paintbrush', 'pajamas', 'palace', 'pan flute', - 'paper towel', 'parachute', 'parallel bars', 'park bench', 'parking meter', 'passenger car', 'patio', - 'payphone', 'pedestal', 'pencil case', 'pencil sharpener', 'perfume', 'Petri dish', 'photocopier', 'plectrum', - 'Pickelhaube', 'picket fence', 'pickup truck', 'pier', 'piggy bank', 'pill bottle', 'pillow', 'ping-pong ball', - 'pinwheel', 'pirate ship', 'pitcher', 'hand plane', 'planetarium', 'plastic bag', 'plate rack', 'plow', - 'plunger', 'Polaroid camera', 'pole', 'police van', 'poncho', 'billiard table', 'soda bottle', 'pot', - "potter's wheel", 'power drill', 'prayer rug', 'printer', 'prison', 'projectile', 'projector', 'hockey puck', - 'punching bag', 'purse', 'quill', 'quilt', 'race car', 'racket', 'radiator', 'radio', 'radio telescope', - 'rain barrel', 'recreational vehicle', 'reel', 'reflex camera', 'refrigerator', 'remote control', 
'restaurant', - 'revolver', 'rifle', 'rocking chair', 'rotisserie', 'eraser', 'rugby ball', 'ruler', 'running shoe', 'safe', - 'safety pin', 'salt shaker', 'sandal', 'sarong', 'saxophone', 'scabbard', 'weighing scale', 'school bus', - 'schooner', 'scoreboard', 'CRT screen', 'screw', 'screwdriver', 'seat belt', 'sewing machine', 'shield', - 'shoe store', 'shoji', 'shopping basket', 'shopping cart', 'shovel', 'shower cap', 'shower curtain', 'ski', - 'ski mask', 'sleeping bag', 'slide rule', 'sliding door', 'slot machine', 'snorkel', 'snowmobile', 'snowplow', - 'soap dispenser', 'soccer ball', 'sock', 'solar thermal collector', 'sombrero', 'soup bowl', 'space bar', - 'space heater', 'space shuttle', 'spatula', 'motorboat', 'spider web', 'spindle', 'sports car', 'spotlight', - 'stage', 'steam locomotive', 'through arch bridge', 'steel drum', 'stethoscope', 'scarf', 'stone wall', - 'stopwatch', 'stove', 'strainer', 'tram', 'stretcher', 'couch', 'stupa', 'submarine', 'suit', 'sundial', - 'sunglass', 'sunglasses', 'sunscreen', 'suspension bridge', 'mop', 'sweatshirt', 'swimsuit', 'swing', 'switch', - 'syringe', 'table lamp', 'tank', 'tape player', 'teapot', 'teddy bear', 'television', 'tennis ball', - 'thatched roof', 'front curtain', 'thimble', 'threshing machine', 'throne', 'tile roof', 'toaster', - 'tobacco shop', 'toilet seat', 'torch', 'totem pole', 'tow truck', 'toy store', 'tractor', 'semi-trailer truck', - 'tray', 'trench coat', 'tricycle', 'trimaran', 'tripod', 'triumphal arch', 'trolleybus', 'trombone', 'tub', - 'turnstile', 'typewriter keyboard', 'umbrella', 'unicycle', 'upright piano', 'vacuum cleaner', 'vase', 'vault', - 'velvet', 'vending machine', 'vestment', 'viaduct', 'violin', 'volleyball', 'waffle iron', 'wall clock', - 'wallet', 'wardrobe', 'military aircraft', 'sink', 'washing machine', 'water bottle', 'water jug', - 'water tower', 'whiskey jug', 'whistle', 'wig', 'window screen', 'window shade', 'Windsor tie', 'wine bottle', - 'wing', 'wok', 'wooden spoon', 'wool', 'split-rail fence', 'shipwreck', 'yawl', 'yurt', 'website', 'comic book', - 'crossword', 'traffic sign', 'traffic light', 'dust jacket', 'menu', 'plate', 'guacamole', 'consomme', - 'hot pot', 'trifle', 'ice cream', 'ice pop', 'baguette', 'bagel', 'pretzel', 'cheeseburger', 'hot dog', - 'mashed potato', 'cabbage', 'broccoli', 'cauliflower', 'zucchini', 'spaghetti squash', 'acorn squash', - 'butternut squash', 'cucumber', 'artichoke', 'bell pepper', 'cardoon', 'mushroom', 'Granny Smith', 'strawberry', - 'orange', 'lemon', 'fig', 'pineapple', 'banana', 'jackfruit', 'custard apple', 'pomegranate', 'hay', - 'carbonara', 'chocolate syrup', 'dough', 'meatloaf', 'pizza', 'pot pie', 'burrito', 'red wine', 'espresso', - 'cup', 'eggnog', 'alp', 'bubble', 'cliff', 'coral reef', 'geyser', 'lakeshore', 'promontory', 'shoal', - 'seashore', 'valley', 'volcano', 'baseball player', 'bridegroom', 'scuba diver', 'rapeseed', 'daisy', - "yellow lady's slipper", 'corn', 'acorn', 'rose hip', 'horse chestnut seed', 'coral fungus', 'agaric', - 'gyromitra', 'stinkhorn mushroom', 'earth star', 'hen-of-the-woods', 'bolete', 'ear', - 'toilet paper'] # class names +names: + 0: tench + 1: goldfish + 2: great white shark + 3: tiger shark + 4: hammerhead shark + 5: electric ray + 6: stingray + 7: cock + 8: hen + 9: ostrich + 10: brambling + 11: goldfinch + 12: house finch + 13: junco + 14: indigo bunting + 15: American robin + 16: bulbul + 17: jay + 18: magpie + 19: chickadee + 20: American dipper + 21: kite + 22: bald eagle + 23: vulture + 24: 
great grey owl + 25: fire salamander + 26: smooth newt + 27: newt + 28: spotted salamander + 29: axolotl + 30: American bullfrog + 31: tree frog + 32: tailed frog + 33: loggerhead sea turtle + 34: leatherback sea turtle + 35: mud turtle + 36: terrapin + 37: box turtle + 38: banded gecko + 39: green iguana + 40: Carolina anole + 41: desert grassland whiptail lizard + 42: agama + 43: frilled-necked lizard + 44: alligator lizard + 45: Gila monster + 46: European green lizard + 47: chameleon + 48: Komodo dragon + 49: Nile crocodile + 50: American alligator + 51: triceratops + 52: worm snake + 53: ring-necked snake + 54: eastern hog-nosed snake + 55: smooth green snake + 56: kingsnake + 57: garter snake + 58: water snake + 59: vine snake + 60: night snake + 61: boa constrictor + 62: African rock python + 63: Indian cobra + 64: green mamba + 65: sea snake + 66: Saharan horned viper + 67: eastern diamondback rattlesnake + 68: sidewinder + 69: trilobite + 70: harvestman + 71: scorpion + 72: yellow garden spider + 73: barn spider + 74: European garden spider + 75: southern black widow + 76: tarantula + 77: wolf spider + 78: tick + 79: centipede + 80: black grouse + 81: ptarmigan + 82: ruffed grouse + 83: prairie grouse + 84: peacock + 85: quail + 86: partridge + 87: grey parrot + 88: macaw + 89: sulphur-crested cockatoo + 90: lorikeet + 91: coucal + 92: bee eater + 93: hornbill + 94: hummingbird + 95: jacamar + 96: toucan + 97: duck + 98: red-breasted merganser + 99: goose + 100: black swan + 101: tusker + 102: echidna + 103: platypus + 104: wallaby + 105: koala + 106: wombat + 107: jellyfish + 108: sea anemone + 109: brain coral + 110: flatworm + 111: nematode + 112: conch + 113: snail + 114: slug + 115: sea slug + 116: chiton + 117: chambered nautilus + 118: Dungeness crab + 119: rock crab + 120: fiddler crab + 121: red king crab + 122: American lobster + 123: spiny lobster + 124: crayfish + 125: hermit crab + 126: isopod + 127: white stork + 128: black stork + 129: spoonbill + 130: flamingo + 131: little blue heron + 132: great egret + 133: bittern + 134: crane (bird) + 135: limpkin + 136: common gallinule + 137: American coot + 138: bustard + 139: ruddy turnstone + 140: dunlin + 141: common redshank + 142: dowitcher + 143: oystercatcher + 144: pelican + 145: king penguin + 146: albatross + 147: grey whale + 148: killer whale + 149: dugong + 150: sea lion + 151: Chihuahua + 152: Japanese Chin + 153: Maltese + 154: Pekingese + 155: Shih Tzu + 156: King Charles Spaniel + 157: Papillon + 158: toy terrier + 159: Rhodesian Ridgeback + 160: Afghan Hound + 161: Basset Hound + 162: Beagle + 163: Bloodhound + 164: Bluetick Coonhound + 165: Black and Tan Coonhound + 166: Treeing Walker Coonhound + 167: English foxhound + 168: Redbone Coonhound + 169: borzoi + 170: Irish Wolfhound + 171: Italian Greyhound + 172: Whippet + 173: Ibizan Hound + 174: Norwegian Elkhound + 175: Otterhound + 176: Saluki + 177: Scottish Deerhound + 178: Weimaraner + 179: Staffordshire Bull Terrier + 180: American Staffordshire Terrier + 181: Bedlington Terrier + 182: Border Terrier + 183: Kerry Blue Terrier + 184: Irish Terrier + 185: Norfolk Terrier + 186: Norwich Terrier + 187: Yorkshire Terrier + 188: Wire Fox Terrier + 189: Lakeland Terrier + 190: Sealyham Terrier + 191: Airedale Terrier + 192: Cairn Terrier + 193: Australian Terrier + 194: Dandie Dinmont Terrier + 195: Boston Terrier + 196: Miniature Schnauzer + 197: Giant Schnauzer + 198: Standard Schnauzer + 199: Scottish Terrier + 200: Tibetan Terrier + 201: Australian 
Silky Terrier + 202: Soft-coated Wheaten Terrier + 203: West Highland White Terrier + 204: Lhasa Apso + 205: Flat-Coated Retriever + 206: Curly-coated Retriever + 207: Golden Retriever + 208: Labrador Retriever + 209: Chesapeake Bay Retriever + 210: German Shorthaired Pointer + 211: Vizsla + 212: English Setter + 213: Irish Setter + 214: Gordon Setter + 215: Brittany + 216: Clumber Spaniel + 217: English Springer Spaniel + 218: Welsh Springer Spaniel + 219: Cocker Spaniels + 220: Sussex Spaniel + 221: Irish Water Spaniel + 222: Kuvasz + 223: Schipperke + 224: Groenendael + 225: Malinois + 226: Briard + 227: Australian Kelpie + 228: Komondor + 229: Old English Sheepdog + 230: Shetland Sheepdog + 231: collie + 232: Border Collie + 233: Bouvier des Flandres + 234: Rottweiler + 235: German Shepherd Dog + 236: Dobermann + 237: Miniature Pinscher + 238: Greater Swiss Mountain Dog + 239: Bernese Mountain Dog + 240: Appenzeller Sennenhund + 241: Entlebucher Sennenhund + 242: Boxer + 243: Bullmastiff + 244: Tibetan Mastiff + 245: French Bulldog + 246: Great Dane + 247: St. Bernard + 248: husky + 249: Alaskan Malamute + 250: Siberian Husky + 251: Dalmatian + 252: Affenpinscher + 253: Basenji + 254: pug + 255: Leonberger + 256: Newfoundland + 257: Pyrenean Mountain Dog + 258: Samoyed + 259: Pomeranian + 260: Chow Chow + 261: Keeshond + 262: Griffon Bruxellois + 263: Pembroke Welsh Corgi + 264: Cardigan Welsh Corgi + 265: Toy Poodle + 266: Miniature Poodle + 267: Standard Poodle + 268: Mexican hairless dog + 269: grey wolf + 270: Alaskan tundra wolf + 271: red wolf + 272: coyote + 273: dingo + 274: dhole + 275: African wild dog + 276: hyena + 277: red fox + 278: kit fox + 279: Arctic fox + 280: grey fox + 281: tabby cat + 282: tiger cat + 283: Persian cat + 284: Siamese cat + 285: Egyptian Mau + 286: cougar + 287: lynx + 288: leopard + 289: snow leopard + 290: jaguar + 291: lion + 292: tiger + 293: cheetah + 294: brown bear + 295: American black bear + 296: polar bear + 297: sloth bear + 298: mongoose + 299: meerkat + 300: tiger beetle + 301: ladybug + 302: ground beetle + 303: longhorn beetle + 304: leaf beetle + 305: dung beetle + 306: rhinoceros beetle + 307: weevil + 308: fly + 309: bee + 310: ant + 311: grasshopper + 312: cricket + 313: stick insect + 314: cockroach + 315: mantis + 316: cicada + 317: leafhopper + 318: lacewing + 319: dragonfly + 320: damselfly + 321: red admiral + 322: ringlet + 323: monarch butterfly + 324: small white + 325: sulphur butterfly + 326: gossamer-winged butterfly + 327: starfish + 328: sea urchin + 329: sea cucumber + 330: cottontail rabbit + 331: hare + 332: Angora rabbit + 333: hamster + 334: porcupine + 335: fox squirrel + 336: marmot + 337: beaver + 338: guinea pig + 339: common sorrel + 340: zebra + 341: pig + 342: wild boar + 343: warthog + 344: hippopotamus + 345: ox + 346: water buffalo + 347: bison + 348: ram + 349: bighorn sheep + 350: Alpine ibex + 351: hartebeest + 352: impala + 353: gazelle + 354: dromedary + 355: llama + 356: weasel + 357: mink + 358: European polecat + 359: black-footed ferret + 360: otter + 361: skunk + 362: badger + 363: armadillo + 364: three-toed sloth + 365: orangutan + 366: gorilla + 367: chimpanzee + 368: gibbon + 369: siamang + 370: guenon + 371: patas monkey + 372: baboon + 373: macaque + 374: langur + 375: black-and-white colobus + 376: proboscis monkey + 377: marmoset + 378: white-headed capuchin + 379: howler monkey + 380: titi + 381: Geoffroy's spider monkey + 382: common squirrel monkey + 383: ring-tailed lemur + 384: 
indri + 385: Asian elephant + 386: African bush elephant + 387: red panda + 388: giant panda + 389: snoek + 390: eel + 391: coho salmon + 392: rock beauty + 393: clownfish + 394: sturgeon + 395: garfish + 396: lionfish + 397: pufferfish + 398: abacus + 399: abaya + 400: academic gown + 401: accordion + 402: acoustic guitar + 403: aircraft carrier + 404: airliner + 405: airship + 406: altar + 407: ambulance + 408: amphibious vehicle + 409: analog clock + 410: apiary + 411: apron + 412: waste container + 413: assault rifle + 414: backpack + 415: bakery + 416: balance beam + 417: balloon + 418: ballpoint pen + 419: Band-Aid + 420: banjo + 421: baluster + 422: barbell + 423: barber chair + 424: barbershop + 425: barn + 426: barometer + 427: barrel + 428: wheelbarrow + 429: baseball + 430: basketball + 431: bassinet + 432: bassoon + 433: swimming cap + 434: bath towel + 435: bathtub + 436: station wagon + 437: lighthouse + 438: beaker + 439: military cap + 440: beer bottle + 441: beer glass + 442: bell-cot + 443: bib + 444: tandem bicycle + 445: bikini + 446: ring binder + 447: binoculars + 448: birdhouse + 449: boathouse + 450: bobsleigh + 451: bolo tie + 452: poke bonnet + 453: bookcase + 454: bookstore + 455: bottle cap + 456: bow + 457: bow tie + 458: brass + 459: bra + 460: breakwater + 461: breastplate + 462: broom + 463: bucket + 464: buckle + 465: bulletproof vest + 466: high-speed train + 467: butcher shop + 468: taxicab + 469: cauldron + 470: candle + 471: cannon + 472: canoe + 473: can opener + 474: cardigan + 475: car mirror + 476: carousel + 477: tool kit + 478: carton + 479: car wheel + 480: automated teller machine + 481: cassette + 482: cassette player + 483: castle + 484: catamaran + 485: CD player + 486: cello + 487: mobile phone + 488: chain + 489: chain-link fence + 490: chain mail + 491: chainsaw + 492: chest + 493: chiffonier + 494: chime + 495: china cabinet + 496: Christmas stocking + 497: church + 498: movie theater + 499: cleaver + 500: cliff dwelling + 501: cloak + 502: clogs + 503: cocktail shaker + 504: coffee mug + 505: coffeemaker + 506: coil + 507: combination lock + 508: computer keyboard + 509: confectionery store + 510: container ship + 511: convertible + 512: corkscrew + 513: cornet + 514: cowboy boot + 515: cowboy hat + 516: cradle + 517: crane (machine) + 518: crash helmet + 519: crate + 520: infant bed + 521: Crock Pot + 522: croquet ball + 523: crutch + 524: cuirass + 525: dam + 526: desk + 527: desktop computer + 528: rotary dial telephone + 529: diaper + 530: digital clock + 531: digital watch + 532: dining table + 533: dishcloth + 534: dishwasher + 535: disc brake + 536: dock + 537: dog sled + 538: dome + 539: doormat + 540: drilling rig + 541: drum + 542: drumstick + 543: dumbbell + 544: Dutch oven + 545: electric fan + 546: electric guitar + 547: electric locomotive + 548: entertainment center + 549: envelope + 550: espresso machine + 551: face powder + 552: feather boa + 553: filing cabinet + 554: fireboat + 555: fire engine + 556: fire screen sheet + 557: flagpole + 558: flute + 559: folding chair + 560: football helmet + 561: forklift + 562: fountain + 563: fountain pen + 564: four-poster bed + 565: freight car + 566: French horn + 567: frying pan + 568: fur coat + 569: garbage truck + 570: gas mask + 571: gas pump + 572: goblet + 573: go-kart + 574: golf ball + 575: golf cart + 576: gondola + 577: gong + 578: gown + 579: grand piano + 580: greenhouse + 581: grille + 582: grocery store + 583: guillotine + 584: barrette + 585: hair spray + 586: 
half-track + 587: hammer + 588: hamper + 589: hair dryer + 590: hand-held computer + 591: handkerchief + 592: hard disk drive + 593: harmonica + 594: harp + 595: harvester + 596: hatchet + 597: holster + 598: home theater + 599: honeycomb + 600: hook + 601: hoop skirt + 602: horizontal bar + 603: horse-drawn vehicle + 604: hourglass + 605: iPod + 606: clothes iron + 607: jack-o'-lantern + 608: jeans + 609: jeep + 610: T-shirt + 611: jigsaw puzzle + 612: pulled rickshaw + 613: joystick + 614: kimono + 615: knee pad + 616: knot + 617: lab coat + 618: ladle + 619: lampshade + 620: laptop computer + 621: lawn mower + 622: lens cap + 623: paper knife + 624: library + 625: lifeboat + 626: lighter + 627: limousine + 628: ocean liner + 629: lipstick + 630: slip-on shoe + 631: lotion + 632: speaker + 633: loupe + 634: sawmill + 635: magnetic compass + 636: mail bag + 637: mailbox + 638: tights + 639: tank suit + 640: manhole cover + 641: maraca + 642: marimba + 643: mask + 644: match + 645: maypole + 646: maze + 647: measuring cup + 648: medicine chest + 649: megalith + 650: microphone + 651: microwave oven + 652: military uniform + 653: milk can + 654: minibus + 655: miniskirt + 656: minivan + 657: missile + 658: mitten + 659: mixing bowl + 660: mobile home + 661: Model T + 662: modem + 663: monastery + 664: monitor + 665: moped + 666: mortar + 667: square academic cap + 668: mosque + 669: mosquito net + 670: scooter + 671: mountain bike + 672: tent + 673: computer mouse + 674: mousetrap + 675: moving van + 676: muzzle + 677: nail + 678: neck brace + 679: necklace + 680: nipple + 681: notebook computer + 682: obelisk + 683: oboe + 684: ocarina + 685: odometer + 686: oil filter + 687: organ + 688: oscilloscope + 689: overskirt + 690: bullock cart + 691: oxygen mask + 692: packet + 693: paddle + 694: paddle wheel + 695: padlock + 696: paintbrush + 697: pajamas + 698: palace + 699: pan flute + 700: paper towel + 701: parachute + 702: parallel bars + 703: park bench + 704: parking meter + 705: passenger car + 706: patio + 707: payphone + 708: pedestal + 709: pencil case + 710: pencil sharpener + 711: perfume + 712: Petri dish + 713: photocopier + 714: plectrum + 715: Pickelhaube + 716: picket fence + 717: pickup truck + 718: pier + 719: piggy bank + 720: pill bottle + 721: pillow + 722: ping-pong ball + 723: pinwheel + 724: pirate ship + 725: pitcher + 726: hand plane + 727: planetarium + 728: plastic bag + 729: plate rack + 730: plow + 731: plunger + 732: Polaroid camera + 733: pole + 734: police van + 735: poncho + 736: billiard table + 737: soda bottle + 738: pot + 739: potter's wheel + 740: power drill + 741: prayer rug + 742: printer + 743: prison + 744: projectile + 745: projector + 746: hockey puck + 747: punching bag + 748: purse + 749: quill + 750: quilt + 751: race car + 752: racket + 753: radiator + 754: radio + 755: radio telescope + 756: rain barrel + 757: recreational vehicle + 758: reel + 759: reflex camera + 760: refrigerator + 761: remote control + 762: restaurant + 763: revolver + 764: rifle + 765: rocking chair + 766: rotisserie + 767: eraser + 768: rugby ball + 769: ruler + 770: running shoe + 771: safe + 772: safety pin + 773: salt shaker + 774: sandal + 775: sarong + 776: saxophone + 777: scabbard + 778: weighing scale + 779: school bus + 780: schooner + 781: scoreboard + 782: CRT screen + 783: screw + 784: screwdriver + 785: seat belt + 786: sewing machine + 787: shield + 788: shoe store + 789: shoji + 790: shopping basket + 791: shopping cart + 792: shovel + 793: shower cap + 
794: shower curtain + 795: ski + 796: ski mask + 797: sleeping bag + 798: slide rule + 799: sliding door + 800: slot machine + 801: snorkel + 802: snowmobile + 803: snowplow + 804: soap dispenser + 805: soccer ball + 806: sock + 807: solar thermal collector + 808: sombrero + 809: soup bowl + 810: space bar + 811: space heater + 812: space shuttle + 813: spatula + 814: motorboat + 815: spider web + 816: spindle + 817: sports car + 818: spotlight + 819: stage + 820: steam locomotive + 821: through arch bridge + 822: steel drum + 823: stethoscope + 824: scarf + 825: stone wall + 826: stopwatch + 827: stove + 828: strainer + 829: tram + 830: stretcher + 831: couch + 832: stupa + 833: submarine + 834: suit + 835: sundial + 836: sunglass + 837: sunglasses + 838: sunscreen + 839: suspension bridge + 840: mop + 841: sweatshirt + 842: swimsuit + 843: swing + 844: switch + 845: syringe + 846: table lamp + 847: tank + 848: tape player + 849: teapot + 850: teddy bear + 851: television + 852: tennis ball + 853: thatched roof + 854: front curtain + 855: thimble + 856: threshing machine + 857: throne + 858: tile roof + 859: toaster + 860: tobacco shop + 861: toilet seat + 862: torch + 863: totem pole + 864: tow truck + 865: toy store + 866: tractor + 867: semi-trailer truck + 868: tray + 869: trench coat + 870: tricycle + 871: trimaran + 872: tripod + 873: triumphal arch + 874: trolleybus + 875: trombone + 876: tub + 877: turnstile + 878: typewriter keyboard + 879: umbrella + 880: unicycle + 881: upright piano + 882: vacuum cleaner + 883: vase + 884: vault + 885: velvet + 886: vending machine + 887: vestment + 888: viaduct + 889: violin + 890: volleyball + 891: waffle iron + 892: wall clock + 893: wallet + 894: wardrobe + 895: military aircraft + 896: sink + 897: washing machine + 898: water bottle + 899: water jug + 900: water tower + 901: whiskey jug + 902: whistle + 903: wig + 904: window screen + 905: window shade + 906: Windsor tie + 907: wine bottle + 908: wing + 909: wok + 910: wooden spoon + 911: wool + 912: split-rail fence + 913: shipwreck + 914: yawl + 915: yurt + 916: website + 917: comic book + 918: crossword + 919: traffic sign + 920: traffic light + 921: dust jacket + 922: menu + 923: plate + 924: guacamole + 925: consomme + 926: hot pot + 927: trifle + 928: ice cream + 929: ice pop + 930: baguette + 931: bagel + 932: pretzel + 933: cheeseburger + 934: hot dog + 935: mashed potato + 936: cabbage + 937: broccoli + 938: cauliflower + 939: zucchini + 940: spaghetti squash + 941: acorn squash + 942: butternut squash + 943: cucumber + 944: artichoke + 945: bell pepper + 946: cardoon + 947: mushroom + 948: Granny Smith + 949: strawberry + 950: orange + 951: lemon + 952: fig + 953: pineapple + 954: banana + 955: jackfruit + 956: custard apple + 957: pomegranate + 958: hay + 959: carbonara + 960: chocolate syrup + 961: dough + 962: meatloaf + 963: pizza + 964: pot pie + 965: burrito + 966: red wine + 967: espresso + 968: cup + 969: eggnog + 970: alp + 971: bubble + 972: cliff + 973: coral reef + 974: geyser + 975: lakeshore + 976: promontory + 977: shoal + 978: seashore + 979: valley + 980: volcano + 981: baseball player + 982: bridegroom + 983: scuba diver + 984: rapeseed + 985: daisy + 986: yellow lady's slipper + 987: corn + 988: acorn + 989: rose hip + 990: horse chestnut seed + 991: coral fungus + 992: agaric + 993: gyromitra + 994: stinkhorn mushroom + 995: earth star + 996: hen-of-the-woods + 997: bolete + 998: ear + 999: toilet paper + # Download script/URL (optional) download: 
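# With the ImageNet class map above now an index-keyed dict, lookups need no list
# indexing tricks; a quick sanity check, assuming PyYAML and a working directory at
# the repo root (both assumptions, not part of the patch):
import yaml

names = yaml.safe_load(open('data/ImageNet.yaml'))['names']
assert names[0] == 'tench' and names[999] == 'toilet paper' and len(names) == 1000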
data/scripts/get_imagenet.sh diff --git a/data/Objects365.yaml b/data/Objects365.yaml index 4cc94753f530..05b26a1f4796 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -14,48 +14,372 @@ val: images/val # val images (relative to 'path') 80000 images test: # test images (optional) # Classes -nc: 365 # number of classes -names: ['Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Glasses', 'Bottle', 'Desk', 'Cup', - 'Street Lights', 'Cabinet/shelf', 'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet', 'Book', - 'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower', 'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag', - 'Pillow', 'Boots', 'Vase', 'Microphone', 'Necklace', 'Ring', 'SUV', 'Wine Glass', 'Belt', 'Monitor/TV', - 'Backpack', 'Umbrella', 'Traffic Light', 'Speaker', 'Watch', 'Tie', 'Trash bin Can', 'Slippers', 'Bicycle', - 'Stool', 'Barrel/bucket', 'Van', 'Couch', 'Sandals', 'Basket', 'Drum', 'Pen/Pencil', 'Bus', 'Wild Bird', - 'High Heels', 'Motorcycle', 'Guitar', 'Carpet', 'Cell Phone', 'Bread', 'Camera', 'Canned', 'Truck', - 'Traffic cone', 'Cymbal', 'Lifesaver', 'Towel', 'Stuffed Toy', 'Candle', 'Sailboat', 'Laptop', 'Awning', - 'Bed', 'Faucet', 'Tent', 'Horse', 'Mirror', 'Power outlet', 'Sink', 'Apple', 'Air Conditioner', 'Knife', - 'Hockey Stick', 'Paddle', 'Pickup Truck', 'Fork', 'Traffic Sign', 'Balloon', 'Tripod', 'Dog', 'Spoon', 'Clock', - 'Pot', 'Cow', 'Cake', 'Dinning Table', 'Sheep', 'Hanger', 'Blackboard/Whiteboard', 'Napkin', 'Other Fish', - 'Orange/Tangerine', 'Toiletry', 'Keyboard', 'Tomato', 'Lantern', 'Machinery Vehicle', 'Fan', - 'Green Vegetables', 'Banana', 'Baseball Glove', 'Airplane', 'Mouse', 'Train', 'Pumpkin', 'Soccer', 'Skiboard', - 'Luggage', 'Nightstand', 'Tea pot', 'Telephone', 'Trolley', 'Head Phone', 'Sports Car', 'Stop Sign', - 'Dessert', 'Scooter', 'Stroller', 'Crane', 'Remote', 'Refrigerator', 'Oven', 'Lemon', 'Duck', 'Baseball Bat', - 'Surveillance Camera', 'Cat', 'Jug', 'Broccoli', 'Piano', 'Pizza', 'Elephant', 'Skateboard', 'Surfboard', - 'Gun', 'Skating and Skiing shoes', 'Gas stove', 'Donut', 'Bow Tie', 'Carrot', 'Toilet', 'Kite', 'Strawberry', - 'Other Balls', 'Shovel', 'Pepper', 'Computer Box', 'Toilet Paper', 'Cleaning Products', 'Chopsticks', - 'Microwave', 'Pigeon', 'Baseball', 'Cutting/chopping Board', 'Coffee Table', 'Side Table', 'Scissors', - 'Marker', 'Pie', 'Ladder', 'Snowboard', 'Cookies', 'Radiator', 'Fire Hydrant', 'Basketball', 'Zebra', 'Grape', - 'Giraffe', 'Potato', 'Sausage', 'Tricycle', 'Violin', 'Egg', 'Fire Extinguisher', 'Candy', 'Fire Truck', - 'Billiards', 'Converter', 'Bathtub', 'Wheelchair', 'Golf Club', 'Briefcase', 'Cucumber', 'Cigar/Cigarette', - 'Paint Brush', 'Pear', 'Heavy Truck', 'Hamburger', 'Extractor', 'Extension Cord', 'Tong', 'Tennis Racket', - 'Folder', 'American Football', 'earphone', 'Mask', 'Kettle', 'Tennis', 'Ship', 'Swing', 'Coffee Machine', - 'Slide', 'Carriage', 'Onion', 'Green beans', 'Projector', 'Frisbee', 'Washing Machine/Drying Machine', - 'Chicken', 'Printer', 'Watermelon', 'Saxophone', 'Tissue', 'Toothbrush', 'Ice cream', 'Hot-air balloon', - 'Cello', 'French Fries', 'Scale', 'Trophy', 'Cabbage', 'Hot dog', 'Blender', 'Peach', 'Rice', 'Wallet/Purse', - 'Volleyball', 'Deer', 'Goose', 'Tape', 'Tablet', 'Cosmetics', 'Trumpet', 'Pineapple', 'Golf Ball', - 'Ambulance', 'Parking meter', 'Mango', 'Key', 'Hurdle', 'Fishing Rod', 'Medal', 'Flute', 'Brush', 'Penguin', - 'Megaphone', 'Corn', 'Lettuce', 'Garlic', 'Swan', 'Helicopter', 'Green Onion', 
'Sandwich', 'Nuts', - 'Speed Limit Sign', 'Induction Cooker', 'Broom', 'Trombone', 'Plum', 'Rickshaw', 'Goldfish', 'Kiwi fruit', - 'Router/modem', 'Poker Card', 'Toaster', 'Shrimp', 'Sushi', 'Cheese', 'Notepaper', 'Cherry', 'Pliers', 'CD', - 'Pasta', 'Hammer', 'Cue', 'Avocado', 'Hamimelon', 'Flask', 'Mushroom', 'Screwdriver', 'Soap', 'Recorder', - 'Bear', 'Eggplant', 'Board Eraser', 'Coconut', 'Tape Measure/Ruler', 'Pig', 'Showerhead', 'Globe', 'Chips', - 'Steak', 'Crosswalk Sign', 'Stapler', 'Camel', 'Formula 1', 'Pomegranate', 'Dishwasher', 'Crab', - 'Hoverboard', 'Meat ball', 'Rice Cooker', 'Tuba', 'Calculator', 'Papaya', 'Antelope', 'Parrot', 'Seal', - 'Butterfly', 'Dumbbell', 'Donkey', 'Lion', 'Urinal', 'Dolphin', 'Electric Drill', 'Hair Dryer', 'Egg tart', - 'Jellyfish', 'Treadmill', 'Lighter', 'Grapefruit', 'Game board', 'Mop', 'Radish', 'Baozi', 'Target', 'French', - 'Spring Rolls', 'Monkey', 'Rabbit', 'Pencil Case', 'Yak', 'Red Cabbage', 'Binoculars', 'Asparagus', 'Barbell', - 'Scallop', 'Noddles', 'Comb', 'Dumpling', 'Oyster', 'Table Tennis paddle', 'Cosmetics Brush/Eyeliner Pencil', - 'Chainsaw', 'Eraser', 'Lobster', 'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling', 'Table Tennis'] +names: + 0: Person + 1: Sneakers + 2: Chair + 3: Other Shoes + 4: Hat + 5: Car + 6: Lamp + 7: Glasses + 8: Bottle + 9: Desk + 10: Cup + 11: Street Lights + 12: Cabinet/shelf + 13: Handbag/Satchel + 14: Bracelet + 15: Plate + 16: Picture/Frame + 17: Helmet + 18: Book + 19: Gloves + 20: Storage box + 21: Boat + 22: Leather Shoes + 23: Flower + 24: Bench + 25: Potted Plant + 26: Bowl/Basin + 27: Flag + 28: Pillow + 29: Boots + 30: Vase + 31: Microphone + 32: Necklace + 33: Ring + 34: SUV + 35: Wine Glass + 36: Belt + 37: Monitor/TV + 38: Backpack + 39: Umbrella + 40: Traffic Light + 41: Speaker + 42: Watch + 43: Tie + 44: Trash bin Can + 45: Slippers + 46: Bicycle + 47: Stool + 48: Barrel/bucket + 49: Van + 50: Couch + 51: Sandals + 52: Basket + 53: Drum + 54: Pen/Pencil + 55: Bus + 56: Wild Bird + 57: High Heels + 58: Motorcycle + 59: Guitar + 60: Carpet + 61: Cell Phone + 62: Bread + 63: Camera + 64: Canned + 65: Truck + 66: Traffic cone + 67: Cymbal + 68: Lifesaver + 69: Towel + 70: Stuffed Toy + 71: Candle + 72: Sailboat + 73: Laptop + 74: Awning + 75: Bed + 76: Faucet + 77: Tent + 78: Horse + 79: Mirror + 80: Power outlet + 81: Sink + 82: Apple + 83: Air Conditioner + 84: Knife + 85: Hockey Stick + 86: Paddle + 87: Pickup Truck + 88: Fork + 89: Traffic Sign + 90: Balloon + 91: Tripod + 92: Dog + 93: Spoon + 94: Clock + 95: Pot + 96: Cow + 97: Cake + 98: Dinning Table + 99: Sheep + 100: Hanger + 101: Blackboard/Whiteboard + 102: Napkin + 103: Other Fish + 104: Orange/Tangerine + 105: Toiletry + 106: Keyboard + 107: Tomato + 108: Lantern + 109: Machinery Vehicle + 110: Fan + 111: Green Vegetables + 112: Banana + 113: Baseball Glove + 114: Airplane + 115: Mouse + 116: Train + 117: Pumpkin + 118: Soccer + 119: Skiboard + 120: Luggage + 121: Nightstand + 122: Tea pot + 123: Telephone + 124: Trolley + 125: Head Phone + 126: Sports Car + 127: Stop Sign + 128: Dessert + 129: Scooter + 130: Stroller + 131: Crane + 132: Remote + 133: Refrigerator + 134: Oven + 135: Lemon + 136: Duck + 137: Baseball Bat + 138: Surveillance Camera + 139: Cat + 140: Jug + 141: Broccoli + 142: Piano + 143: Pizza + 144: Elephant + 145: Skateboard + 146: Surfboard + 147: Gun + 148: Skating and Skiing shoes + 149: Gas stove + 150: Donut + 151: Bow Tie + 152: Carrot + 153: Toilet + 154: Kite + 155: Strawberry + 156: Other 
Balls + 157: Shovel + 158: Pepper + 159: Computer Box + 160: Toilet Paper + 161: Cleaning Products + 162: Chopsticks + 163: Microwave + 164: Pigeon + 165: Baseball + 166: Cutting/chopping Board + 167: Coffee Table + 168: Side Table + 169: Scissors + 170: Marker + 171: Pie + 172: Ladder + 173: Snowboard + 174: Cookies + 175: Radiator + 176: Fire Hydrant + 177: Basketball + 178: Zebra + 179: Grape + 180: Giraffe + 181: Potato + 182: Sausage + 183: Tricycle + 184: Violin + 185: Egg + 186: Fire Extinguisher + 187: Candy + 188: Fire Truck + 189: Billiards + 190: Converter + 191: Bathtub + 192: Wheelchair + 193: Golf Club + 194: Briefcase + 195: Cucumber + 196: Cigar/Cigarette + 197: Paint Brush + 198: Pear + 199: Heavy Truck + 200: Hamburger + 201: Extractor + 202: Extension Cord + 203: Tong + 204: Tennis Racket + 205: Folder + 206: American Football + 207: earphone + 208: Mask + 209: Kettle + 210: Tennis + 211: Ship + 212: Swing + 213: Coffee Machine + 214: Slide + 215: Carriage + 216: Onion + 217: Green beans + 218: Projector + 219: Frisbee + 220: Washing Machine/Drying Machine + 221: Chicken + 222: Printer + 223: Watermelon + 224: Saxophone + 225: Tissue + 226: Toothbrush + 227: Ice cream + 228: Hot-air balloon + 229: Cello + 230: French Fries + 231: Scale + 232: Trophy + 233: Cabbage + 234: Hot dog + 235: Blender + 236: Peach + 237: Rice + 238: Wallet/Purse + 239: Volleyball + 240: Deer + 241: Goose + 242: Tape + 243: Tablet + 244: Cosmetics + 245: Trumpet + 246: Pineapple + 247: Golf Ball + 248: Ambulance + 249: Parking meter + 250: Mango + 251: Key + 252: Hurdle + 253: Fishing Rod + 254: Medal + 255: Flute + 256: Brush + 257: Penguin + 258: Megaphone + 259: Corn + 260: Lettuce + 261: Garlic + 262: Swan + 263: Helicopter + 264: Green Onion + 265: Sandwich + 266: Nuts + 267: Speed Limit Sign + 268: Induction Cooker + 269: Broom + 270: Trombone + 271: Plum + 272: Rickshaw + 273: Goldfish + 274: Kiwi fruit + 275: Router/modem + 276: Poker Card + 277: Toaster + 278: Shrimp + 279: Sushi + 280: Cheese + 281: Notepaper + 282: Cherry + 283: Pliers + 284: CD + 285: Pasta + 286: Hammer + 287: Cue + 288: Avocado + 289: Hamimelon + 290: Flask + 291: Mushroom + 292: Screwdriver + 293: Soap + 294: Recorder + 295: Bear + 296: Eggplant + 297: Board Eraser + 298: Coconut + 299: Tape Measure/Ruler + 300: Pig + 301: Showerhead + 302: Globe + 303: Chips + 304: Steak + 305: Crosswalk Sign + 306: Stapler + 307: Camel + 308: Formula 1 + 309: Pomegranate + 310: Dishwasher + 311: Crab + 312: Hoverboard + 313: Meat ball + 314: Rice Cooker + 315: Tuba + 316: Calculator + 317: Papaya + 318: Antelope + 319: Parrot + 320: Seal + 321: Butterfly + 322: Dumbbell + 323: Donkey + 324: Lion + 325: Urinal + 326: Dolphin + 327: Electric Drill + 328: Hair Dryer + 329: Egg tart + 330: Jellyfish + 331: Treadmill + 332: Lighter + 333: Grapefruit + 334: Game board + 335: Mop + 336: Radish + 337: Baozi + 338: Target + 339: French + 340: Spring Rolls + 341: Monkey + 342: Rabbit + 343: Pencil Case + 344: Yak + 345: Red Cabbage + 346: Binoculars + 347: Asparagus + 348: Barbell + 349: Scallop + 350: Noddles + 351: Comb + 352: Dumpling + 353: Oyster + 354: Table Tennis paddle + 355: Cosmetics Brush/Eyeliner Pencil + 356: Chainsaw + 357: Eraser + 358: Lobster + 359: Durian + 360: Okra + 361: Lipstick + 362: Cosmetics Mirror + 363: Curling + 364: Table Tennis # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index 
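# The same names conversion repeats for every dataset YAML in this patch; a minimal
# sketch of the old-to-new mapping, assuming only PyYAML — it mirrors the
# dict(enumerate(...)) back-compat shim added to check_dataset() further below:
import yaml

old = yaml.safe_load("nc: 3\nnames: ['person', 'bicycle', 'car']")  # old array format
new = yaml.safe_load("names:\n  0: person\n  1: bicycle\n  2: car")  # new dict format

if isinstance(old['names'], (list, tuple)):
    old['names'] = dict(enumerate(old['names']))  # -> {0: 'person', 1: 'bicycle', 2: 'car'}
old['nc'] = len(old['names'])  # 'nc' is now derived, not declared

assert old['names'] == new['names']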
2acf34d155bd..edae7171c660 100644 --- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -14,8 +14,8 @@ val: val.txt # val images (relative to 'path') 588 images test: test.txt # test images (optional) 2936 images # Classes -nc: 1 # number of classes -names: ['object'] # class names +names: + 0: object # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/data/VOC.yaml b/data/VOC.yaml index 636ddc42d46c..bbe5cf90a838 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -20,9 +20,27 @@ test: # test images (optional) - images/test2007 # Classes -nc: 20 # number of classes -names: ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', - 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] # class names +names: + 0: aeroplane + 1: bicycle + 2: bird + 3: boat + 4: bottle + 5: bus + 6: car + 7: cat + 8: chair + 9: cow + 10: diningtable + 11: dog + 12: horse + 13: motorbike + 14: person + 15: pottedplant + 16: sheep + 17: sofa + 18: train + 19: tvmonitor # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index 10337b46f104..a8bcf8e628ec 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -14,8 +14,17 @@ val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images # Classes -nc: 10 # number of classes -names: ['pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor'] +names: + 0: pedestrian + 1: people + 2: bicycle + 3: car + 4: van + 5: truck + 6: tricycle + 7: awning-tricycle + 8: bus + 9: motor # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/data/coco.yaml b/data/coco.yaml index 0c0c4adab05d..d64dfc7fed76 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -14,16 +14,87 @@ val: val2017.txt # val images (relative to 'path') 5000 images test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794 # Classes -nc: 80 # number of classes -names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', - 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', - 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', - 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', - 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', - 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', - 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', - 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', - 'hair drier', 'toothbrush'] # class names +names: + 0: person + 1: bicycle + 2: car + 3: motorcycle + 4: airplane + 5: bus + 6: train + 7: truck + 8: boat + 9: traffic light + 10: fire hydrant + 11: stop sign + 12: parking meter + 13: bench + 14: bird + 15: cat + 16: dog + 17: horse + 18: sheep + 19: cow + 20: elephant + 21: bear + 22: zebra + 23: giraffe + 24: 
backpack + 25: umbrella + 26: handbag + 27: tie + 28: suitcase + 29: frisbee + 30: skis + 31: snowboard + 32: sports ball + 33: kite + 34: baseball bat + 35: baseball glove + 36: skateboard + 37: surfboard + 38: tennis racket + 39: bottle + 40: wine glass + 41: cup + 42: fork + 43: knife + 44: spoon + 45: bowl + 46: banana + 47: apple + 48: sandwich + 49: orange + 50: broccoli + 51: carrot + 52: hot dog + 53: pizza + 54: donut + 55: cake + 56: chair + 57: couch + 58: potted plant + 59: bed + 60: dining table + 61: toilet + 62: tv + 63: laptop + 64: mouse + 65: remote + 66: keyboard + 67: cell phone + 68: microwave + 69: oven + 70: toaster + 71: sink + 72: refrigerator + 73: book + 74: clock + 75: vase + 76: scissors + 77: teddy bear + 78: hair drier + 79: toothbrush # Download script/URL (optional) diff --git a/data/coco128.yaml b/data/coco128.yaml index 2517d2079257..12556736a571 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -14,16 +14,87 @@ val: images/train2017 # val images (relative to 'path') 128 images test: # test images (optional) # Classes -nc: 80 # number of classes -names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', - 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', - 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', - 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', - 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', - 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', - 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', - 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', - 'hair drier', 'toothbrush'] # class names +names: + 0: person + 1: bicycle + 2: car + 3: motorcycle + 4: airplane + 5: bus + 6: train + 7: truck + 8: boat + 9: traffic light + 10: fire hydrant + 11: stop sign + 12: parking meter + 13: bench + 14: bird + 15: cat + 16: dog + 17: horse + 18: sheep + 19: cow + 20: elephant + 21: bear + 22: zebra + 23: giraffe + 24: backpack + 25: umbrella + 26: handbag + 27: tie + 28: suitcase + 29: frisbee + 30: skis + 31: snowboard + 32: sports ball + 33: kite + 34: baseball bat + 35: baseball glove + 36: skateboard + 37: surfboard + 38: tennis racket + 39: bottle + 40: wine glass + 41: cup + 42: fork + 43: knife + 44: spoon + 45: bowl + 46: banana + 47: apple + 48: sandwich + 49: orange + 50: broccoli + 51: carrot + 52: hot dog + 53: pizza + 54: donut + 55: cake + 56: chair + 57: couch + 58: potted plant + 59: bed + 60: dining table + 61: toilet + 62: tv + 63: laptop + 64: mouse + 65: remote + 66: keyboard + 67: cell phone + 68: microwave + 69: oven + 70: toaster + 71: sink + 72: refrigerator + 73: book + 74: clock + 75: vase + 76: scissors + 77: teddy bear + 78: hair drier + 79: toothbrush # Download script/URL (optional) diff --git a/data/xView.yaml b/data/xView.yaml index 3b38f1ff4439..b134ceac8164 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -14,16 +14,67 @@ train: images/autosplit_train.txt # train images (relative to 'path') 90% of 84 val: images/autosplit_val.txt # train images (relative to 'path') 10% of 847 train images # Classes -nc: 60 # number of classes -names: ['Fixed-wing Aircraft', 'Small Aircraft', 'Cargo 
Plane', 'Helicopter', 'Passenger Vehicle', 'Small Car', 'Bus', - 'Pickup Truck', 'Utility Truck', 'Truck', 'Cargo Truck', 'Truck w/Box', 'Truck Tractor', 'Trailer', - 'Truck w/Flatbed', 'Truck w/Liquid', 'Crane Truck', 'Railway Vehicle', 'Passenger Car', 'Cargo Car', - 'Flat Car', 'Tank car', 'Locomotive', 'Maritime Vessel', 'Motorboat', 'Sailboat', 'Tugboat', 'Barge', - 'Fishing Vessel', 'Ferry', 'Yacht', 'Container Ship', 'Oil Tanker', 'Engineering Vehicle', 'Tower crane', - 'Container Crane', 'Reach Stacker', 'Straddle Carrier', 'Mobile Crane', 'Dump Truck', 'Haul Truck', - 'Scraper/Tractor', 'Front loader/Bulldozer', 'Excavator', 'Cement Mixer', 'Ground Grader', 'Hut/Tent', 'Shed', - 'Building', 'Aircraft Hangar', 'Damaged Building', 'Facility', 'Construction Site', 'Vehicle Lot', 'Helipad', - 'Storage Tank', 'Shipping container lot', 'Shipping Container', 'Pylon', 'Tower'] # class names +names: + 0: Fixed-wing Aircraft + 1: Small Aircraft + 2: Cargo Plane + 3: Helicopter + 4: Passenger Vehicle + 5: Small Car + 6: Bus + 7: Pickup Truck + 8: Utility Truck + 9: Truck + 10: Cargo Truck + 11: Truck w/Box + 12: Truck Tractor + 13: Trailer + 14: Truck w/Flatbed + 15: Truck w/Liquid + 16: Crane Truck + 17: Railway Vehicle + 18: Passenger Car + 19: Cargo Car + 20: Flat Car + 21: Tank car + 22: Locomotive + 23: Maritime Vessel + 24: Motorboat + 25: Sailboat + 26: Tugboat + 27: Barge + 28: Fishing Vessel + 29: Ferry + 30: Yacht + 31: Container Ship + 32: Oil Tanker + 33: Engineering Vehicle + 34: Tower crane + 35: Container Crane + 36: Reach Stacker + 37: Straddle Carrier + 38: Mobile Crane + 39: Dump Truck + 40: Haul Truck + 41: Scraper/Tractor + 42: Front loader/Bulldozer + 43: Excavator + 44: Cement Mixer + 45: Ground Grader + 46: Hut/Tent + 47: Shed + 48: Building + 49: Aircraft Hangar + 50: Damaged Building + 51: Facility + 52: Construction Site + 53: Vehicle Lot + 54: Helipad + 55: Storage Tank + 56: Shipping container lot + 57: Shipping Container + 58: Pylon + 59: Tower # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/models/common.py b/models/common.py index 17e40e60d7d7..30202ca1abd7 100644 --- a/models/common.py +++ b/models/common.py @@ -449,7 +449,7 @@ def wrap_frozen_graph(gd, inputs, outputs): # class names if 'names' not in locals(): - names = yaml_load(data)['names'] if data else [f'class{i}' for i in range(999)] + names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)} if names[0] == 'n01440764' and len(names) == 1000: # ImageNet names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 2c04040bf25d..33e84ce4056e 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -1004,7 +1004,7 @@ def __init__(self, path='coco128.yaml', autodownload=False): self.hub_dir = Path(data['path'] + '-hub') self.im_dir = self.hub_dir / 'images' self.im_dir.mkdir(parents=True, exist_ok=True) # makes /images - self.stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary + self.stats = {'nc': data['nc'], 'names': list(data['names'].values())} # statistics dictionary self.data = data @staticmethod diff --git a/utils/general.py b/utils/general.py index 1c525c45f649..76872b696d79 100755 --- a/utils/general.py +++ b/utils/general.py @@ -481,11 +481,11 @@ def check_dataset(data, autodownload=True): data = yaml.safe_load(f) # dictionary # Checks - for k in 'train', 'val', 'nc': + for k 
in 'train', 'val', 'names': assert k in data, f"data.yaml '{k}:' field missing ❌" - if 'names' not in data: - LOGGER.warning("data.yaml 'names:' field missing ⚠️, assigning default names 'class0', 'class1', etc.") - data['names'] = [f'class{i}' for i in range(data['nc'])] # default names + if isinstance(data['names'], (list, tuple)): # old array format + data['names'] = dict(enumerate(data['names'])) # convert to dict + data['nc'] = len(data['names']) # Resolve paths path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.' diff --git a/val.py b/val.py index 130496233467..ce743b506aff 100644 --- a/val.py +++ b/val.py @@ -182,7 +182,9 @@ def run( seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) - names = dict(enumerate(model.names if hasattr(model, 'names') else model.module.names)) + names = model.names if hasattr(model, 'names') else model.module.names # get class names + if isinstance(names, (list, tuple)): # old format + names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 From 64e0757edffc6b2e927e16c8e2aa26439aceb4ce Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Thu, 18 Aug 2022 02:11:43 +0530 Subject: [PATCH 014/326] [Classify]: Allow inference on dirs and videos (#9003) * allow image dirs * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update predict.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dataloaders.py * Update predict.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update predict.py * Update predict.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- classify/predict.py | 64 +++++++++++++++++++++++--------------------- utils/dataloaders.py | 25 ++++++++--------- 2 files changed, 46 insertions(+), 43 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 87379e42159b..7af5f60a2b9d 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -1,6 +1,6 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ -Run classification inference on images +Run classification inference on file/dir/URL/glob Usage: $ python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg @@ -11,7 +11,6 @@ import sys from pathlib import Path -import cv2 import torch.nn.functional as F FILE = Path(__file__).resolve() @@ -20,27 +19,31 @@ sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative -from classify.train import imshow_cls from models.common import DetectMultiBackend from utils.augmentations import classify_transforms -from utils.general import LOGGER, check_requirements, colorstr, increment_path, print_args +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages +from utils.general import LOGGER, check_file, check_requirements, colorstr, increment_path, print_args from utils.torch_utils import select_device, smart_inference_mode, time_sync @smart_inference_mode() def run( weights=ROOT / 'yolov5s-cls.pt', # model.pt path(s) - source=ROOT / 'data/images/bus.jpg', # file/dir/URL/glob, 0 for webcam + source=ROOT / 'data/images', # file/dir/URL/glob imgsz=224, # inference size device='', # cuda 
device, i.e. 0 or 0,1,2,3 or cpu half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference - show=True, project=ROOT / 'runs/predict-cls', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment ): - file = str(source) + source = str(source) + is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) + is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) + if is_url and is_file: + source = check_file(source) # download + seen, dt = 1, [0.0, 0.0, 0.0] device = select_device(device) @@ -48,37 +51,36 @@ def run( save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run save_dir.mkdir(parents=True, exist_ok=True) # make dir - # Transforms - transforms = classify_transforms(imgsz) - # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half) model.warmup(imgsz=(1, 3, imgsz, imgsz)) # warmup - - # Image - t1 = time_sync() - im = cv2.cvtColor(cv2.imread(file), cv2.COLOR_BGR2RGB) - im = transforms(im).unsqueeze(0).to(device) - im = im.half() if model.fp16 else im.float() - t2 = time_sync() - dt[0] += t2 - t1 - - # Inference - results = model(im) - t3 = time_sync() - dt[1] += t3 - t2 - - p = F.softmax(results, dim=1) # probabilities - i = p.argsort(1, descending=True)[:, :5].squeeze() # top 5 indices - dt[2] += time_sync() - t3 - LOGGER.info(f"image 1/1 {file}: {imgsz}x{imgsz} {', '.join(f'{model.names[j]} {p[0, j]:.2f}' for j in i.tolist())}") + dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz)) + for path, im, im0s, vid_cap, s in dataset: + # Image + t1 = time_sync() + im = im.unsqueeze(0).to(device) + im = im.half() if model.fp16 else im.float() + t2 = time_sync() + dt[0] += t2 - t1 + + # Inference + results = model(im) + t3 = time_sync() + dt[1] += t3 - t2 + + # Post-process + p = F.softmax(results, dim=1) # probabilities + i = p.argsort(1, descending=True)[:, :5].squeeze().tolist() # top 5 indices + dt[2] += time_sync() - t3 + # if save: + # imshow_cls(im, f=save_dir / Path(path).name, verbose=True) + seen += 1 + LOGGER.info(f"{s}{imgsz}x{imgsz} {', '.join(f'{model.names[j]} {p[0, j]:.2f}' for j in i)}") # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image shape = (1, 3, imgsz, imgsz) LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t) - if show: - imshow_cls(im, f=save_dir / Path(file).name, verbose=True) LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") return p @@ -86,7 +88,7 @@ def run( def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model path(s)') - parser.add_argument('--source', type=str, default=ROOT / 'data/images/bus.jpg', help='file') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='train, val image size (pixels)') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 33e84ce4056e..3f26be2cd32d 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -186,7 +186,7 @@ def __iter__(self): class LoadImages: # YOLOv5 image/video dataloader, i.e. 
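# How the classify changes in patch 014 fit together, sketched from its call sites:
# passing a transforms callable makes LoadImages yield classifier-ready CHW tensors,
# so one loop covers files, directories, globs and videos alike.
from utils.augmentations import classify_transforms
from utils.dataloaders import LoadImages

dataset = LoadImages('data/images', img_size=224, transforms=classify_transforms(224))
for path, im, im0s, vid_cap, s in dataset:
    pass  # im is already transformed; predict.py only adds .unsqueeze(0).to(device)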
`python detect.py --source image.jpg/vid.mp4` - def __init__(self, path, img_size=640, stride=32, auto=True): + def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None): files = [] for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: p = str(Path(p).resolve()) @@ -210,6 +210,7 @@ def __init__(self, path, img_size=640, stride=32, auto=True): self.video_flag = [False] * ni + [True] * nv self.mode = 'image' self.auto = auto + self.transforms = transforms # optional if any(videos): self.new_video(videos[0]) # new video else: @@ -229,7 +230,7 @@ def __next__(self): if self.video_flag[self.count]: # Read video self.mode = 'video' - ret_val, img0 = self.cap.read() + ret_val, im0 = self.cap.read() while not ret_val: self.count += 1 self.cap.release() @@ -237,7 +238,7 @@ def __next__(self): raise StopIteration path = self.files[self.count] self.new_video(path) - ret_val, img0 = self.cap.read() + ret_val, im0 = self.cap.read() self.frame += 1 s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' @@ -245,18 +246,18 @@ def __next__(self): else: # Read image self.count += 1 - img0 = cv2.imread(path) # BGR - assert img0 is not None, f'Image Not Found {path}' + im0 = cv2.imread(path) # BGR + assert im0 is not None, f'Image Not Found {path}' s = f'image {self.count}/{self.nf} {path}: ' - # Padded resize - img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0] - - # Convert - img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB - img = np.ascontiguousarray(img) + if self.transforms: + im = self.transforms(cv2.cvtColor(im0, cv2.COLOR_BGR2RGB)) # classify transforms + else: + im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize + im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + im = np.ascontiguousarray(im) # contiguous - return path, img, img0, self.cap, s + return path, im, im0, self.cap, s def new_video(self, path): self.frame = 0 From 0922bc2082d8c754bbd733d90bd1ccd2aea79ee9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 17 Aug 2022 22:50:08 +0200 Subject: [PATCH 015/326] DockerHub tag update Usage example (#9005) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 2280f209e6a1..cf2c1c5cb3cb 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -49,11 +49,8 @@ ENV OMP_NUM_THREADS=8 # Kill all image-based # sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest) -# Bash into running container -# sudo docker exec -it 5a9b5863d93d bash - -# Bash into stopped container -# id=$(sudo docker ps -qa) && sudo docker start $id && sudo docker exec -it $id bash +# DockerHub tag update +# t=ultralytics/yolov5:latest tnew=ultralytics/yolov5:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew # Clean up # docker system prune -a --volumes From 6728dad76df8d62ed3c08e39c224a773d20582a0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 17 Aug 2022 22:57:55 +0200 Subject: [PATCH 016/326] Add weight `decay` to argparser (#9006) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/train.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/classify/train.py b/classify/train.py index b85f14236039..d55dc066d7a3 100644 --- a/classify/train.py +++ b/classify/train.py @@ -136,7 +136,7 @@ def train(opt, device): 
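# Context for the hunk below: the new --decay flag feeds smart_optimizer(), which in
# this repo applies weight decay to weight tensors only (biases and norm parameters
# train decay-free). grouped_sgd() is a hypothetical stand-in showing that grouping,
# not the repo's implementation:
import torch
import torch.nn as nn

def grouped_sgd(model: nn.Module, lr=0.001, momentum=0.9, decay=5e-5):
    decay_p, no_decay_p = [], []
    for m in model.modules():
        for name, p in m.named_parameters(recurse=False):
            no_decay = name == 'bias' or isinstance(m, (nn.BatchNorm2d, nn.LayerNorm))
            (no_decay_p if no_decay else decay_p).append(p)
    return torch.optim.SGD([{'params': decay_p, 'weight_decay': decay},
                            {'params': no_decay_p, 'weight_decay': 0.0}],
                           lr=lr, momentum=momentum)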
logger.log_graph(model, imgsz) # log model # Optimizer - optimizer = smart_optimizer(model, opt.optimizer, opt.lr0, momentum=0.9, decay=5e-5) + optimizer = smart_optimizer(model, opt.optimizer, opt.lr0, momentum=0.9, decay=opt.decay) # Scheduler lrf = 0.01 # final lr (fraction of lr0) @@ -280,6 +280,7 @@ def parse_opt(known=False): parser.add_argument('--pretrained', nargs='?', const=True, default=True, help='start from i.e. --pretrained False') parser.add_argument('--optimizer', choices=['SGD', 'Adam', 'AdamW', 'RMSProp'], default='Adam', help='optimizer') parser.add_argument('--lr0', type=float, default=0.001, help='initial learning rate') + parser.add_argument('--decay', type=float, default=5e-5, help='weight decay') parser.add_argument('--label-smoothing', type=float, default=0.1, help='Label smoothing epsilon') parser.add_argument('--cutoff', type=int, default=None, help='Model layer cutoff index for Classify() head') parser.add_argument('--dropout', type=float, default=None, help='Dropout (fraction)') From e08d568d39a8b1c24ec7eb54da80cf3b22f64f07 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 18 Aug 2022 01:08:52 +0200 Subject: [PATCH 017/326] Add glob quotes to detect.py usage example (#9007) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- detect.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/detect.py b/detect.py index c699a749a09f..dd60b87ca33a 100644 --- a/detect.py +++ b/detect.py @@ -7,7 +7,7 @@ img.jpg # image vid.mp4 # video path/ # directory - path/*.jpg # glob + 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream From 5c854fab5e43df82ebfd51197c2dc58e5212c5a6 Mon Sep 17 00:00:00 2001 From: glennjocher Date: Thu, 18 Aug 2022 02:44:50 +0200 Subject: [PATCH 018/326] requires grad after reset params --- classify/train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/classify/train.py b/classify/train.py index d55dc066d7a3..9fb7c52b545a 100644 --- a/classify/train.py +++ b/classify/train.py @@ -114,13 +114,13 @@ def train(opt, device): LOGGER.warning("WARNING: pass YOLOv5 classifier model with '-cls' suffix, i.e. 
'--model yolov5s-cls.pt'") model = ClassificationModel(model=model, nc=nc, cutoff=opt.cutoff or 10) # convert to classification model reshape_classifier_output(model, nc) # update class count - for p in model.parameters(): - p.requires_grad = True # for training for m in model.modules(): if not pretrained and hasattr(m, 'reset_parameters'): m.reset_parameters() if isinstance(m, torch.nn.Dropout) and opt.dropout is not None: m.p = opt.dropout # set dropout + for p in model.parameters(): + p.requires_grad = True # for training model = model.to(device) names = trainloader.dataset.classes # class names model.names = names # attach class names From 529aafd737053264cf8676b29c37f5d5300460eb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 18 Aug 2022 11:50:24 +0200 Subject: [PATCH 019/326] Fix TorchScript JSON string key bug (#9015) * Fix TorchScript JSON string key bug Resolves https://github.com/ultralytics/yolov5/issues/9011 Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/common.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index 30202ca1abd7..4f93887c55e0 100644 --- a/models/common.py +++ b/models/common.py @@ -337,8 +337,10 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, extra_files = {'config.txt': ''} # model metadata model = torch.jit.load(w, _extra_files=extra_files) model.half() if fp16 else model.float() - if extra_files['config.txt']: - d = json.loads(extra_files['config.txt']) # extra_files dict + if extra_files['config.txt']: # load metadata dict + d = json.loads(extra_files['config.txt'], + object_hook=lambda d: {int(k) if k.isdigit() else k: v + for k, v in d.items()}) stride, names = int(d['stride']), d['names'] elif dnn: # ONNX OpenCV DNN LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') From 20049be2e7dc6f330e3620dd82761bc3f4d02e36 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 18 Aug 2022 14:06:15 +0200 Subject: [PATCH 020/326] EMA FP32 assert classification bug fix (#9016) * Return EMA float on classification val * verbose val fix * EMA check --- classify/val.py | 3 ++- export.py | 2 +- models/experimental.py | 10 +++++++--- train.py | 3 +-- utils/torch_utils.py | 7 +++---- 5 files changed, 14 insertions(+), 11 deletions(-) diff --git a/classify/val.py b/classify/val.py index 9d965d9f1fdc..b76fb5147ecd 100644 --- a/classify/val.py +++ b/classify/val.py @@ -116,7 +116,7 @@ def run( if verbose: # all classes LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}") LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}") - for i, c in enumerate(model.names): + for i, c in model.names.items(): aci = acc[targets == i] top1i, top5i = aci.mean(0).tolist() LOGGER.info(f"{c:>24}{aci.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}") @@ -127,6 +127,7 @@ def run( LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t) LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") + model.float() # for training return top1, top5, loss diff --git a/export.py b/export.py index 595039b24bce..7b398fdc4d93 100644 --- a/export.py +++ b/export.py @@ -599,7 +599,7 @@ def parse_opt(): parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence 
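# A worked example of the TorchScript metadata fix in patch 019 above, using only the
# standard library: json.dumps() stringifies the integer class indices, and the
# object_hook restores them on load so names[0] keeps working.
import json

names = {0: 'person', 1: 'bicycle'}
s = json.dumps({'stride': 32, 'names': names})  # int keys serialize as "0", "1"
d = json.loads(s, object_hook=lambda d: {int(k) if k.isdigit() else k: v for k, v in d.items()})
assert d['stride'] == 32 and d['names'] == names  # keys are ints again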
threshold') parser.add_argument('--include', nargs='+', - default=['torchscript', 'onnx'], + default=['torchscript'], help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs') opt = parser.parse_args() print_args(vars(opt)) diff --git a/models/experimental.py b/models/experimental.py index cb32d01ba46a..02d35b9ebd11 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -8,7 +8,6 @@ import torch import torch.nn as nn -from models.common import Conv from utils.downloads import attempt_download @@ -79,11 +78,16 @@ def attempt_load(weights, device=None, inplace=True, fuse=True): for w in weights if isinstance(weights, list) else [weights]: ckpt = torch.load(attempt_download(w), map_location='cpu') # load ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model + + # Model compatibility updates if not hasattr(ckpt, 'stride'): - ckpt.stride = torch.tensor([32.]) # compatibility update for ResNet etc. + ckpt.stride = torch.tensor([32.]) + if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)): + ckpt.names = dict(enumerate(ckpt.names)) # convert to dict + model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval()) # model in eval mode - # Compatibility updates + # Module compatibility updates for m in model.modules(): t = type(m) if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model): diff --git a/train.py b/train.py index bbb26cdeafeb..10a3bdb56002 100644 --- a/train.py +++ b/train.py @@ -107,8 +107,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio data_dict = data_dict or check_dataset(data) # check if None train_path, val_path = data_dict['train'], data_dict['val'] nc = 1 if single_cls else int(data_dict['nc']) # number of classes - names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names - assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}' # check + names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset # Model diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 1cdbe20f8670..ed56064ce02e 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -408,8 +408,6 @@ class ModelEMA: def __init__(self, model, decay=0.9999, tau=2000, updates=0): # Create EMA self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA - # if next(model.parameters()).device.type != 'cpu': - # self.ema.half() # FP16 EMA self.updates = updates # number of EMA updates self.decay = lambda x: decay * (1 - math.exp(-x / tau)) # decay exponential ramp (to help early epochs) for p in self.ema.parameters(): @@ -423,9 +421,10 @@ def update(self, model): msd = de_parallel(model).state_dict() # model state_dict for k, v in self.ema.state_dict().items(): - if v.dtype.is_floating_point: + if v.dtype.is_floating_point: # true for FP16 and FP32 v *= d - v += (1 - d) * msd[k].detach() + v += (1 - d) * msd[k] + assert v.dtype == msd[k].dtype == torch.float32, f'EMA {v.dtype} and model {msd[k]} must be updated in FP32' def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): # Update EMA attributes From c0e7a776cd55e8c01b63714f6f7fea3d53f6bf5b Mon Sep 17 00:00:00 2001 From: cher-liang <88578531+cher-liang@users.noreply.github.com> Date: Thu, 18 Aug 2022 20:18:02 +0800 Subject: [PATCH 021/326] Faster pre-processing for gray 
image input (#9009) * faster 1 channel to 3 channels image conversion * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 4f93887c55e0..f914c9d60fdb 100644 --- a/models/common.py +++ b/models/common.py @@ -617,7 +617,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): files.append(Path(f).with_suffix('.jpg').name) if im.shape[0] < 5: # image in CHW im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) - im = im[..., :3] if im.ndim == 3 else np.tile(im[..., None], 3) # enforce 3ch input + im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) # enforce 3ch input s = im.shape[:2] # HWC shape0.append(s) # image shape g = (size / max(s)) # gain From d40cd0d454dcc34312cb5c40f45f64b76665c40c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 18 Aug 2022 19:55:38 +0200 Subject: [PATCH 022/326] Improved `Profile()` inference timing (#9024) * Improved `Profile()` class * Update predict.py * Update val.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update val.py * Update AutoShape Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- classify/predict.py | 37 +++++++------- classify/val.py | 29 +++++------ detect.py | 35 ++++++------- models/common.py | 117 ++++++++++++++++++++++---------------------- utils/general.py | 18 +++++-- val.py | 31 ++++++------ 6 files changed, 133 insertions(+), 134 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 7af5f60a2b9d..0bf99140b8e3 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -22,8 +22,8 @@ from models.common import DetectMultiBackend from utils.augmentations import classify_transforms from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages -from utils.general import LOGGER, check_file, check_requirements, colorstr, increment_path, print_args -from utils.torch_utils import select_device, smart_inference_mode, time_sync +from utils.general import LOGGER, Profile, check_file, check_requirements, colorstr, increment_path, print_args +from utils.torch_utils import select_device, smart_inference_mode @smart_inference_mode() @@ -44,7 +44,7 @@ def run( if is_url and is_file: source = check_file(source) # download - seen, dt = 1, [0.0, 0.0, 0.0] + dt = Profile(), Profile(), Profile() device = select_device(device) # Directories @@ -55,30 +55,27 @@ def run( model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half) model.warmup(imgsz=(1, 3, imgsz, imgsz)) # warmup dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz)) - for path, im, im0s, vid_cap, s in dataset: + for seen, (path, im, im0s, vid_cap, s) in enumerate(dataset): # Image - t1 = time_sync() - im = im.unsqueeze(0).to(device) - im = im.half() if model.fp16 else im.float() - t2 = time_sync() - dt[0] += t2 - t1 + with dt[0]: + im = im.unsqueeze(0).to(device) + im = im.half() if model.fp16 else im.float() # Inference - results = model(im) - t3 = time_sync() - dt[1] += t3 - t2 + with dt[1]: + results = model(im) # Post-process - p = F.softmax(results, dim=1) # probabilities - i = p.argsort(1, descending=True)[:, :5].squeeze().tolist() # top 5 indices - dt[2] += time_sync() - 
t3 - # if save: - # imshow_cls(im, f=save_dir / Path(path).name, verbose=True) - seen += 1 - LOGGER.info(f"{s}{imgsz}x{imgsz} {', '.join(f'{model.names[j]} {p[0, j]:.2f}' for j in i)}") + with dt[2]: + p = F.softmax(results, dim=1) # probabilities + i = p.argsort(1, descending=True)[:, :5].squeeze().tolist() # top 5 indices + # if save: + # imshow_cls(im, f=save_dir / Path(path).name, verbose=True) + LOGGER.info( + f"{s}{imgsz}x{imgsz} {', '.join(f'{model.names[j]} {p[0, j]:.2f}' for j in i)}, {dt[1].dt * 1E3:.1f}ms") # Print results - t = tuple(x / seen * 1E3 for x in dt) # speeds per image + t = tuple(x.t / (seen + 1) * 1E3 for x in dt) # speeds per image shape = (1, 3, imgsz, imgsz) LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t) LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") diff --git a/classify/val.py b/classify/val.py index b76fb5147ecd..c91e2cf82c81 100644 --- a/classify/val.py +++ b/classify/val.py @@ -23,8 +23,8 @@ from models.common import DetectMultiBackend from utils.dataloaders import create_classification_dataloader -from utils.general import LOGGER, check_img_size, check_requirements, colorstr, increment_path, print_args -from utils.torch_utils import select_device, smart_inference_mode, time_sync +from utils.general import LOGGER, Profile, check_img_size, check_requirements, colorstr, increment_path, print_args +from utils.torch_utils import select_device, smart_inference_mode @smart_inference_mode() @@ -83,27 +83,24 @@ def run( workers=workers) model.eval() - pred, targets, loss, dt = [], [], 0, [0.0, 0.0, 0.0] + pred, targets, loss, dt = [], [], 0, (Profile(), Profile(), Profile()) n = len(dataloader) # number of batches action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing' desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}" bar = tqdm(dataloader, desc, n, not training, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}', position=0) with torch.cuda.amp.autocast(enabled=device.type != 'cpu'): for images, labels in bar: - t1 = time_sync() - images, labels = images.to(device, non_blocking=True), labels.to(device) - t2 = time_sync() - dt[0] += t2 - t1 + with dt[0]: + images, labels = images.to(device, non_blocking=True), labels.to(device) - y = model(images) - t3 = time_sync() - dt[1] += t3 - t2 + with dt[1]: + y = model(images) - pred.append(y.argsort(1, descending=True)[:, :5]) - targets.append(labels) - if criterion: - loss += criterion(y, labels) - dt[2] += time_sync() - t3 + with dt[2]: + pred.append(y.argsort(1, descending=True)[:, :5]) + targets.append(labels) + if criterion: + loss += criterion(y, labels) loss /= n pred, targets = torch.cat(pred), torch.cat(targets) @@ -122,7 +119,7 @@ def run( LOGGER.info(f"{c:>24}{aci.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}") # Print results - t = tuple(x / len(dataloader.dataset.samples) * 1E3 for x in dt) # speeds per image + t = tuple(x.t / len(dataloader.dataset.samples) * 1E3 for x in dt) # speeds per image shape = (1, 3, imgsz, imgsz) LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t) LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") diff --git a/detect.py b/detect.py index dd60b87ca33a..93ae0baccd13 100644 --- a/detect.py +++ b/detect.py @@ -41,10 +41,10 @@ from models.common import DetectMultiBackend from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams -from utils.general import (LOGGER, check_file, check_img_size, 
check_imshow, check_requirements, colorstr, cv2, +from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box -from utils.torch_utils import select_device, smart_inference_mode, time_sync +from utils.torch_utils import select_device, smart_inference_mode @smart_inference_mode() @@ -107,26 +107,23 @@ def run( # Run inference model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup - seen, windows, dt = 0, [], [0.0, 0.0, 0.0] + seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: - t1 = time_sync() - im = torch.from_numpy(im).to(device) - im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 - im /= 255 # 0 - 255 to 0.0 - 1.0 - if len(im.shape) == 3: - im = im[None] # expand for batch dim - t2 = time_sync() - dt[0] += t2 - t1 + with dt[0]: + im = torch.from_numpy(im).to(device) + im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + if len(im.shape) == 3: + im = im[None] # expand for batch dim # Inference - visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False - pred = model(im, augment=augment, visualize=visualize) - t3 = time_sync() - dt[1] += t3 - t2 + with dt[1]: + visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False + pred = model(im, augment=augment, visualize=visualize) # NMS - pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) - dt[2] += time_sync() - t3 + with dt[2]: + pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) @@ -201,10 +198,10 @@ def run( vid_writer[i].write(im0) # Print time (inference-only) - LOGGER.info(f'{s}Done. 
({t3 - t2:.3f}s)') + LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") # Print results - t = tuple(x / seen * 1E3 for x in dt) # speeds per image + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' diff --git a/models/common.py b/models/common.py index f914c9d60fdb..33aa2ac12465 100644 --- a/models/common.py +++ b/models/common.py @@ -21,10 +21,11 @@ from torch.cuda import amp from utils.dataloaders import exif_transpose, letterbox -from utils.general import (LOGGER, ROOT, check_requirements, check_suffix, check_version, colorstr, increment_path, - make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh, yaml_load) +from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr, + increment_path, make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh, + yaml_load) from utils.plots import Annotator, colors, save_one_box -from utils.torch_utils import copy_attr, smart_inference_mode, time_sync +from utils.torch_utils import copy_attr, smart_inference_mode def autopad(k, p=None): # kernel, padding @@ -587,9 +588,9 @@ def _apply(self, fn): return self @smart_inference_mode() - def forward(self, imgs, size=640, augment=False, profile=False): + def forward(self, ims, size=640, augment=False, profile=False): # Inference from various sources. For height=640, width=1280, RGB images example inputs are: - # file: imgs = 'data/images/zidane.jpg' # str or PosixPath + # file: ims = 'data/images/zidane.jpg' # str or PosixPath # URI: = 'https://ultralytics.com/images/zidane.jpg' # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) # PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3) @@ -597,65 +598,65 @@ def forward(self, imgs, size=640, augment=False, profile=False): # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values) # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] 
# list of images - t = [time_sync()] - p = next(self.model.parameters()) if self.pt else torch.zeros(1, device=self.model.device) # for device, type - autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference - if isinstance(imgs, torch.Tensor): # torch - with amp.autocast(autocast): - return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference - - # Pre-process - n, imgs = (len(imgs), list(imgs)) if isinstance(imgs, (list, tuple)) else (1, [imgs]) # number, list of images - shape0, shape1, files = [], [], [] # image and inference shapes, filenames - for i, im in enumerate(imgs): - f = f'image{i}' # filename - if isinstance(im, (str, Path)): # filename or uri - im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im - im = np.asarray(exif_transpose(im)) - elif isinstance(im, Image.Image): # PIL Image - im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f - files.append(Path(f).with_suffix('.jpg').name) - if im.shape[0] < 5: # image in CHW - im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) - im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) # enforce 3ch input - s = im.shape[:2] # HWC - shape0.append(s) # image shape - g = (size / max(s)) # gain - shape1.append([y * g for y in s]) - imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update - shape1 = [make_divisible(x, self.stride) if self.pt else size for x in np.array(shape1).max(0)] # inf shape - x = [letterbox(im, shape1, auto=False)[0] for im in imgs] # pad - x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW - x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 - t.append(time_sync()) + dt = (Profile(), Profile(), Profile()) + with dt[0]: + p = next(self.model.parameters()) if self.pt else torch.zeros(1, device=self.model.device) # param + autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference + if isinstance(ims, torch.Tensor): # torch + with amp.autocast(autocast): + return self.model(ims.to(p.device).type_as(p), augment, profile) # inference + + # Pre-process + n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims]) # number, list of images + shape0, shape1, files = [], [], [] # image and inference shapes, filenames + for i, im in enumerate(ims): + f = f'image{i}' # filename + if isinstance(im, (str, Path)): # filename or uri + im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im + im = np.asarray(exif_transpose(im)) + elif isinstance(im, Image.Image): # PIL Image + im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f + files.append(Path(f).with_suffix('.jpg').name) + if im.shape[0] < 5: # image in CHW + im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) + im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) # enforce 3ch input + s = im.shape[:2] # HWC + shape0.append(s) # image shape + g = (size / max(s)) # gain + shape1.append([y * g for y in s]) + ims[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update + shape1 = [make_divisible(x, self.stride) if self.pt else size for x in np.array(shape1).max(0)] # inf shape + x = [letterbox(im, shape1, auto=False)[0] for im in ims] # pad + x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW + x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # 
uint8 to fp16/32 with amp.autocast(autocast): # Inference - y = self.model(x, augment, profile) # forward - t.append(time_sync()) + with dt[1]: + y = self.model(x, augment, profile) # forward # Post-process - y = non_max_suppression(y if self.dmb else y[0], - self.conf, - self.iou, - self.classes, - self.agnostic, - self.multi_label, - max_det=self.max_det) # NMS - for i in range(n): - scale_coords(shape1, y[i][:, :4], shape0[i]) + with dt[2]: + y = non_max_suppression(y if self.dmb else y[0], + self.conf, + self.iou, + self.classes, + self.agnostic, + self.multi_label, + max_det=self.max_det) # NMS + for i in range(n): + scale_coords(shape1, y[i][:, :4], shape0[i]) - t.append(time_sync()) - return Detections(imgs, y, files, t, self.names, x.shape) + return Detections(ims, y, files, dt, self.names, x.shape) class Detections: # YOLOv5 detections class for inference results - def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, shape=None): + def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None): super().__init__() d = pred[0].device # device - gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in imgs] # normalizations - self.imgs = imgs # list of images as numpy arrays + gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in ims] # normalizations + self.ims = ims # list of images as numpy arrays self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) self.names = names # class names self.files = files # image filenames @@ -665,12 +666,12 @@ def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, shape=None self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized self.n = len(self.pred) # number of images (batch size) - self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) + self.t = tuple(x.t / self.n * 1E3 for x in times) # timestamps (ms) self.s = shape # inference BCHW shape def display(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): crops = [] - for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): + for i, (im, pred) in enumerate(zip(self.ims, self.pred)): s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string if pred.shape[0]: for c in pred[:, -1].unique(): @@ -705,7 +706,7 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False if i == self.n - 1: LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}") if render: - self.imgs[i] = np.asarray(im) + self.ims[i] = np.asarray(im) if crop: if save: LOGGER.info(f'Saved results to {save_dir}\n') @@ -728,7 +729,7 @@ def crop(self, save=True, save_dir='runs/detect/exp'): def render(self, labels=True): self.display(render=True, labels=labels) # render results - return self.imgs + return self.ims def pandas(self): # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0]) @@ -743,9 +744,9 @@ def pandas(self): def tolist(self): # return a list of Detections objects, i.e. 
'for result in results.tolist():' r = range(self.n) # iterable - x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r] + x = [Detections([self.ims[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r] # for d in x: - # for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: + # for k in ['ims', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: # setattr(d, k, getattr(d, k)[0]) # pop out of list return x diff --git a/utils/general.py b/utils/general.py index 76872b696d79..42d000918c13 100755 --- a/utils/general.py +++ b/utils/general.py @@ -141,16 +141,26 @@ def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): class Profile(contextlib.ContextDecorator): - # Usage: @Profile() decorator or 'with Profile():' context manager + # YOLOv5 Profile class. Usage: @Profile() decorator or 'with Profile():' context manager + def __init__(self, t=0.0): + self.t = t + self.cuda = torch.cuda.is_available() + def __enter__(self): - self.start = time.time() + self.start = self.time() def __exit__(self, type, value, traceback): - print(f'Profile results: {time.time() - self.start:.5f}s') + self.dt = self.time() - self.start # delta-time + self.t += self.dt # accumulate dt + + def time(self): + if self.cuda: + torch.cuda.synchronize() + return time.time() class Timeout(contextlib.ContextDecorator): - # Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager + # YOLOv5 Timeout class. Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True): self.seconds = int(seconds) self.timeout_message = timeout_msg diff --git a/val.py b/val.py index ce743b506aff..876fc5bf50bb 100644 --- a/val.py +++ b/val.py @@ -37,7 +37,7 @@ from models.common import DetectMultiBackend from utils.callbacks import Callbacks from utils.dataloaders import create_dataloader -from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, check_yaml, +from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, scale_coords, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, ap_per_class, box_iou @@ -187,26 +187,24 @@ def run( names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') - dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 + dt, p, r, f1, mp, mr, map50, map = (Profile(), Profile(), Profile()), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] callbacks.run('on_val_start') pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar for batch_i, (im, targets, paths, shapes) in enumerate(pbar): callbacks.run('on_val_batch_start') - t1 = time_sync() - if cuda: - im = im.to(device, non_blocking=True) - targets = targets.to(device) - im = im.half() if half else im.float() # uint8 to fp16/32 - im /= 255 # 0 - 255 to 0.0 - 1.0 - nb, _, height, width = im.shape # batch size, channels, height, width - t2 = time_sync() - dt[0] += t2 - t1 + with dt[0]: + if cuda: + im = im.to(device, non_blocking=True) + targets = targets.to(device) + im = im.half() if half else im.float() # uint8 to fp16/32 + im /= 
255 # 0 - 255 to 0.0 - 1.0 + nb, _, height, width = im.shape # batch size, channels, height, width # Inference - out, train_out = model(im) if training else model(im, augment=augment, val=True) # inference, loss outputs - dt[1] += time_sync() - t2 + with dt[1]: + out, train_out = model(im) if training else model(im, augment=augment, val=True) # inference, loss outputs # Loss if compute_loss: @@ -215,9 +213,8 @@ def run( # NMS targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling - t3 = time_sync() - out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) - dt[2] += time_sync() - t3 + with dt[2]: + out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) # Metrics for si, pred in enumerate(out): @@ -284,7 +281,7 @@ def run( LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) # Print speeds - t = tuple(x / seen * 1E3 for x in dt) # speeds per image + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image if not training: shape = (batch_size, 3, imgsz, imgsz) LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) From 61adf017f231f470afca2636f1f13e4cce13914b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 18 Aug 2022 20:12:33 +0200 Subject: [PATCH 023/326] `torch.empty()` for speed improvements (#9025) `torch.empty()` for speed improvement Signed-off-by: Glenn Jocher --- models/common.py | 4 ++-- models/yolo.py | 6 +++--- utils/autobatch.py | 2 +- utils/loggers/__init__.py | 2 +- utils/torch_utils.py | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/models/common.py b/models/common.py index 33aa2ac12465..44192e622bb5 100644 --- a/models/common.py +++ b/models/common.py @@ -531,7 +531,7 @@ def warmup(self, imgsz=(1, 3, 640, 640)): # Warmup model by running inference once warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb if any(warmup_types) and self.device.type != 'cpu': - im = torch.zeros(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input + im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input for _ in range(2 if self.jit else 1): # self.forward(im) # warmup @@ -600,7 +600,7 @@ def forward(self, ims, size=640, augment=False, profile=False): dt = (Profile(), Profile(), Profile()) with dt[0]: - p = next(self.model.parameters()) if self.pt else torch.zeros(1, device=self.model.device) # param + p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device) # param autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference if isinstance(ims, torch.Tensor): # torch with amp.autocast(autocast): diff --git a/models/yolo.py b/models/yolo.py index df4209726e0d..32a47e9591da 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -46,8 +46,8 @@ def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer self.no = nc + 5 # number of outputs per anchor self.nl = len(anchors) # number of detection layers self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.zeros(1)] * self.nl # init grid - self.anchor_grid = [torch.zeros(1)] * self.nl # init anchor grid + self.grid = [torch.empty(1)] * self.nl # init grid + self.anchor_grid = [torch.empty(1)] * self.nl # init anchor grid 
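        # torch.empty() skips the zero-fill pass that torch.zeros() performs, so these
        # placeholder tensors are cheaper to allocate; both grids are rebuilt by
        # _make_grid() before they are first read, so their uninitialized values are
        # never used.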
self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv self.inplace = inplace # use inplace ops (e.g. slice assignment) @@ -175,7 +175,7 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i if isinstance(m, Detect): s = 256 # 2x min stride m.inplace = self.inplace - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward + m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.empty(1, ch, s, s))]) # forward check_anchor_order(m) # must be in pixel-space (not grid-space) m.anchors /= m.stride.view(-1, 1, 1) self.stride = m.stride diff --git a/utils/autobatch.py b/utils/autobatch.py index c231d24c0706..07cddc99f400 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -47,7 +47,7 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): # Profile batch sizes batch_sizes = [1, 2, 4, 8, 16] try: - img = [torch.zeros(b, 3, imgsz, imgsz) for b in batch_sizes] + img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes] results = profile(img, model, n=3, device=device) except Exception as e: LOGGER.warning(f'{prefix}{e}') diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 8ec846f8cfac..34704b625294 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -300,7 +300,7 @@ def log_tensorboard_graph(tb, model, imgsz=(640, 640)): try: p = next(model.parameters()) # for device, type imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz # expand - im = torch.zeros((1, 3, *imgsz)).to(p.device).type_as(p) # input image + im = torch.empty((1, 3, *imgsz)).to(p.device).type_as(p) # input image with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress jit trace warning tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), []) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index ed56064ce02e..4de2520b26a2 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -282,7 +282,7 @@ def model_info(model, verbose=False, imgsz=640): try: # FLOPs p = next(model.parameters()) stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 # max stride - im = torch.zeros((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format + im = torch.empty((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format flops = thop.profile(deepcopy(model), inputs=(im,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz] # expand if int/float fs = f', {flops * imgsz[0] / stride * imgsz[1] / stride:.1f} GFLOPs' # 640x640 GFLOPs From de6e6c0110adbb41f829c1288d5cdab7105892ae Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 18 Aug 2022 20:23:14 +0200 Subject: [PATCH 024/326] Created using Colaboratory --- tutorial.ipynb | 136 ++++++++++++++++++++++++------------------------- 1 file changed, 68 insertions(+), 68 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 1438924e4112..97e572798427 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -17,7 +17,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "57c562894aed45cd9a107d0455e3e3f4": { + "6d6b90ead2db49b3bdf624b6ba9b44e9": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", @@ -32,14 +32,14 @@ "_view_name": "HBoxView", "box_style": "", "children": [ 
- "IPY_MODEL_040d53c6cc924350bcb656cd21a7c713", - "IPY_MODEL_e029890942a74c098408ce5a9a566d51", - "IPY_MODEL_8fb991c03e434566a4297b6ab9446f89" + "IPY_MODEL_cb77443edb9e43328a56aaa4413a0df3", + "IPY_MODEL_954c8b8699e143bf92be6bfc02fc52f6", + "IPY_MODEL_a64775946e13477f83d8bba6086385b9" ], - "layout": "IPY_MODEL_a9a376923a7742d89fb335db709c7a7e" + "layout": "IPY_MODEL_1413611b7f4f4ef99e4f541f5ca35ed6" } }, - "040d53c6cc924350bcb656cd21a7c713": { + "cb77443edb9e43328a56aaa4413a0df3": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -54,13 +54,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_8b4276ac834c4735bf60ee9b761b9962", + "layout": "IPY_MODEL_00737f5558eb4fbd968172acb978e54a", "placeholder": "​", - "style": "IPY_MODEL_52cc8da75b724198856617247541cb1e", + "style": "IPY_MODEL_f03e5ddfd1c04bedaf68ab02c3f6f0ea", "value": "100%" } }, - "e029890942a74c098408ce5a9a566d51": { + "954c8b8699e143bf92be6bfc02fc52f6": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", @@ -76,15 +76,15 @@ "bar_style": "success", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_b6652f46480243c4adf60e6440043d6f", + "layout": "IPY_MODEL_6926db7e0035455f99e1dd4508c4b19c", "max": 818322941, "min": 0, "orientation": "horizontal", - "style": "IPY_MODEL_e502754177ff4ea8abf82a6e9ac77a4a", + "style": "IPY_MODEL_a6a52c9f828b458e97ddf7a11ae9275f", "value": 818322941 } }, - "8fb991c03e434566a4297b6ab9446f89": { + "a64775946e13477f83d8bba6086385b9": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -99,13 +99,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_447398becdb04836b5ffb5915318db07", + "layout": "IPY_MODEL_c4c7dc45a1c24dc4b2c709e21271a37e", "placeholder": "​", - "style": "IPY_MODEL_2fddcb27ad4a4caa81ff51111f8d0ed6", - "value": " 780M/780M [01:17<00:00, 12.3MB/s]" + "style": "IPY_MODEL_09c43ffe2c7e4bdc9489e83f9d82ab73", + "value": " 780M/780M [01:12<00:00, 23.8MB/s]" } }, - "a9a376923a7742d89fb335db709c7a7e": { + "1413611b7f4f4ef99e4f541f5ca35ed6": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -157,7 +157,7 @@ "width": null } }, - "8b4276ac834c4735bf60ee9b761b9962": { + "00737f5558eb4fbd968172acb978e54a": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -209,7 +209,7 @@ "width": null } }, - "52cc8da75b724198856617247541cb1e": { + "f03e5ddfd1c04bedaf68ab02c3f6f0ea": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -224,7 +224,7 @@ "description_width": "" } }, - "b6652f46480243c4adf60e6440043d6f": { + "6926db7e0035455f99e1dd4508c4b19c": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -276,7 +276,7 @@ "width": null } }, - "e502754177ff4ea8abf82a6e9ac77a4a": { + "a6a52c9f828b458e97ddf7a11ae9275f": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", @@ -292,7 +292,7 @@ "description_width": "" } }, - "447398becdb04836b5ffb5915318db07": { + "c4c7dc45a1c24dc4b2c709e21271a37e": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -344,7 +344,7 @@ "width": 
null } }, - "2fddcb27ad4a4caa81ff51111f8d0ed6": { + "09c43ffe2c7e4bdc9489e83f9d82ab73": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -404,7 +404,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "e0f693e4-413b-4cc8-ae7e-91537da370b0" + "outputId": "508de90c-846e-495d-c7d6-50681af62a98" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", @@ -421,7 +421,7 @@ "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-2-g7c9486e Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n" + "YOLOv5 🚀 v6.2-15-g61adf01 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n" ] }, { @@ -461,7 +461,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "941d625b-01a1-4f1b-dfd2-d9ef1c945715" + "outputId": "93881540-331e-4890-cd38-4c2776933238" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", @@ -474,16 +474,16 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False\n", - "YOLOv5 🚀 v6.2-2-g7c9486e Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-15-g61adf01 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 50.5MB/s]\n", + "100% 14.1M/14.1M [00:00<00:00, 39.3MB/s]\n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.014s)\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. 
(0.020s)\n", - "Speed: 0.6ms pre-process, 17.0ms inference, 20.2ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 14.9ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 22.0ms\n", + "Speed: 0.6ms pre-process, 18.4ms inference, 24.1ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" ] } @@ -527,20 +527,20 @@ "base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": [ - "57c562894aed45cd9a107d0455e3e3f4", - "040d53c6cc924350bcb656cd21a7c713", - "e029890942a74c098408ce5a9a566d51", - "8fb991c03e434566a4297b6ab9446f89", - "a9a376923a7742d89fb335db709c7a7e", - "8b4276ac834c4735bf60ee9b761b9962", - "52cc8da75b724198856617247541cb1e", - "b6652f46480243c4adf60e6440043d6f", - "e502754177ff4ea8abf82a6e9ac77a4a", - "447398becdb04836b5ffb5915318db07", - "2fddcb27ad4a4caa81ff51111f8d0ed6" + "6d6b90ead2db49b3bdf624b6ba9b44e9", + "cb77443edb9e43328a56aaa4413a0df3", + "954c8b8699e143bf92be6bfc02fc52f6", + "a64775946e13477f83d8bba6086385b9", + "1413611b7f4f4ef99e4f541f5ca35ed6", + "00737f5558eb4fbd968172acb978e54a", + "f03e5ddfd1c04bedaf68ab02c3f6f0ea", + "6926db7e0035455f99e1dd4508c4b19c", + "a6a52c9f828b458e97ddf7a11ae9275f", + "c4c7dc45a1c24dc4b2c709e21271a37e", + "09c43ffe2c7e4bdc9489e83f9d82ab73" ] }, - "outputId": "d593b41a-55e7-48a5-e285-5df449edc8c0" + "outputId": "ed2ca46e-a1a9-4a16-c449-859278d8aa18" }, "source": [ "# Download COCO val\n", @@ -558,7 +558,7 @@ "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, - "model_id": "57c562894aed45cd9a107d0455e3e3f4" + "model_id": "6d6b90ead2db49b3bdf624b6ba9b44e9" } }, "metadata": {} @@ -572,7 +572,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "701132a6-9ca8-4e1f-c89f-5d38893a6fc4" + "outputId": "19a590ef-363e-424c-d9ce-78bbe0593cd5" }, "source": [ "# Run YOLOv5x on COCO val\n", @@ -585,35 +585,35 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-2-g7c9486e Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-15-g61adf01 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt to yolov5x.pt...\n", - "100% 166M/166M [00:11<00:00, 15.1MB/s]\n", + "100% 166M/166M [00:06<00:00, 28.1MB/s]\n", "\n", "Fusing layers... 
\n", "YOLOv5x summary: 444 layers, 86705005 parameters, 0 gradients\n", "Downloading https://ultralytics.com/assets/Arial.ttf to /root/.config/Ultralytics/Arial.ttf...\n", - "100% 755k/755k [00:00<00:00, 48.6MB/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 10889.87it/s]\n", + "100% 755k/755k [00:00<00:00, 47.3MB/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 10756.32it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:05<00:00, 2.38it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:07<00:00, 2.33it/s]\n", " all 5000 36335 0.743 0.625 0.683 0.504\n", - "Speed: 0.1ms pre-process, 4.7ms inference, 1.0ms NMS per image at shape (32, 3, 640, 640)\n", + "Speed: 0.1ms pre-process, 4.6ms inference, 1.2ms NMS per image at shape (32, 3, 640, 640)\n", "\n", "Evaluating pycocotools mAP... saving runs/val/exp/yolov5x_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.39s)\n", + "Done (t=0.41s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=5.53s)\n", + "DONE (t=5.64s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=73.01s).\n", + "DONE (t=76.80s).\n", "Accumulating evaluation results...\n", - "DONE (t=15.27s).\n", + "DONE (t=14.61s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.506\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.549\n", @@ -745,7 +745,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "50a9318f-d438-41d5-db95-928f1842c057" + "outputId": "47759d5e-34f0-4a6a-c714-ff533391cfff" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", @@ -759,7 +759,7 @@ "text": [ "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-2-g7c9486e Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-15-g61adf01 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, 
copy_paste=0.0\n", "\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases\n", @@ -768,8 +768,8 @@ "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", - "100% 6.66M/6.66M [00:00<00:00, 12.4MB/s]\n", - "Dataset download success ✅ (1.3s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.66M/6.66M [00:00<00:00, 75.3MB/s]\n", + "Dataset download success ✅ (0.7s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -803,11 +803,11 @@ "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 8516.89it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 7246.20it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 1043.44it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 986.21it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00 Date: Thu, 18 Aug 2022 20:26:18 +0200 Subject: [PATCH 025/326] Remove unused `time_sync` import (#9026) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- val.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/val.py b/val.py index 876fc5bf50bb..7b4fab4c63be 100644 --- a/val.py +++ b/val.py @@ -42,7 +42,7 @@ scale_coords, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, ap_per_class, box_iou from utils.plots import output_to_target, plot_images, plot_val_study -from utils.torch_utils import select_device, smart_inference_mode, time_sync +from utils.torch_utils import select_device, smart_inference_mode def save_one_txt(predn, save_conf, shape, file): From eb359c3a226f55c9b51efcfeae2e31c820e6e08a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 18 Aug 2022 21:45:11 +0200 Subject: [PATCH 026/326] Add PyTorch Hub classification CI checks (#9027) * Add PyTorch Hub classification CI checks Add PyTorch Hub loading of official and custom trained classification models to CI checks. 
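A minimal sketch of the kind of load this adds to CI, assuming the standard `hubconf.py` 'custom' entry point (the exact inline script added to the workflow is truncated in the diff below, and the checkpoint paths here are illustrative):

    import torch

    # Load one official and one custom-trained classification checkpoint via
    # PyTorch Hub; 'path/to/best.pt' is a placeholder, not the CI artifact path.
    for w in ('yolov5s-cls.pt', 'path/to/best.pt'):
        model = torch.hub.load('ultralytics/yolov5', 'custom', path=w)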
May help resolve https://github.com/ultralytics/yolov5/issues/8790#issuecomment-1219840718 Signed-off-by: Glenn Jocher * Update hubconf.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 5 +++++ hubconf.py | 6 +++--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index aa797c44d487..fde6fffe92f4 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -133,3 +133,8 @@ jobs: python classify/predict.py --imgsz 32 --weights $b --source ../datasets/mnist2560/test/7/60.png # predict python classify/predict.py --imgsz 32 --weights $m --source data/images/bus.jpg # predict python export.py --weights $b --img 64 --imgsz 224 --include torchscript # export + python - < Date: Fri, 19 Aug 2022 01:30:14 +0200 Subject: [PATCH 027/326] Created using Colaboratory --- tutorial.ipynb | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 97e572798427..7a1edf7ef86a 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -415,7 +415,7 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": 1, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -467,7 +467,7 @@ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": 2, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -547,7 +547,7 @@ "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" ], - "execution_count": 3, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -578,7 +578,7 @@ "# Run YOLOv5x on COCO val\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], - "execution_count": 4, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -751,7 +751,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": 5, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -1113,7 +1113,7 @@ "cell_type": "code", "source": [ "# Classification\n", - "for m in [*(f'yolov5{x}.pt' for x in 'nsmlx'), 'resnet50.pt', 'efficientnet_b0.pt']:\n", + "for m in [*(f'yolov5{x}-cls.pt' for x in 'nsmlx'), 'resnet50.pt', 'efficientnet_b0.pt']:\n", " for d in 'mnist', 'fashion-mnist', 'cifar10', 'cifar100', 'imagenette160', 'imagenette320', 'imagenette', 'imagewoof160', 'imagewoof320', 'imagewoof':\n", " !python classify/train.py --model {m} --data {d} --epochs 10 --project YOLOv5-cls --name {m}-{d}" ], From 840b7232dbaff6296e6e2519895c3065e937fdcf Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 19 Aug 2022 01:59:51 +0200 Subject: [PATCH 028/326] Attach transforms to model (#9028) * Attach transforms to model Signed-off-by: Glenn Jocher * Update val.py Signed-off-by: Glenn Jocher * Update train.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/train.py | 10 +++++----- classify/val.py | 3 +-- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/classify/train.py b/classify/train.py index 9fb7c52b545a..5881e16e47db 100644 --- a/classify/train.py +++ b/classify/train.py @@ -122,16 +122,16 @@ def train(opt, device): for p in model.parameters(): 
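        # re-enable gradients: saved checkpoints may store EMA weights with requires_grad=False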
p.requires_grad = True # for training model = model.to(device) - names = trainloader.dataset.classes # class names - model.names = names # attach class names # Info if RANK in {-1, 0}: + model.names = trainloader.dataset.classes # attach class names + model.transforms = testloader.dataset.torch_transforms # attach inference transforms model_info(model) if opt.verbose: LOGGER.info(model) images, labels = next(iter(trainloader)) - file = imshow_cls(images[:25], labels[:25], names=names, f=save_dir / 'train_images.jpg') + file = imshow_cls(images[:25], labels[:25], names=model.names, f=save_dir / 'train_images.jpg') logger.log_images(file, name='Train Examples') logger.log_graph(model, imgsz) # log model @@ -254,8 +254,8 @@ def train(opt, device): # Plot examples images, labels = (x[:25] for x in next(iter(testloader))) # first 25 images and labels - pred = torch.max(ema.ema((images.half() if cuda else images.float()).to(device)), 1)[1] - file = imshow_cls(images, labels, pred, names, verbose=False, f=save_dir / 'test_images.jpg') + pred = torch.max(ema.ema(images.to(device)), 1)[1] + file = imshow_cls(images, labels, pred, model.names, verbose=False, f=save_dir / 'test_images.jpg') # Log results meta = {"epochs": epochs, "top1_acc": best_fitness, "date": datetime.now().isoformat()} diff --git a/classify/val.py b/classify/val.py index c91e2cf82c81..2353737957d3 100644 --- a/classify/val.py +++ b/classify/val.py @@ -39,7 +39,7 @@ def run( project=ROOT / 'runs/val-cls', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment - half=True, # use FP16 half-precision inference + half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, @@ -124,7 +124,6 @@ def run( LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t) LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") - model.float() # for training return top1, top5, loss From 1cd3e752def0ecbcb39a95d75e3c93fad3114ab9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 19 Aug 2022 02:01:40 +0200 Subject: [PATCH 029/326] Created using Colaboratory --- tutorial.ipynb | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 7a1edf7ef86a..a70887e97360 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1112,8 +1112,8 @@ { "cell_type": "code", "source": [ - "# Classification\n", - "for m in [*(f'yolov5{x}-cls.pt' for x in 'nsmlx'), 'resnet50.pt', 'efficientnet_b0.pt']:\n", + "# Classification train\n", + "for m in [*(f'yolov5{x}-cls.pt' for x in 'nsmlx'), 'resnet50.pt', 'resnet101.pt', 'efficientnet_b0.pt', 'efficientnet_b1.pt']:\n", " for d in 'mnist', 'fashion-mnist', 'cifar10', 'cifar100', 'imagenette160', 'imagenette320', 'imagenette', 'imagewoof160', 'imagewoof320', 'imagewoof':\n", " !python classify/train.py --model {m} --data {d} --epochs 10 --project YOLOv5-cls --name {m}-{d}" ], @@ -1123,6 +1123,19 @@ "execution_count": null, "outputs": [] }, + { + "cell_type": "code", + "source": [ + "# Classification val\n", + "!bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images)\n", + "!python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate" + ], + "metadata": { + "id": "yYgOiFNHZx-1" + }, + "execution_count": null, + "outputs": [] + }, { "cell_type": "code", "metadata": { From 
781401ec70bc481b789b214003b722174e4b99e0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 19 Aug 2022 15:06:37 +0200 Subject: [PATCH 030/326] Default --data `imagenette160` training (fastest) (#9033) * Default --data `imagenette160` training (fastest) Signed-off-by: Glenn Jocher * Update train.py Signed-off-by: Glenn Jocher * Update train.py Signed-off-by: Glenn Jocher * Update train.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/train.py | 6 +++--- train.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/classify/train.py b/classify/train.py index 5881e16e47db..8fe90c1b19eb 100644 --- a/classify/train.py +++ b/classify/train.py @@ -6,7 +6,7 @@ Torchvision models: --model resnet50, efficientnet_b0, etc. See https://pytorch.org/vision/stable/models.html Usage - Single-GPU and Multi-GPU DDP - $ python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 128 + $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 128 $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 """ @@ -266,8 +266,8 @@ def train(opt, device): def parse_opt(known=False): parser = argparse.ArgumentParser() parser.add_argument('--model', type=str, default='yolov5s-cls.pt', help='initial weights path') - parser.add_argument('--data', type=str, default='mnist', help='cifar10, cifar100, mnist, imagenet, etc.') - parser.add_argument('--epochs', type=int, default=10) + parser.add_argument('--data', type=str, default='imagenette160', help='cifar10, cifar100, mnist, imagenet, ...') + parser.add_argument('--epochs', type=int, default=10, help='total training epochs') parser.add_argument('--batch-size', type=int, default=64, help='total batch size for all GPUs') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=128, help='train, val image size (pixels)') parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') diff --git a/train.py b/train.py index 10a3bdb56002..279d52de6d74 100644 --- a/train.py +++ b/train.py @@ -436,7 +436,7 @@ def parse_opt(known=False): parser.add_argument('--cfg', type=str, default='', help='model.yaml path') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') - parser.add_argument('--epochs', type=int, default=300) + parser.add_argument('--epochs', type=int, default=300, help='total training epochs') parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') parser.add_argument('--rect', action='store_true', help='rectangular training') From 4a8ab3bc42d32f3e2e9c026b87dc29fba6143064 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 19 Aug 2022 15:07:04 +0200 Subject: [PATCH 031/326] VOC `names` dictionary fix (#9034) * VOC names dictionary fix Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- data/VOC.yaml | 5 +++-- utils/dataloaders.py | 10 ++++++---- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/data/VOC.yaml b/data/VOC.yaml index bbe5cf90a838..27d38109c53a 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -65,12 +65,13 @@ 
download: | w = int(size.find('width').text) h = int(size.find('height').text) + names = list(yaml['names'].values()) # names list for obj in root.iter('object'): cls = obj.find('name').text - if cls in yaml['names'] and not int(obj.find('difficult').text) == 1: + if cls in names and int(obj.find('difficult').text) != 1: xmlbox = obj.find('bndbox') bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')]) - cls_id = yaml['names'].index(cls) # class id + cls_id = names.index(cls) # class id out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n') diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 3f26be2cd32d..e73b20a58915 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -35,7 +35,7 @@ from utils.torch_utils import torch_distributed_zero_first # Parameters -HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' +HELP_URL = 'See https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp' # include image suffixes VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}' # tqdm bar format @@ -456,7 +456,7 @@ def __init__(self, # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib assert self.im_files, f'{prefix}No images found' except Exception as e: - raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}') + raise Exception(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}') # Check cache self.label_files = img2label_paths(self.im_files) # labels @@ -475,11 +475,13 @@ def __init__(self, tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=BAR_FORMAT) # display cache results if cache['msgs']: LOGGER.info('\n'.join(cache['msgs'])) # display warnings - assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}' + assert nf > 0 or not augment, f'{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}' # Read cache [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items labels, shapes, self.segments = zip(*cache.values()) + nl = len(np.concatenate(labels, 0)) # number of labels + assert nl > 0 or not augment, f'{prefix}All labels empty in {cache_path}, can not start training. {HELP_URL}' self.labels = list(labels) self.shapes = np.array(shapes) self.im_files = list(cache.keys()) # update @@ -572,7 +574,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): if msgs: LOGGER.info('\n'.join(msgs)) if nf == 0: - LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}') + LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. 
{HELP_URL}') x['hash'] = get_hash(self.label_files + self.im_files) x['results'] = nf, nm, ne, nc, len(self.im_files) x['msgs'] = msgs # warnings From fdcb92a938ef27d1b277a156af7f7922400279e3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 19 Aug 2022 16:54:58 +0200 Subject: [PATCH 032/326] Update train.py `import val as validate` (#9037) * Update train.py `import val as validate` Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- train.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/train.py b/train.py index 279d52de6d74..665b4f5b609e 100644 --- a/train.py +++ b/train.py @@ -36,7 +36,7 @@ sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative -import val # for end-of-epoch mAP +import val as validate # for end-of-epoch mAP from models.experimental import attempt_load from models.yolo import Model from utils.autoanchor import check_anchors @@ -347,17 +347,17 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) final_epoch = (epoch + 1 == epochs) or stopper.possible_stop if not noval or final_epoch: # Calculate mAP - results, maps, _ = val.run(data_dict, - batch_size=batch_size // WORLD_SIZE * 2, - imgsz=imgsz, - half=amp, - model=ema.ema, - single_cls=single_cls, - dataloader=val_loader, - save_dir=save_dir, - plots=False, - callbacks=callbacks, - compute_loss=compute_loss) + results, maps, _ = validate.run(data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + half=amp, + model=ema.ema, + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + plots=False, + callbacks=callbacks, + compute_loss=compute_loss) # Update best mAP fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] @@ -407,12 +407,12 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio strip_optimizer(f) # strip optimizers if f is best: LOGGER.info(f'\nValidating {f}...') - results, _, _ = val.run( + results, _, _ = validate.run( data_dict, batch_size=batch_size // WORLD_SIZE * 2, imgsz=imgsz, model=attempt_load(f, device).half(), - iou_thres=0.65 if is_coco else 0.60, # best pycocotools results at 0.65 + iou_thres=0.65 if is_coco else 0.60, # best pycocotools at iou 0.65 single_cls=single_cls, dataloader=val_loader, save_dir=save_dir, From aed88848a25fe0f4d98e70e79f0ee876265b48fd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 19 Aug 2022 18:00:38 +0200 Subject: [PATCH 033/326] Simplified notebook --- tutorial.ipynb | 67 ++++++++++++++------------------------------------ 1 file changed, 18 insertions(+), 49 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index a70887e97360..1c5d77813f15 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -439,7 +439,7 @@ "id": "4JnkELT0cIJg" }, "source": [ - "# 1. Inference\n", + "# 1. Detect\n", "\n", "`detect.py` runs YOLOv5 inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/detect`. Example inference sources are:\n", "\n", @@ -506,17 +506,7 @@ }, "source": [ "# 2. 
Validate\n", - "Validate a model's accuracy on [COCO](https://cocodataset.org/#home) val or test-dev datasets. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag. Note that `pycocotools` metrics may be ~1% better than the equivalent repo metrics, as is visible below, due to slight differences in mAP computation." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "eyTZYGgRjnMc" - }, - "source": [ - "## COCO val\n", - "Download [COCO val 2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L14) dataset (1GB - 5000 images), and test model accuracy." + "Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." ] }, { @@ -544,8 +534,8 @@ }, "source": [ "# Download COCO val\n", - "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip')\n", - "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" + "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download COCO val (1GB - 5000 images)\n", + "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" ], "execution_count": null, "outputs": [ @@ -575,7 +565,7 @@ "outputId": "19a590ef-363e-424c-d9ce-78bbe0593cd5" }, "source": [ - "# Run YOLOv5x on COCO val\n", + "# Validate YOLOv5x on COCO val\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], "execution_count": null, @@ -631,40 +621,6 @@ } ] }, - { - "cell_type": "markdown", - "metadata": { - "id": "rc_KbFk0juX2" - }, - "source": [ - "## COCO test\n", - "Download [COCO test2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L15) dataset (7GB - 40,000 images), to test model accuracy on test-dev set (**20,000 images, no labels**). Results are saved to a `*.json` file which should be **zipped** and submitted to the evaluation server at https://competitions.codalab.org/competitions/20794." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "V0AJnSeCIHyJ" - }, - "source": [ - "# Download COCO test-dev2017\n", - "!bash data/scripts/get_coco.sh --test" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "29GJXAP_lPrt" - }, - "source": [ - "# Run YOLOv5x on COCO test\n", - "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half --task test" - ], - "execution_count": null, - "outputs": [] - }, { "cell_type": "markdown", "metadata": { @@ -1136,6 +1092,19 @@ "execution_count": null, "outputs": [] }, + { + "cell_type": "code", + "source": [ + "# Validate on COCO test. 
Zip results.json and submit to eval server at https://competitions.codalab.org/competitions/20794\n", + "!bash data/scripts/get_coco.sh --test # download COCO test-dev2017 (7GB - 40,000 images, test 20,000)\n", + "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half --task test" + ], + "metadata": { + "id": "aq4DPWGu0Bl1" + }, + "execution_count": null, + "outputs": [] + }, { "cell_type": "code", "metadata": { From ba1c6773c2691943a355ad956105a4cb3aeedbca Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 19 Aug 2022 18:41:30 +0200 Subject: [PATCH 034/326] Created using Colaboratory --- tutorial.ipynb | 45 ++++++++++++++------------------------------- 1 file changed, 14 insertions(+), 31 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 1c5d77813f15..91e2d7e75eab 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -654,46 +654,29 @@ "

Label images lightning fast (including with model-assisted labeling)" ] }, - { - "cell_type": "code", - "metadata": { - "id": "bOy5KI2ncnWd" - }, - "source": [ - "# Tensorboard (optional)\n", - "%load_ext tensorboard\n", - "%tensorboard --logdir runs/train" - ], - "execution_count": null, - "outputs": [] - }, { "cell_type": "code", "source": [ - "# ClearML (optional)\n", - "%pip install -q clearml\n", - "!clearml-init" + "#@title Select YOLOv5 🚀 logger\n", + "logger = 'TensorBoard' #@param ['TensorBoard', 'ClearML', 'W&B']\n", + "\n", + "if logger == 'Tensorboard':\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train\n", + "elif logger == 'ClearML':\n", + " %pip install -q clearml\n", + " !clearml-init\n", + "elif logger == 'W&B':\n", + " %pip install -q wandb\n", + " import wandb\n", + " wandb.login()" ], "metadata": { - "id": "DQhI6vvaRWjR" + "id": "i3oKtE4g-aNn" }, "execution_count": null, "outputs": [] }, - { - "cell_type": "code", - "metadata": { - "id": "2fLAV42oNb7M" - }, - "source": [ - "# Weights & Biases (optional)\n", - "%pip install -q wandb\n", - "import wandb\n", - "wandb.login()" - ], - "execution_count": null, - "outputs": [] - }, { "cell_type": "code", "metadata": { From a409ec7953e1c5dd572fc73f633de38efe0c101a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 20 Aug 2022 16:29:08 +0200 Subject: [PATCH 035/326] AutoBatch protect from negative batch sizes (#9048) * AutoBatch protect from negative batch sizes Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/autobatch.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/utils/autobatch.py b/utils/autobatch.py index 07cddc99f400..8d12e46f0f09 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -60,6 +60,9 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): i = results.index(None) # first fail index if b >= batch_sizes[i]: # y intercept above failure point b = batch_sizes[max(i - 1, 0)] # select prior safe point + if b < 1: # zero or negative batch size + b = 16 + LOGGER.warning(f'{prefix}WARNING: ⚠️ CUDA anomaly detected, recommend restart environment and retry command.') fraction = np.polyval(p, b) / t # actual fraction predicted LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅') From fc8758a49bd30526fb21d0683359e86be3a292a8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 20 Aug 2022 16:45:11 +0200 Subject: [PATCH 036/326] Temporarily remove `macos-latest` from CI (#9049) * Temporarily remove macos-latest from CI macos-latest causing many failed CI runs that resolve after manually re-running 2 or 3 times. I don't know what the cause is. Will replace at a later date. 
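# A minimal, self-contained sketch of the batch-size selection this AutoBatch
# patch hardens (the memory figures below are fabricated example measurements,
# not real profiler output): fit memory use vs. batch size with a degree-1
# polynomial, solve for the batch that fills a target fraction of device
# memory, and fall back to a safe default when the fit yields a zero or
# negative size, as can happen after a CUDA anomaly.
import numpy as np

def pick_batch_size(batch_sizes, mem_used, mem_total, fraction=0.9, default=16):
    """Return the batch size predicted to use `fraction` of `mem_total` (GiB)."""
    p = np.polyfit(batch_sizes, mem_used, deg=1)  # mem ~= p[0] * batch + p[1]
    b = int((mem_total * fraction - p[1]) / p[0])  # solve the fitted line for batch
    if b < 1:  # zero or negative batch size: treat as anomaly, use safe default
        b = default
    return b

print(pick_batch_size([1, 2, 4, 8, 16], [0.6, 0.9, 1.5, 2.7, 5.1], mem_total=16))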
Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index fde6fffe92f4..4ef930c61233 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -47,7 +47,7 @@ jobs: strategy: fail-fast: false matrix: - os: [ ubuntu-latest, macos-latest, windows-latest ] + os: [ ubuntu-latest, windows-latest ] # macos-latest bug https://github.com/ultralytics/yolov5/pull/9049 python-version: [ '3.10' ] model: [ yolov5n ] include: From f258cf8b37aeb3062230d43e1e9a4bf3b9874588 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 20 Aug 2022 17:17:35 +0200 Subject: [PATCH 037/326] Add `--save-hybrid` mAP warning (#9050) * Add `--save-hybrid` mAP warning Signed-off-by: Glenn Jocher * Update val.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- val.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/val.py b/val.py index 7b4fab4c63be..fcaca889d7e2 100644 --- a/val.py +++ b/val.py @@ -365,6 +365,8 @@ def main(opt): if opt.task in ('train', 'val', 'test'): # run normally if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} > 0.001 produces invalid results ⚠️') + if opt.save_hybrid: + LOGGER.info('WARNING: --save-hybrid will return high mAP from hybrid labels, not from predictions alone ⚠️') run(**vars(opt)) else: From c725511bfc14eb86daf6edefa0d257084aa24c85 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 01:34:03 +0200 Subject: [PATCH 038/326] Refactor for simplification (#9054) * Refactor for simplification * cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/downloads.py | 2 +- utils/general.py | 5 +++-- utils/metrics.py | 2 +- utils/plots.py | 8 +++----- utils/torch_utils.py | 11 +++++------ 5 files changed, 13 insertions(+), 15 deletions(-) diff --git a/utils/downloads.py b/utils/downloads.py index c4d4a85c38ae..69887a579966 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -46,7 +46,7 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): except Exception as e: # url2 file.unlink(missing_ok=True) # remove partial downloads LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') - os.system(f"curl -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail + os.system(f"curl -# -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail finally: if not file.exists() or file.stat().st_size < min_bytes: # check file.unlink(missing_ok=True) # remove partial downloads diff --git a/utils/general.py b/utils/general.py index 42d000918c13..d9f436a36359 100755 --- a/utils/general.py +++ b/utils/general.py @@ -582,7 +582,7 @@ def url2file(url): def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3): - # Multi-threaded file download and unzip function, used in data.yaml for autodownload + # Multithreaded file download and unzip function, used in data.yaml for autodownload def download_one(url, dir): # Download 1 file success = True @@ -594,7 +594,8 @@ def download_one(url, dir): for i in range(retry + 1): if curl: s = 'sS' if threads > 1 else '' # silent - r = 
os.system(f'curl -{s}L "{url}" -o "{f}" --retry 9 -C -') # curl download with retry, continue + r = os.system( + f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -') # curl download with retry, continue success = r == 0 else: torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download diff --git a/utils/metrics.py b/utils/metrics.py index 08880cd3f212..8fa3c7e217c7 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -141,7 +141,7 @@ def process_batch(self, detections, labels): """ if detections is None: gt_classes = labels.int() - for i, gc in enumerate(gt_classes): + for gc in gt_classes: self.matrix[self.nc, gc] += 1 # background FN return diff --git a/utils/plots.py b/utils/plots.py index 7417308c4d82..2c7a80b4c872 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -3,6 +3,7 @@ Plotting utils """ +import contextlib import math import os from copy import copy @@ -180,8 +181,7 @@ def output_to_target(output): # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] targets = [] for i, o in enumerate(output): - for *box, conf, cls in o.cpu().numpy(): - targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf]) + targets.extend([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf] for *box, conf, cls in o.cpu().numpy()) return np.array(targets) @@ -357,10 +357,8 @@ def plot_labels(labels, names=(), save_dir=Path('')): matplotlib.use('svg') # faster ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) - try: # color histogram bars by class + with contextlib.suppress(Exception): # color histogram bars by class [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # known issue #3195 - except Exception: - pass ax[0].set_ylabel('instances') if 0 < len(names) < 30: ax[0].set_xticks(range(len(names))) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 4de2520b26a2..88108906bfd3 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -45,11 +45,10 @@ def decorate(fn): def smartCrossEntropyLoss(label_smoothing=0.0): # Returns nn.CrossEntropyLoss with label smoothing enabled for torch>=1.10.0 if check_version(torch.__version__, '1.10.0'): - return nn.CrossEntropyLoss(label_smoothing=label_smoothing) # loss function - else: - if label_smoothing > 0: - LOGGER.warning(f'WARNING: label smoothing {label_smoothing} requires torch>=1.10.0') - return nn.CrossEntropyLoss() # loss function + return nn.CrossEntropyLoss(label_smoothing=label_smoothing) + if label_smoothing > 0: + LOGGER.warning(f'WARNING: label smoothing {label_smoothing} requires torch>=1.10.0') + return nn.CrossEntropyLoss() def smart_DDP(model): @@ -118,7 +117,7 @@ def select_device(device='', batch_size=0, newline=True): assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \ f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" - if not (cpu or mps) and torch.cuda.is_available(): # prefer GPU if available + if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 
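# A rough, self-contained sketch (not the repo's download_one) of the retrying
# download pattern this refactor touches: try the native torch downloader
# first, then fall back to curl with a progress bar (-#) and resume (-C -)
# before checking the result on disk. The URL and destination are
# caller-supplied placeholders; nothing here is YOLOv5-specific.
import os
from pathlib import Path

import torch

def retry_download(url: str, f: Path, retries: int = 3) -> bool:
    for _ in range(retries + 1):
        try:
            torch.hub.download_url_to_file(url, str(f), progress=True)
        except Exception:
            # curl flags: -# progress bar, -L follow redirects, -C - resume a partial file
            os.system(f'curl -# -L "{url}" -o "{f}" --retry 9 -C -')
        if f.exists() and f.stat().st_size > 0:
            return True  # a non-empty file on disk counts as success here
        f.unlink(missing_ok=True)  # drop partial download before the next attempt
    return False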
0,1,6,7 n = len(devices) # device count if n > 1 and batch_size > 0: # check batch_size is divisible by device_count From 93f63ee33f2dd2fe9e61268464c9a79f30aa7549 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 02:00:35 +0200 Subject: [PATCH 039/326] Refactor for simplification 2 (#9055) * Refactor for simplification 2 * Update __init__.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 3 +-- utils/loggers/__init__.py | 20 +++++++------------- 2 files changed, 8 insertions(+), 15 deletions(-) diff --git a/export.py b/export.py index 7b398fdc4d93..166b5f406a20 100644 --- a/export.py +++ b/export.py @@ -436,8 +436,7 @@ def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}' subprocess.run(cmd.split()) - with open(f_json) as j: - json = j.read() + json = Path(f_json).read_text() with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order subst = re.sub( r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, ' diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 34704b625294..b95a463717f8 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -187,18 +187,16 @@ def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): # Callback runs on model save event - if self.wandb: - if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1: + if (epoch + 1) % self.opt.save_period == 0 and not final_epoch and self.opt.save_period != -1: + if self.wandb: self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) - - if self.clearml: - if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1: + if self.clearml: self.clearml.task.update_output_model(model_path=str(last), model_name='Latest Model', auto_delete_file=False) def on_train_end(self, last, best, plots, epoch, results): - # Callback runs on training end + # Callback runs on training end, i.e. 
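# Illustrative helper (assumed names, not code from the patch) for the
# consolidated save-period test used above when deciding whether to upload an
# epoch checkpoint: save every `period` epochs, never on the final epoch, and
# treat period == -1 as disabled.
def should_save_checkpoint(epoch: int, period: int, final_epoch: bool) -> bool:
    return period != -1 and not final_epoch and (epoch + 1) % period == 0

assert should_save_checkpoint(epoch=9, period=10, final_epoch=False)
assert not should_save_checkpoint(epoch=9, period=-1, final_epoch=False)
assert not should_save_checkpoint(epoch=9, period=10, final_epoch=True)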
saving best model if plots: plot_results(file=self.save_dir / 'results.csv') # save results.png files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] @@ -220,15 +218,11 @@ def on_train_end(self, last, best, plots, epoch, results): aliases=['latest', 'best', 'stripped']) self.wandb.finish_run() - if self.clearml: - # Save the best model here - if not self.opt.evolve: - self.clearml.task.update_output_model(model_path=str(best if best.exists() else last), - name='Best Model') + if self.clearml and not self.opt.evolve: + self.clearml.task.update_output_model(model_path=str(best if best.exists() else last), name='Best Model') - def on_params_update(self, params): + def on_params_update(self, params: dict): # Update hyperparams or configs of the experiment - # params: A dict containing {param: value} pairs if self.wandb: self.wandb.wandb_run.config.update(params, allow_val_change=True) From 841f312f9384d3ab8f2ff2ae287441ecfba03740 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 02:23:39 +0200 Subject: [PATCH 040/326] zero-mAP fix return `.detach()` to EMA (#9056) Resolves https://github.com/ultralytics/hub/issues/82 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/torch_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 88108906bfd3..b934248dee43 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -422,7 +422,7 @@ def update(self, model): for k, v in self.ema.state_dict().items(): if v.dtype.is_floating_point: # true for FP16 and FP32 v *= d - v += (1 - d) * msd[k] + v += (1 - d) * msd[k].detach() assert v.dtype == msd[k].dtype == torch.float32, f'EMA {v.dtype} and model {msd[k]} must be updated in FP32' def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): From 27fb6fd8fc21c20290041f38046d7a60ae8c6e3a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 03:22:24 +0200 Subject: [PATCH 041/326] zero-mAP fix 3 (#9058) * zero-mAP fix 3 Signed-off-by: Glenn Jocher * Update torch_utils.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update torch_utils.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/torch_utils.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index b934248dee43..5fbe8bbf10f6 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -412,7 +412,6 @@ def __init__(self, model, decay=0.9999, tau=2000, updates=0): for p in self.ema.parameters(): p.requires_grad_(False) - @smart_inference_mode() def update(self, model): # Update EMA parameters self.updates += 1 @@ -423,7 +422,7 @@ def update(self, model): if v.dtype.is_floating_point: # true for FP16 and FP32 v *= d v += (1 - d) * msd[k].detach() - assert v.dtype == msd[k].dtype == torch.float32, f'EMA {v.dtype} and model {msd[k]} must be updated in FP32' + assert v.dtype == msd[k].detach().dtype == torch.float32, f'EMA {v.dtype} and model {msd[k]} must both be FP32' def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): # Update EMA attributes From e0700cce776c557e7cee51103c53032b766f224a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 03:47:37 +0200 Subject: [PATCH 042/326] Daemon `plot_labels()` for faster start (#9057) * 
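# A stripped-down sketch of the EMA update these two zero-mAP fixes converge
# on: decay each FP32 shadow weight toward the model weight in place, using
# .detach() so the autograd graph stays out of the averaged copy, and without
# an inference-mode wrapper around the update. SimpleEMA is an assumed
# illustration, not the repo's ModelEMA class.
import copy
import math

import torch
import torch.nn as nn

class SimpleEMA:
    def __init__(self, model: nn.Module, decay: float = 0.9999, tau: int = 2000):
        self.ema = copy.deepcopy(model).eval()  # FP32 shadow copy of the model
        self.updates = 0
        self.decay = lambda x: decay * (1 - math.exp(-x / tau))  # decay ramp-up
        for p in self.ema.parameters():
            p.requires_grad_(False)

    def update(self, model: nn.Module):
        self.updates += 1
        d = self.decay(self.updates)
        msd = model.state_dict()
        for k, v in self.ema.state_dict().items():
            if v.dtype.is_floating_point:  # true for FP16 and FP32 tensors
                v *= d
                v += (1 - d) * msd[k].detach()  # detach keeps grad graph out of EMA

ema = SimpleEMA(nn.Linear(4, 2))
ema.update(nn.Linear(4, 2))  # one update step with fresh example weights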
Daemon `plot_labels()` for faster start * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update train.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- train.py | 10 +++------- utils/callbacks.py | 13 +++++++++---- utils/general.py | 2 +- utils/loggers/__init__.py | 12 +++++++----- utils/plots.py | 1 - 5 files changed, 20 insertions(+), 18 deletions(-) diff --git a/train.py b/train.py index 665b4f5b609e..0bfcaffc16db 100644 --- a/train.py +++ b/train.py @@ -52,7 +52,7 @@ from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.loss import ComputeLoss from utils.metrics import fitness -from utils.plots import plot_evolve, plot_labels +from utils.plots import plot_evolve from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer, smart_resume, torch_distributed_zero_first) @@ -215,15 +215,11 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio prefix=colorstr('val: '))[0] if not resume: - if plots: - plot_labels(labels, names, save_dir) - - # Anchors if not opt.noautoanchor: - check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) + check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor model.half().float() # pre-reduce anchor precision - callbacks.run('on_pretrain_routine_end') + callbacks.run('on_pretrain_routine_end', labels, names, plots) # DDP mode if cuda and RANK != -1: diff --git a/utils/callbacks.py b/utils/callbacks.py index 2b32df0bf1c1..166d8938322d 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -3,6 +3,8 @@ Callback utils """ +import threading + class Callbacks: """" @@ -55,17 +57,20 @@ def get_registered_actions(self, hook=None): """ return self._callbacks[hook] if hook else self._callbacks - def run(self, hook, *args, **kwargs): + def run(self, hook, *args, thread=False, **kwargs): """ - Loop through the registered actions and fire all callbacks + Loop through the registered actions and fire all callbacks on main thread Args: hook: The name of the hook to check, defaults to all args: Arguments to receive from YOLOv5 + thread: (boolean) Run callbacks in daemon thread kwargs: Keyword Arguments to receive from YOLOv5 """ assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" - for logger in self._callbacks[hook]: - logger['callback'](*args, **kwargs) + if thread: + threading.Thread(target=logger['callback'], args=args, kwargs=kwargs, daemon=True).start() + else: + logger['callback'](*args, **kwargs) diff --git a/utils/general.py b/utils/general.py index d9f436a36359..3bc6fbc22d57 100755 --- a/utils/general.py +++ b/utils/general.py @@ -622,7 +622,7 @@ def download_one(url, dir): dir.mkdir(parents=True, exist_ok=True) # make directory if threads > 1: pool = ThreadPool(threads) - pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multi-threaded + pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multithreaded pool.close() pool.join() else: diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index b95a463717f8..c5cdd92772f2 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -11,10 +11,10 @@ import torch from torch.utils.tensorboard import SummaryWriter -from utils.general import colorstr, cv2 +from utils.general import colorstr, cv2, threaded from 
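# Generic illustration (simplified names, not the repo's Callbacks class) of
# the daemon-thread dispatch this patch introduces: a hook can run inline or
# on a background daemon thread, so slow work such as label plotting cannot
# block the start of training.
import threading
import time

def run_hook(fn, *args, thread=False, **kwargs):
    if thread:
        threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=True).start()
    else:
        fn(*args, **kwargs)

def slow_plot(name):
    time.sleep(0.2)  # stand-in for expensive plotting work
    print(f'plotted {name}')

run_hook(slow_plot, 'labels', thread=True)  # returns immediately
print('training continues')
time.sleep(0.3)  # give the daemon thread time to finish before interpreter exit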
utils.loggers.clearml.clearml_utils import ClearmlLogger from utils.loggers.wandb.wandb_utils import WandbLogger -from utils.plots import plot_images, plot_results +from utils.plots import plot_images, plot_labels, plot_results from utils.torch_utils import de_parallel LOGGERS = ('csv', 'tb', 'wandb', 'clearml') # *.csv, TensorBoard, Weights & Biases, ClearML @@ -110,13 +110,15 @@ def on_train_start(self): # Callback runs on train start pass - def on_pretrain_routine_end(self): + def on_pretrain_routine_end(self, labels, names, plots): # Callback runs on pre-train routine end + if plots: + plot_labels(labels, names, self.save_dir) paths = self.save_dir.glob('*labels*.jpg') # training labels if self.wandb: self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) - if self.clearml: - pass # ClearML saves these images automatically using hooks + # if self.clearml: + # pass # ClearML saves these images automatically using hooks def on_train_batch_end(self, ni, model, imgs, targets, paths, plots): # Callback runs on train batch end diff --git a/utils/plots.py b/utils/plots.py index 2c7a80b4c872..7e1de43aba1b 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -340,7 +340,6 @@ def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_ @try_except # known issue https://github.com/ultralytics/yolov5/issues/5395 -@Timeout(30) # known issue https://github.com/ultralytics/yolov5/issues/5611 def plot_labels(labels, names=(), save_dir=Path('')): # plot dataset labels LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ") From 6077bf032aa67b8b849b755aa29c66b2eaaee59e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 13:10:02 +0200 Subject: [PATCH 043/326] TensorBoard fix in tutorial.ipynb (#9064) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- tutorial.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 91e2d7e75eab..55e423d72833 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -660,7 +660,7 @@ "#@title Select YOLOv5 🚀 logger\n", "logger = 'TensorBoard' #@param ['TensorBoard', 'ClearML', 'W&B']\n", "\n", - "if logger == 'Tensorboard':\n", + "if logger == 'TensorBoard':\n", " %load_ext tensorboard\n", " %tensorboard --logdir runs/train\n", "elif logger == 'ClearML':\n", @@ -1103,4 +1103,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From 794f117f4bdd02171273d49da33c1e8a22037f76 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 13:33:13 +0200 Subject: [PATCH 044/326] Created using Colaboratory --- tutorial.ipynb | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 55e423d72833..040197bf8365 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -664,12 +664,10 @@ " %load_ext tensorboard\n", " %tensorboard --logdir runs/train\n", "elif logger == 'ClearML':\n", - " %pip install -q clearml\n", - " !clearml-init\n", + " %pip install -q clearml && clearml-init\n", "elif logger == 'W&B':\n", " %pip install -q wandb\n", - " import wandb\n", - " wandb.login()" + " import wandb; wandb.login()" ], "metadata": { "id": "i3oKtE4g-aNn" @@ -1103,4 +1101,4 @@ "outputs": [] } ] -} +} \ No newline at end of file From 1499526f5668f97832abf39c9e24e2acf3f98fdf Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 14:20:12 +0200 Subject: [PATCH 045/326] Created using Colaboratory --- tutorial.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tutorial.ipynb 
b/tutorial.ipynb index 040197bf8365..a8975424cb39 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -534,7 +534,7 @@ }, "source": [ "# Download COCO val\n", - "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download COCO val (1GB - 5000 images)\n", + "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" ], "execution_count": null, @@ -657,7 +657,7 @@ { "cell_type": "code", "source": [ - "#@title Select YOLOv5 🚀 logger\n", + "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", "logger = 'TensorBoard' #@param ['TensorBoard', 'ClearML', 'W&B']\n", "\n", "if logger == 'TensorBoard':\n", @@ -1077,7 +1077,7 @@ "cell_type": "code", "source": [ "# Validate on COCO test. Zip results.json and submit to eval server at https://competitions.codalab.org/competitions/20794\n", - "!bash data/scripts/get_coco.sh --test # download COCO test-dev2017 (7GB - 40,000 images, test 20,000)\n", + "!bash data/scripts/get_coco.sh --test # download COCO test-dev2017 (7G - 40,000 images, test 20,000)\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half --task test" ], "metadata": { From e6b4bf0bc26c06d54dd92eacef89decdc580a0f5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 14:21:48 +0200 Subject: [PATCH 046/326] Created using Colaboratory --- tutorial.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index a8975424cb39..8753a2310b1d 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1064,7 +1064,7 @@ "cell_type": "code", "source": [ "# Classification val\n", - "!bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images)\n", + "!bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G - 50000 images)\n", "!python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate" ], "metadata": { @@ -1077,7 +1077,7 @@ "cell_type": "code", "source": [ "# Validate on COCO test. 
Zip results.json and submit to eval server at https://competitions.codalab.org/competitions/20794\n", - "!bash data/scripts/get_coco.sh --test # download COCO test-dev2017 (7G - 40,000 images, test 20,000)\n", + "!bash data/scripts/get_coco.sh --test # download COCO test-dev2017 (7G - 40000 images, test 20000)\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half --task test" ], "metadata": { From 87e8deadd563982672a1c5104a68d1a67f0cf765 Mon Sep 17 00:00:00 2001 From: 0zppd <111682241+0zppd@users.noreply.github.com> Date: Sun, 21 Aug 2022 18:40:28 +0500 Subject: [PATCH 047/326] zero-mAP fix remove `torch.empty()` forward pass in `.train()` mode (#9068) * Fix Zero Map Issue Signed-off-by: 0zppd <111682241+0zppd@users.noreply.github.com> * Update __init__.py Signed-off-by: Glenn Jocher Signed-off-by: 0zppd <111682241+0zppd@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- utils/loggers/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index c5cdd92772f2..b9869df26a43 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -296,7 +296,7 @@ def log_tensorboard_graph(tb, model, imgsz=(640, 640)): try: p = next(model.parameters()) # for device, type imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz # expand - im = torch.empty((1, 3, *imgsz)).to(p.device).type_as(p) # input image + im = torch.zeros((1, 3, *imgsz)).to(p.device).type_as(p) # input image (WARNING: must be zeros, not empty) with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress jit trace warning tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), []) From 0b8639a40a9c73a9ee1556405fabfd2d46087299 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 15:50:02 +0200 Subject: [PATCH 048/326] Rename 'labels' to 'instances' (#9066) * Rename labels to instances * Rename labels to instances * align val --- train.py | 4 ++-- val.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/train.py b/train.py index 0bfcaffc16db..ac38d04dba90 100644 --- a/train.py +++ b/train.py @@ -271,7 +271,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio if RANK != -1: train_loader.sampler.set_epoch(epoch) pbar = enumerate(train_loader) - LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size')) + LOGGER.info(('\n' + '%11s' * 7) % ('Epoch', 'GPU_mem', 'box_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size')) if RANK in {-1, 0}: pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar optimizer.zero_grad() @@ -326,7 +326,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio if RANK in {-1, 0}: mloss = (mloss * i + loss_items) / (i + 1) # update mean losses mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) - pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % + pbar.set_description(('%11s' * 2 + '%11.4g' * 5) % (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots) if callbacks.stop_training: diff --git a/val.py b/val.py index fcaca889d7e2..f9557bba651d 100644 --- a/val.py +++ b/val.py @@ -186,7 +186,7 @@ def run( if isinstance(names, (list, tuple)): # old format names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else 
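# A hedged sketch of the graph-logging pattern the torch.zeros fix concerns:
# trace the model on an all-zeros dummy image and write the graph to
# TensorBoard. torch.empty() returns uninitialized memory, so a forward pass
# through it while the model is in train mode can feed garbage into BatchNorm
# running statistics, which matches the reported zero-mAP symptom. The model
# and log directory below are stand-ins; assumes the tensorboard package is
# installed.
import warnings

import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter

model = nn.Sequential(nn.Conv2d(3, 8, 3, 2), nn.BatchNorm2d(8), nn.SiLU())
im = torch.zeros((1, 3, 64, 64))  # WARNING: must be zeros, not torch.empty()
with warnings.catch_warnings():
    warnings.simplefilter('ignore')  # suppress jit trace warnings
    tb = SummaryWriter('runs/graph_demo')  # stand-in log directory
    tb.add_graph(torch.jit.trace(model, im, strict=False), [])
    tb.close()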
list(range(1000)) - s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') + s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') dt, p, r, f1, mp, mr, map50, map = (Profile(), Profile(), Profile()), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] @@ -270,7 +270,7 @@ def run( nt = np.bincount(stats[3].astype(int), minlength=nc) # number of targets per class # Print results - pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format + pf = '%22s' + '%11i' * 2 + '%11.3g' * 4 # print format LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) if nt.sum() == 0: LOGGER.warning(f'WARNING: no labels found in {task} set, can not compute metrics without labels ⚠️') From 8665d557c1caa66c190c1ec26b377eeae385d1d6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 16:51:50 +0200 Subject: [PATCH 049/326] Threaded TensorBoard graph logging (#9070) * Log TensorBoard graph on pretrain_routine_end * fix --- train.py | 6 +++--- utils/loggers/__init__.py | 34 ++++++++++++++++++---------------- 2 files changed, 21 insertions(+), 19 deletions(-) diff --git a/train.py b/train.py index ac38d04dba90..e4c9b6ae6749 100644 --- a/train.py +++ b/train.py @@ -219,7 +219,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor model.half().float() # pre-reduce anchor precision - callbacks.run('on_pretrain_routine_end', labels, names, plots) + callbacks.run('on_pretrain_routine_end', labels, names) # DDP mode if cuda and RANK != -1: @@ -328,7 +328,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) pbar.set_description(('%11s' * 2 + '%11.4g' * 5) % (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) - callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots) + callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths) if callbacks.stop_training: return # end batch ------------------------------------------------------------------------------------------------ @@ -420,7 +420,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio if is_coco: callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) - callbacks.run('on_train_end', last, best, plots, epoch, results) + callbacks.run('on_train_end', last, best, epoch, results) torch.cuda.empty_cache() return results diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index b9869df26a43..98a123eee74d 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -49,6 +49,7 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, self.weights = weights self.opt = opt self.hyp = hyp + self.plots = not opt.noplots # plot results self.logger = logger # for printing results to console self.include = include self.keys = [ @@ -110,26 +111,26 @@ def on_train_start(self): # Callback runs on train start pass - def on_pretrain_routine_end(self, labels, names, plots): + def on_pretrain_routine_end(self, labels, names): # Callback runs on pre-train routine end - if plots: + if self.plots: plot_labels(labels, names, self.save_dir) - paths = self.save_dir.glob('*labels*.jpg') # training labels - if self.wandb: - 
self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) - # if self.clearml: - # pass # ClearML saves these images automatically using hooks + paths = self.save_dir.glob('*labels*.jpg') # training labels + if self.wandb: + self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) + # if self.clearml: + # pass # ClearML saves these images automatically using hooks - def on_train_batch_end(self, ni, model, imgs, targets, paths, plots): + def on_train_batch_end(self, model, ni, imgs, targets, paths): # Callback runs on train batch end # ni: number integrated batches (since train start) - if plots: - if ni == 0 and not self.opt.sync_bn and self.tb: - log_tensorboard_graph(self.tb, model, imgsz=list(imgs.shape[2:4])) + if self.plots: if ni < 3: f = self.save_dir / f'train_batch{ni}.jpg' # filename plot_images(imgs, targets, paths, f) - if (self.wandb or self.clearml) and ni == 10: + if ni == 0 and self.tb and not self.opt.sync_bn: + log_tensorboard_graph(self.tb, model, imgsz=(self.opt.imgsz, self.opt.imgsz)) + if ni == 10 and (self.wandb or self.clearml): files = sorted(self.save_dir.glob('train*.jpg')) if self.wandb: self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]}) @@ -197,9 +198,9 @@ def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): model_name='Latest Model', auto_delete_file=False) - def on_train_end(self, last, best, plots, epoch, results): + def on_train_end(self, last, best, epoch, results): # Callback runs on training end, i.e. saving best model - if plots: + if self.plots: plot_results(file=self.save_dir / 'results.csv') # save results.png files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter @@ -291,6 +292,7 @@ def log_model(self, model_path, epoch=0, metadata={}): wandb.log_artifact(art) +@threaded def log_tensorboard_graph(tb, model, imgsz=(640, 640)): # Log model graph to TensorBoard try: @@ -300,5 +302,5 @@ def log_tensorboard_graph(tb, model, imgsz=(640, 640)): with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress jit trace warning tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), []) - except Exception: - print('WARNING: TensorBoard graph visualization failure') + except Exception as e: + print(f'WARNING: TensorBoard graph visualization failure {e}') From 5373a28c1bcede65e513b7be0ab5a0d43125c90c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 17:01:05 +0200 Subject: [PATCH 050/326] Created using Colaboratory --- tutorial.ipynb | 451 ++++++++++++++++++++++++++----------------------- 1 file changed, 243 insertions(+), 208 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 8753a2310b1d..5b7b1f287d7e 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -17,110 +17,121 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "6d6b90ead2db49b3bdf624b6ba9b44e9": { + "da0946bcefd9414fa282977f7f609e36": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", - "model_module_version": "1.5.0", + "model_module_version": "2.0.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", + "_model_module_version": "2.0.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", + "_view_module_version": 
"2.0.0", "_view_name": "HBoxView", "box_style": "", "children": [ - "IPY_MODEL_cb77443edb9e43328a56aaa4413a0df3", - "IPY_MODEL_954c8b8699e143bf92be6bfc02fc52f6", - "IPY_MODEL_a64775946e13477f83d8bba6086385b9" + "IPY_MODEL_7838c0af44244ccc906c413cea0989d7", + "IPY_MODEL_309ea78b3e814198b4080beb878d5329", + "IPY_MODEL_b2d1d998e5db4ca1a36280902e1647c7" ], - "layout": "IPY_MODEL_1413611b7f4f4ef99e4f541f5ca35ed6" + "layout": "IPY_MODEL_e7d7f56c77884717ba122f1d603c0852", + "tabbable": null, + "tooltip": null } }, - "cb77443edb9e43328a56aaa4413a0df3": { + "7838c0af44244ccc906c413cea0989d7": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", - "model_module_version": "1.5.0", + "model_module_version": "2.0.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", + "_model_module_version": "2.0.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", + "_view_module_version": "2.0.0", "_view_name": "HTMLView", "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_00737f5558eb4fbd968172acb978e54a", + "description_allow_html": false, + "layout": "IPY_MODEL_abf60d6b8ea847f9bb358ae2b045458b", "placeholder": "​", - "style": "IPY_MODEL_f03e5ddfd1c04bedaf68ab02c3f6f0ea", + "style": "IPY_MODEL_379196a2761b4a29aca8ef088dc60c10", + "tabbable": null, + "tooltip": null, "value": "100%" } }, - "954c8b8699e143bf92be6bfc02fc52f6": { + "309ea78b3e814198b4080beb878d5329": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", - "model_module_version": "1.5.0", + "model_module_version": "2.0.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", + "_model_module_version": "2.0.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", + "_view_module_version": "2.0.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_6926db7e0035455f99e1dd4508c4b19c", + "description_allow_html": false, + "layout": "IPY_MODEL_52b546a356e54174a95049b30cb52c81", "max": 818322941, "min": 0, "orientation": "horizontal", - "style": "IPY_MODEL_a6a52c9f828b458e97ddf7a11ae9275f", + "style": "IPY_MODEL_0889e134327e4aa0a8719d03a0d6941b", + "tabbable": null, + "tooltip": null, "value": 818322941 } }, - "a64775946e13477f83d8bba6086385b9": { + "b2d1d998e5db4ca1a36280902e1647c7": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", - "model_module_version": "1.5.0", + "model_module_version": "2.0.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", + "_model_module_version": "2.0.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", + "_view_module_version": "2.0.0", "_view_name": "HTMLView", "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_c4c7dc45a1c24dc4b2c709e21271a37e", + "description_allow_html": false, + "layout": "IPY_MODEL_30f22a3e42d24f10ad9851f40a6703f3", "placeholder": "​", - "style": "IPY_MODEL_09c43ffe2c7e4bdc9489e83f9d82ab73", - "value": " 780M/780M [01:12<00:00, 23.8MB/s]" + "style": "IPY_MODEL_648b3512bb7d4ccca5d75af36c133e92", + "tabbable": null, + "tooltip": null, + "value": " 780M/780M [01:31<00:00, 12.3MB/s]" } }, - 
"1413611b7f4f4ef99e4f541f5ca35ed6": { + "e7d7f56c77884717ba122f1d603c0852": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", - "model_module_version": "1.2.0", + "model_module_version": "2.0.0", "state": { "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", + "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", + "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, - "border": null, + "border_bottom": null, + "border_left": null, + "border_right": null, + "border_top": null, "bottom": null, "display": null, "flex": null, @@ -148,8 +159,6 @@ "object_position": null, "order": null, "overflow": null, - "overflow_x": null, - "overflow_y": null, "padding": null, "right": null, "top": null, @@ -157,22 +166,25 @@ "width": null } }, - "00737f5558eb4fbd968172acb978e54a": { + "abf60d6b8ea847f9bb358ae2b045458b": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", - "model_module_version": "1.2.0", + "model_module_version": "2.0.0", "state": { "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", + "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", + "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, - "border": null, + "border_bottom": null, + "border_left": null, + "border_right": null, + "border_top": null, "bottom": null, "display": null, "flex": null, @@ -200,8 +212,6 @@ "object_position": null, "order": null, "overflow": null, - "overflow_x": null, - "overflow_y": null, "padding": null, "right": null, "top": null, @@ -209,37 +219,43 @@ "width": null } }, - "f03e5ddfd1c04bedaf68ab02c3f6f0ea": { + "379196a2761b4a29aca8ef088dc60c10": { "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", + "model_name": "HTMLStyleModel", + "model_module_version": "2.0.0", "state": { "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", + "_model_module_version": "2.0.0", + "_model_name": "HTMLStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", + "_view_module_version": "2.0.0", "_view_name": "StyleView", - "description_width": "" + "background": null, + "description_width": "", + "font_size": null, + "text_color": null } }, - "6926db7e0035455f99e1dd4508c4b19c": { + "52b546a356e54174a95049b30cb52c81": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", - "model_module_version": "1.2.0", + "model_module_version": "2.0.0", "state": { "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", + "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", + "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, - "border": null, + "border_bottom": null, + "border_left": null, + "border_right": null, + "border_top": null, "bottom": null, "display": null, "flex": null, @@ -267,8 +283,6 @@ "object_position": null, "order": null, "overflow": null, - "overflow_x": null, - 
"overflow_y": null, "padding": null, "right": null, "top": null, @@ -276,38 +290,41 @@ "width": null } }, - "a6a52c9f828b458e97ddf7a11ae9275f": { + "0889e134327e4aa0a8719d03a0d6941b": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", - "model_module_version": "1.5.0", + "model_module_version": "2.0.0", "state": { "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", + "_model_module_version": "2.0.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", + "_view_module_version": "2.0.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, - "c4c7dc45a1c24dc4b2c709e21271a37e": { + "30f22a3e42d24f10ad9851f40a6703f3": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", - "model_module_version": "1.2.0", + "model_module_version": "2.0.0", "state": { "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", + "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", + "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, - "border": null, + "border_bottom": null, + "border_left": null, + "border_right": null, + "border_top": null, "bottom": null, "display": null, "flex": null, @@ -335,8 +352,6 @@ "object_position": null, "order": null, "overflow": null, - "overflow_x": null, - "overflow_y": null, "padding": null, "right": null, "top": null, @@ -344,19 +359,22 @@ "width": null } }, - "09c43ffe2c7e4bdc9489e83f9d82ab73": { + "648b3512bb7d4ccca5d75af36c133e92": { "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", + "model_name": "HTMLStyleModel", + "model_module_version": "2.0.0", "state": { "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", + "_model_module_version": "2.0.0", + "_model_name": "HTMLStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", + "_view_module_version": "2.0.0", "_view_name": "StyleView", - "description_width": "" + "background": null, + "description_width": "", + "font_size": null, + "text_color": null } } } @@ -404,7 +422,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "508de90c-846e-495d-c7d6-50681af62a98" + "outputId": "4200fd6f-c6f5-4505-a4f9-a918f3ed1f86" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", @@ -415,13 +433,13 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-15-g61adf01 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n" + "YOLOv5 🚀 v6.2-41-g8665d55 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n" ] }, { @@ -461,29 +479,29 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "93881540-331e-4890-cd38-4c2776933238" + "outputId": "1af15107-bcd1-4e8f-b5bd-0ee1a737e051" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": null, + "execution_count": 2, "outputs": [ { "output_type": "stream", 
"name": "stdout", "text": [ "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False\n", - "YOLOv5 🚀 v6.2-15-g61adf01 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-41-g8665d55 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 39.3MB/s]\n", + "100% 14.1M/14.1M [00:00<00:00, 41.7MB/s]\n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 14.9ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 22.0ms\n", - "Speed: 0.6ms pre-process, 18.4ms inference, 24.1ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 14.5ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 18.9ms\n", + "Speed: 0.5ms pre-process, 16.7ms inference, 21.4ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" ] } @@ -515,29 +533,29 @@ "id": "WQPtK1QYVaD_", "colab": { "base_uri": "https://localhost:8080/", - "height": 49, + "height": 17, "referenced_widgets": [ - "6d6b90ead2db49b3bdf624b6ba9b44e9", - "cb77443edb9e43328a56aaa4413a0df3", - "954c8b8699e143bf92be6bfc02fc52f6", - "a64775946e13477f83d8bba6086385b9", - "1413611b7f4f4ef99e4f541f5ca35ed6", - "00737f5558eb4fbd968172acb978e54a", - "f03e5ddfd1c04bedaf68ab02c3f6f0ea", - "6926db7e0035455f99e1dd4508c4b19c", - "a6a52c9f828b458e97ddf7a11ae9275f", - "c4c7dc45a1c24dc4b2c709e21271a37e", - "09c43ffe2c7e4bdc9489e83f9d82ab73" + "da0946bcefd9414fa282977f7f609e36", + "7838c0af44244ccc906c413cea0989d7", + "309ea78b3e814198b4080beb878d5329", + "b2d1d998e5db4ca1a36280902e1647c7", + "e7d7f56c77884717ba122f1d603c0852", + "abf60d6b8ea847f9bb358ae2b045458b", + "379196a2761b4a29aca8ef088dc60c10", + "52b546a356e54174a95049b30cb52c81", + "0889e134327e4aa0a8719d03a0d6941b", + "30f22a3e42d24f10ad9851f40a6703f3", + "648b3512bb7d4ccca5d75af36c133e92" ] }, - "outputId": "ed2ca46e-a1a9-4a16-c449-859278d8aa18" + "outputId": "5f129105-eca5-4f33-fb1d-981255f814ad" }, "source": [ "# Download COCO val\n", "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" ], - "execution_count": null, + "execution_count": 3, "outputs": [ { "output_type": "display_data", @@ -548,7 +566,24 @@ "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, - "model_id": "6d6b90ead2db49b3bdf624b6ba9b44e9" + "model_id": "da0946bcefd9414fa282977f7f609e36" + }, + "application/json": { + "n": 0, + "total": 818322941, + "elapsed": 0.020366430282592773, + "ncols": null, + "nrows": null, + "prefix": "", + "ascii": false, + "unit": "B", + "unit_scale": true, + "rate": null, + "bar_format": null, + "postfix": null, + "unit_divisor": 1024, + "initial": 0, + 
"colour": null } }, "metadata": {} @@ -562,48 +597,48 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "19a590ef-363e-424c-d9ce-78bbe0593cd5" + "outputId": "40d5d000-abee-46a0-c07d-1066e1662e01" }, "source": [ "# Validate YOLOv5x on COCO val\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], - "execution_count": null, + "execution_count": 4, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-15-g61adf01 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-41-g8665d55 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt to yolov5x.pt...\n", - "100% 166M/166M [00:06<00:00, 28.1MB/s]\n", + "100% 166M/166M [00:10<00:00, 16.6MB/s]\n", "\n", "Fusing layers... \n", "YOLOv5x summary: 444 layers, 86705005 parameters, 0 gradients\n", "Downloading https://ultralytics.com/assets/Arial.ttf to /root/.config/Ultralytics/Arial.ttf...\n", - "100% 755k/755k [00:00<00:00, 47.3MB/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 10756.32it/s]\n", + "100% 755k/755k [00:00<00:00, 1.39MB/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 10506.48it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:07<00:00, 2.33it/s]\n", - " all 5000 36335 0.743 0.625 0.683 0.504\n", - "Speed: 0.1ms pre-process, 4.6ms inference, 1.2ms NMS per image at shape (32, 3, 640, 640)\n", + " Class Images Instances P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:06<00:00, 2.36it/s]\n", + " all 5000 36335 0.743 0.625 0.683 0.504\n", + "Speed: 0.1ms pre-process, 4.6ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n", "\n", "Evaluating pycocotools mAP... 
saving runs/val/exp/yolov5x_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.41s)\n", + "Done (t=0.38s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=5.64s)\n", + "DONE (t=5.49s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=76.80s).\n", + "DONE (t=72.10s).\n", "Accumulating evaluation results...\n", - "DONE (t=14.61s).\n", + "DONE (t=13.94s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.506\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.549\n", @@ -682,13 +717,13 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "47759d5e-34f0-4a6a-c714-ff533391cfff" + "outputId": "f0ce0354-7f50-4546-f3f9-672b4b522d59" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": null, + "execution_count": 5, "outputs": [ { "output_type": "stream", @@ -696,7 +731,7 @@ "text": [ "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-15-g61adf01 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-41-g8665d55 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", "\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases\n", @@ -705,8 +740,8 @@ "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", - "100% 6.66M/6.66M [00:00<00:00, 75.3MB/s]\n", - "Dataset download success ✅ (0.7s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.66M/6.66M [00:00<00:00, 76.7MB/s]\n", + "Dataset download success ✅ (0.5s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -740,33 +775,33 @@ "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, 
blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 7246.20it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 7984.87it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 986.21it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 1018.19it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00 Date: Sun, 21 Aug 2022 17:07:56 +0200 Subject: [PATCH 051/326] De-thread TensorBoard graph logging (#9071) * De-thread TensorBoard graph logging Issues with Classification models Signed-off-by: Glenn Jocher * Update __init__.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/loggers/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 98a123eee74d..006125edbcd9 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -11,7 +11,7 @@ import torch from torch.utils.tensorboard import SummaryWriter -from utils.general import colorstr, cv2, threaded +from utils.general import colorstr, cv2 from utils.loggers.clearml.clearml_utils import ClearmlLogger from utils.loggers.wandb.wandb_utils import WandbLogger from utils.plots import plot_images, plot_labels, plot_results @@ -292,7 +292,6 @@ def log_model(self, model_path, epoch=0, metadata={}): wandb.log_artifact(art) -@threaded def log_tensorboard_graph(tb, model, imgsz=(640, 640)): # Log model graph to TensorBoard try: From 262187e95d304f80abf08abd850b7b5076f2a7a9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 23:26:07 +0200 Subject: [PATCH 052/326] Two dimensional `size=(h,w)` AutoShape support (#9072) * Two dimensional `size=(h,w)` AutoShape support May resolve https://github.com/ultralytics/yolov5/issues/9039 Signed-off-by: Glenn Jocher * Update hubconf.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- hubconf.py | 10 +++++++--- models/common.py | 8 +++++--- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/hubconf.py b/hubconf.py index 293f177dcbc1..0a7f917bd7d1 100644 --- a/hubconf.py +++ b/hubconf.py @@ -30,7 +30,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo from models.common import AutoShape, DetectMultiBackend from models.experimental import attempt_load - from models.yolo import DetectionModel + from models.yolo import ClassificationModel, DetectionModel from utils.downloads import attempt_download from utils.general import LOGGER, check_requirements, intersect_dicts, logging from utils.torch_utils import select_device @@ -45,8 +45,12 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo if 
pretrained and channels == 3 and classes == 80: try: model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model - if autoshape and isinstance(model.model, DetectionModel): - model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS + if autoshape: + if model.pt and isinstance(model.model, ClassificationModel): + LOGGER.warning('WARNING: YOLOv5 v6.2 ClassificationModel is not yet AutoShape compatible. ' + 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') + else: + model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS except Exception: model = attempt_load(path, device=device, fuse=False) # arbitrary model else: diff --git a/models/common.py b/models/common.py index 44192e622bb5..d308244c4a44 100644 --- a/models/common.py +++ b/models/common.py @@ -589,7 +589,7 @@ def _apply(self, fn): @smart_inference_mode() def forward(self, ims, size=640, augment=False, profile=False): - # Inference from various sources. For height=640, width=1280, RGB images example inputs are: + # Inference from various sources. For size(height=640, width=1280), RGB images example inputs are: # file: ims = 'data/images/zidane.jpg' # str or PosixPath # URI: = 'https://ultralytics.com/images/zidane.jpg' # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) @@ -600,6 +600,8 @@ def forward(self, ims, size=640, augment=False, profile=False): dt = (Profile(), Profile(), Profile()) with dt[0]: + if isinstance(size, int): # expand + size = (size, size) p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device) # param autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference if isinstance(ims, torch.Tensor): # torch @@ -622,10 +624,10 @@ def forward(self, ims, size=640, augment=False, profile=False): im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) # enforce 3ch input s = im.shape[:2] # HWC shape0.append(s) # image shape - g = (size / max(s)) # gain + g = max(size) / max(s) # gain shape1.append([y * g for y in s]) ims[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update - shape1 = [make_divisible(x, self.stride) if self.pt else size for x in np.array(shape1).max(0)] # inf shape + shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] if self.pt else size # inf shape x = [letterbox(im, shape1, auto=False)[0] for im in ims] # pad x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 From 0abae780b356aa29332f7d50552e0ed88e38ee3a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 22 Aug 2022 00:04:30 +0200 Subject: [PATCH 053/326] Remove unused Timeout import (#9073) * Remove unused Timeout import Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/plots.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index 7e1de43aba1b..d35e2bdd168a 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -19,8 +19,8 @@ import torch from PIL import Image, ImageDraw, ImageFont -from utils.general import (CONFIG_DIR, FONT, LOGGER, Timeout, check_font, check_requirements, clip_coords, - increment_path, is_ascii, threaded, try_except, xywh2xyxy, xyxy2xywh) +from 
utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_coords, increment_path, + is_ascii, threaded, try_except, xywh2xyxy, xyxy2xywh) from utils.metrics import fitness # Settings From 06831aa9e905e0fa703958f6b3f3db443cf477f3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 22 Aug 2022 01:06:29 +0200 Subject: [PATCH 054/326] Improved Usage example docstrings (#9075) * Updated Usage examples * Update detect.py Signed-off-by: Glenn Jocher * Update predict.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/predict.py | 23 +++++++++++++++++++---- classify/train.py | 11 +++++++---- classify/val.py | 16 ++++++++++++++-- detect.py | 36 ++++++++++++++++++------------------ export.py | 22 +++++++++++----------- hubconf.py | 4 ++-- models/tf.py | 2 +- models/yolo.py | 2 +- train.py | 17 ++++++++++------- val.py | 24 ++++++++++++------------ 10 files changed, 95 insertions(+), 62 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 0bf99140b8e3..135470fd36ed 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -1,9 +1,24 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ -Run classification inference on file/dir/URL/glob - -Usage: - $ python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg +Run YOLOv5 classification inference on images, videos, directories, and globs. + +Usage - sources: + $ python classify/predict.py --weights yolov5s.pt --source img.jpg # image + vid.mp4 # video + path/ # directory + 'path/*.jpg' # glob + +Usage - formats: + $ python classify/predict.py --weights yolov5s-cls.pt # PyTorch + yolov5s-cls.torchscript # TorchScript + yolov5s-cls.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s-cls.xml # OpenVINO + yolov5s-cls.engine # TensorRT + yolov5s-cls.mlmodel # CoreML (macOS-only) + yolov5s-cls_saved_model # TensorFlow SavedModel + yolov5s-cls.pb # TensorFlow GraphDef + yolov5s-cls.tflite # TensorFlow Lite + yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU """ import argparse diff --git a/classify/train.py b/classify/train.py index 8fe90c1b19eb..223367260bad 100644 --- a/classify/train.py +++ b/classify/train.py @@ -1,13 +1,16 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Train a YOLOv5 classifier model on a classification dataset -Datasets: --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/custom/dataset' -YOLOv5-cls models: --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt -Torchvision models: --model resnet50, efficientnet_b0, etc. See https://pytorch.org/vision/stable/models.html -Usage - Single-GPU and Multi-GPU DDP +Usage - Single-GPU training: $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 128 + +Usage - Multi-GPU DDP training: $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 + +Datasets: --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/data' +YOLOv5-cls models: --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt +Torchvision models: --model resnet50, efficientnet_b0, etc. 
See https://pytorch.org/vision/stable/models.html """ import argparse diff --git a/classify/val.py b/classify/val.py index 2353737957d3..bf808bc21a84 100644 --- a/classify/val.py +++ b/classify/val.py @@ -1,10 +1,22 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ -Validate a classification model on a dataset +Validate a trained YOLOv5 classification model on a classification dataset Usage: $ bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) - $ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate + $ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate ImageNet + +Usage - formats: + $ python classify/val.py --weights yolov5s-cls.pt # PyTorch + yolov5s-cls.torchscript # TorchScript + yolov5s-cls.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s-cls.xml # OpenVINO + yolov5s-cls.engine # TensorRT + yolov5s-cls.mlmodel # CoreML (macOS-only) + yolov5s-cls_saved_model # TensorFlow SavedModel + yolov5s-cls.pb # TensorFlow GraphDef + yolov5s-cls.tflite # TensorFlow Lite + yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU """ import argparse diff --git a/detect.py b/detect.py index 93ae0baccd13..541ad90e051d 100644 --- a/detect.py +++ b/detect.py @@ -1,27 +1,27 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ -Run inference on images, videos, directories, streams, etc. +Run YOLOv5 detection inference on images, videos, directories, streams, etc. Usage - sources: - $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam - img.jpg # image - vid.mp4 # video - path/ # directory - 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream + $ python detect.py --weights yolov5s.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + path/ # directory + 'path/*.jpg' # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream Usage - formats: - $ python path/to/detect.py --weights yolov5s.pt # PyTorch - yolov5s.torchscript # TorchScript - yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.xml # OpenVINO - yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (macOS-only) - yolov5s_saved_model # TensorFlow SavedModel - yolov5s.pb # TensorFlow GraphDef - yolov5s.tflite # TensorFlow Lite - yolov5s_edgetpu.tflite # TensorFlow Edge TPU + $ python detect.py --weights yolov5s.pt # PyTorch + yolov5s.torchscript # TorchScript + yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s.xml # OpenVINO + yolov5s.engine # TensorRT + yolov5s.mlmodel # CoreML (macOS-only) + yolov5s_saved_model # TensorFlow SavedModel + yolov5s.pb # TensorFlow GraphDef + yolov5s.tflite # TensorFlow Lite + yolov5s_edgetpu.tflite # TensorFlow Edge TPU """ import argparse diff --git a/export.py b/export.py index 166b5f406a20..7a746156b96d 100644 --- a/export.py +++ b/export.py @@ -21,19 +21,19 @@ $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU Usage: - $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ... + $ python export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ... 
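As an aside, the CLI usage shown throughout these docstrings also has a programmatic equivalent. A minimal sketch, assuming export.run() keeps the keyword arguments visible in this patch series (weights, include); treat the exact names as illustrative rather than authoritative:

    # Drive the same exports from Python instead of the shell (run from the repo root)
    import export  # yolov5/export.py

    # Writes yolov5s.torchscript and yolov5s.onnx next to the weights file;
    # each exporter logs its own success/failure line
    export.run(weights='yolov5s.pt', include=('torchscript', 'onnx'))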
 Inference:
-    $ python path/to/detect.py --weights yolov5s.pt                 # PyTorch
-                                         yolov5s.torchscript        # TorchScript
-                                         yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
-                                         yolov5s.xml                # OpenVINO
-                                         yolov5s.engine             # TensorRT
-                                         yolov5s.mlmodel            # CoreML (macOS-only)
-                                         yolov5s_saved_model        # TensorFlow SavedModel
-                                         yolov5s.pb                 # TensorFlow GraphDef
-                                         yolov5s.tflite             # TensorFlow Lite
-                                         yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
+    $ python detect.py --weights yolov5s.pt                 # PyTorch
+                                 yolov5s.torchscript        # TorchScript
+                                 yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+                                 yolov5s.xml                # OpenVINO
+                                 yolov5s.engine             # TensorRT
+                                 yolov5s.mlmodel            # CoreML (macOS-only)
+                                 yolov5s_saved_model        # TensorFlow SavedModel
+                                 yolov5s.pb                 # TensorFlow GraphDef
+                                 yolov5s.tflite             # TensorFlow Lite
+                                 yolov5s_edgetpu.tflite     # TensorFlow Edge TPU

 TensorFlow.js:
     $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
diff --git a/hubconf.py b/hubconf.py
index 0a7f917bd7d1..33fc87930582 100644
--- a/hubconf.py
+++ b/hubconf.py
@@ -1,11 +1,11 @@
 # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
 """
-PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/
+PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5

 Usage:
     import torch
     model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
-    model = torch.hub.load('ultralytics/yolov5:master', 'custom', 'path/to/yolov5s.onnx')  # file from branch
+    model = torch.hub.load('ultralytics/yolov5:master', 'custom', 'path/to/yolov5s.onnx')  # custom model from branch
 """

 import torch
diff --git a/models/tf.py b/models/tf.py
index b0d98cc2a3a9..ecb0d4d79c78 100644
--- a/models/tf.py
+++ b/models/tf.py
@@ -7,7 +7,7 @@
     $ python models/tf.py --weights yolov5s.pt

 Export:
-    $ python path/to/export.py --weights yolov5s.pt --include saved_model pb tflite tfjs
+    $ python export.py --weights yolov5s.pt --include saved_model pb tflite tfjs
 """

 import argparse
diff --git a/models/yolo.py b/models/yolo.py
index 32a47e9591da..e154b72685b4 100644
--- a/models/yolo.py
+++ b/models/yolo.py
@@ -3,7 +3,7 @@
 YOLO-specific modules

 Usage:
-    $ python path/to/models/yolo.py --cfg yolov5s.yaml
+    $ python models/yolo.py --cfg yolov5s.yaml
 """

 import argparse
diff --git a/train.py b/train.py
index e4c9b6ae6749..0cd4a7f065a6 100644
--- a/train.py
+++ b/train.py
@@ -1,15 +1,18 @@
 # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
 """
 Train a YOLOv5 model on a custom dataset.
-
 Models and datasets download automatically from the latest YOLOv5 release.
-Models: https://github.com/ultralytics/yolov5/tree/master/models
-Datasets: https://github.com/ultralytics/yolov5/tree/master/data
-Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data

-Usage:
-    $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640  # from pretrained (RECOMMENDED)
-    $ python path/to/train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640  # from scratch
+Usage - Single-GPU training:
+    $ python train.py --data coco128.yaml --weights yolov5s.pt --img 640  # from pretrained (recommended)
+    $ python train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640  # from scratch
+
+Usage - Multi-GPU DDP training:
+    $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 train.py --data coco128.yaml --weights yolov5s.pt --img 640 --device 0,1,2,3
+
+Models:     https://github.com/ultralytics/yolov5/tree/master/models
+Datasets:   https://github.com/ultralytics/yolov5/tree/master/data
+Tutorial:   https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data
 """

 import argparse
diff --git a/val.py b/val.py
index f9557bba651d..58b9c9e1bec0 100644
--- a/val.py
+++ b/val.py
@@ -1,21 +1,21 @@
 # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
 """
-Validate a trained YOLOv5 model accuracy on a custom dataset
+Validate a trained YOLOv5 detection model on a detection dataset

 Usage:
-    $ python path/to/val.py --weights yolov5s.pt --data coco128.yaml --img 640
+    $ python val.py --weights yolov5s.pt --data coco128.yaml --img 640

 Usage - formats:
-    $ python path/to/val.py --weights yolov5s.pt                 # PyTorch
-                                      yolov5s.torchscript        # TorchScript
-                                      yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
-                                      yolov5s.xml                # OpenVINO
-                                      yolov5s.engine             # TensorRT
-                                      yolov5s.mlmodel            # CoreML (macOS-only)
-                                      yolov5s_saved_model        # TensorFlow SavedModel
-                                      yolov5s.pb                 # TensorFlow GraphDef
-                                      yolov5s.tflite             # TensorFlow Lite
-                                      yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
+    $ python val.py --weights yolov5s.pt                 # PyTorch
+                              yolov5s.torchscript        # TorchScript
+                              yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+                              yolov5s.xml                # OpenVINO
+                              yolov5s.engine             # TensorRT
+                              yolov5s.mlmodel            # CoreML (macOS-only)
+                              yolov5s_saved_model        # TensorFlow SavedModel
+                              yolov5s.pb                 # TensorFlow GraphDef
+                              yolov5s.tflite             # TensorFlow Lite
+                              yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
 """

 import argparse

From eab35f66f9104992a448fbd726c6c2dfdfdf240f Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 22 Aug 2022 22:18:01 +0200
Subject: [PATCH 055/326] Install `torch` latest stable (#9092)

Install torch 1.12.1 stable

GPU assignment issues in 1.13 nightly that comes with image

Signed-off-by: Glenn Jocher

Signed-off-by: Glenn Jocher
---
 utils/docker/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile
index cf2c1c5cb3cb..4b9367cc27db 100644
--- a/utils/docker/Dockerfile
+++ b/utils/docker/Dockerfile
@@ -15,7 +15,7 @@

 # Install pip packages
 COPY requirements.txt .
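# Note on the change below (inferred from the commit message above): the base image ships
# a torch 1.13 nightly tied to GPU-assignment issues, so torch/torchvision are now
# uninstalled here as well and reinstalled by the following RUN line as stable cu113
# wheels pulled via --extra-index-url https://download.pytorch.org/whl/cu113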
 RUN python -m pip install --upgrade pip wheel
-RUN pip uninstall -y Pillow torchtext  # torch torchvision
+RUN pip uninstall -y Pillow torchtext torch torchvision
 RUN pip install --no-cache -r requirements.txt albumentations wandb gsutil notebook Pillow>=9.1.0 \
     'opencv-python<4.6.0.66' \
     --extra-index-url https://download.pytorch.org/whl/cu113

From d0fa0042bd7775b2dd191d66548f5d8b677bb756 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Tue, 23 Aug 2022 13:06:33 +0200
Subject: [PATCH 056/326] New `@try_export` decorator (#9096)

* New export decorator

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* New export decorator

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* Cleanup

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* rename fcn to func

* rename to @try_export

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 export.py        | 633 +++++++++++++++++++++++------------------------
 utils/general.py |  15 +-
 2 files changed, 317 insertions(+), 331 deletions(-)

diff --git a/export.py b/export.py
index 7a746156b96d..1bb7ded8ab85 100644
--- a/export.py
+++ b/export.py
@@ -67,8 +67,8 @@
 from models.experimental import attempt_load
 from models.yolo import Detect
 from utils.dataloaders import LoadImages
-from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, check_version, check_yaml,
-                           colorstr, file_size, print_args, url2file)
+from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version,
+                           check_yaml, colorstr, file_size, get_default_args, print_args, url2file)
 from utils.torch_utils import select_device, smart_inference_mode

@@ -89,200 +89,199 @@ def export_formats():
     return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU'])


+def try_export(inner_func):
+    # YOLOv5 export decorator, i.e. @try_export
+    inner_args = get_default_args(inner_func)
+
+    def outer_func(*args, **kwargs):
+        prefix = inner_args['prefix']
+        try:
+            with Profile() as dt:
+                f, model = inner_func(*args, **kwargs)
+            LOGGER.info(f'{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)')
+            return f, model
+        except Exception as e:
+            LOGGER.info(f'{prefix} export failure ❌ {dt.t:.1f}s: {e}')
+            return None, None
+
+    return outer_func
+
+
+@try_export
 def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')):
     # YOLOv5 TorchScript model export
-    try:
-        LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
-        f = file.with_suffix('.torchscript')
-
-        ts = torch.jit.trace(model, im, strict=False)
-        d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names}
-        extra_files = {'config.txt': json.dumps(d)}  # torch._C.ExtraFilesMap()
-        if optimize:  # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
-            optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
-        else:
-            ts.save(str(f), _extra_files=extra_files)
+    LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
+    f = file.with_suffix('.torchscript')

-        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-        return f
-    except Exception as e:
-        LOGGER.info(f'{prefix} export failure: {e}')
+    ts = torch.jit.trace(model, im, strict=False)
+    d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names}
+    extra_files = {'config.txt': json.dumps(d)}  # torch._C.ExtraFilesMap()
+    if optimize:  # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
+        optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
+    else:
+        ts.save(str(f), _extra_files=extra_files)
+    return f, None


+@try_export
 def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')):
     # YOLOv5 ONNX export
-    try:
-        check_requirements(('onnx',))
-        import onnx
-
-        LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
-        f = file.with_suffix('.onnx')
-
-        torch.onnx.export(
-            model.cpu() if dynamic else model,  # --dynamic only compatible with cpu
-            im.cpu() if dynamic else im,
-            f,
-            verbose=False,
-            opset_version=opset,
-            training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL,
-            do_constant_folding=not train,
-            input_names=['images'],
-            output_names=['output'],
-            dynamic_axes={
-                'images': {
-                    0: 'batch',
-                    2: 'height',
-                    3: 'width'},  # shape(1,3,640,640)
-                'output': {
-                    0: 'batch',
-                    1: 'anchors'}  # shape(1,25200,85)
-            } if dynamic else None)
-
-        # Checks
-        model_onnx = onnx.load(f)  # load onnx model
-        onnx.checker.check_model(model_onnx)  # check onnx model
-
-        # Metadata
-        d = {'stride': int(max(model.stride)), 'names': model.names}
-        for k, v in d.items():
-            meta = model_onnx.metadata_props.add()
-            meta.key, meta.value = k, str(v)
-        onnx.save(model_onnx, f)
-
-        # Simplify
-        if simplify:
-            try:
-                cuda = torch.cuda.is_available()
-                check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1'))
-                import onnxsim
-
-                LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
-                model_onnx, check = onnxsim.simplify(model_onnx)
-                assert check, 'assert check failed'
-                onnx.save(model_onnx, f)
-            except Exception as e:
-                LOGGER.info(f'{prefix} simplifier failure: {e}')
-        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-        return f
-    except Exception as e:
-        LOGGER.info(f'{prefix} export failure: {e}')
+    check_requirements(('onnx',))
+    import onnx
+
+    LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
+    f = file.with_suffix('.onnx')
+
+    torch.onnx.export(
+        model.cpu() if dynamic else model,  # --dynamic only compatible with cpu
+        im.cpu() if dynamic else im,
+        f,
+        verbose=False,
+        opset_version=opset,
+        training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL,
+        do_constant_folding=not train,
+        input_names=['images'],
+        output_names=['output'],
+        dynamic_axes={
+            'images': {
+                0: 'batch',
+                2: 'height',
+                3: 'width'},  # shape(1,3,640,640)
+            'output': {
+                0: 'batch',
+                1: 'anchors'}  # shape(1,25200,85)
+        } if dynamic else None)
+
+    # Checks
+    model_onnx = onnx.load(f)  # load onnx model
+    onnx.checker.check_model(model_onnx)  # check onnx model
+
+    # Metadata
+    d = {'stride': int(max(model.stride)), 'names': model.names}
+    for k, v in d.items():
+        meta = model_onnx.metadata_props.add()
+        meta.key, meta.value = k, str(v)
+    onnx.save(model_onnx, f)
+
+    # Simplify
+    if simplify:
+        try:
+            cuda = torch.cuda.is_available()
+            check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1'))
+            import onnxsim
+
+            LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
+            model_onnx, check = onnxsim.simplify(model_onnx)
+            assert check, 'assert check failed'
+            onnx.save(model_onnx, f)
+        except Exception as e:
+            LOGGER.info(f'{prefix} simplifier failure: {e}')
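        # Annotation: under @try_export every exporter now ends by returning a
        # (filepath, model_or_None) tuple. The decorator supplies the shared timing,
        # using the Profile context manager (whose __enter__ now returns self, see the
        # utils/general.py hunk below), plus the success/failure logging that the old
        # per-function try/except blocks duplicated.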
+    return f, model_onnx


+@try_export
 def export_openvino(model, file, half, prefix=colorstr('OpenVINO:')):
     # YOLOv5 OpenVINO export
-    try:
-        check_requirements(('openvino-dev',))  # requires openvino-dev: https://pypi.org/project/openvino-dev/
-        import openvino.inference_engine as ie
-
-        LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
-        f = str(file).replace('.pt', f'_openvino_model{os.sep}')
+    check_requirements(('openvino-dev',))  # requires openvino-dev: https://pypi.org/project/openvino-dev/
+    import openvino.inference_engine as ie

-        cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}"
-        subprocess.check_output(cmd.split())  # export
-        with open(Path(f) / file.with_suffix('.yaml').name, 'w') as g:
-            yaml.dump({'stride': int(max(model.stride)), 'names': model.names}, g)  # add metadata.yaml
+    LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
+    f = str(file).replace('.pt', f'_openvino_model{os.sep}')

-        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-        return f
-    except Exception as e:
-        LOGGER.info(f'\n{prefix} export failure: {e}')
+    cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}"
+    subprocess.check_output(cmd.split())  # export
+    with open(Path(f) / file.with_suffix('.yaml').name, 'w') as g:
+        yaml.dump({'stride': int(max(model.stride)), 'names': model.names}, g)  # add metadata.yaml
+    return f, None


+@try_export
 def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')):
     # YOLOv5 CoreML export
-    try:
-        check_requirements(('coremltools',))
-        import coremltools as ct
-
-        LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...')
-        f = file.with_suffix('.mlmodel')
-
-        ts = torch.jit.trace(model, im, strict=False)  # TorchScript model
-        ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])])
-        bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None)
-        if bits < 32:
-            if platform.system() == 'Darwin':  # quantization only supported on macOS
-                with warnings.catch_warnings():
-                    warnings.filterwarnings("ignore", category=DeprecationWarning)  # suppress numpy==1.20 float warning
-                    ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
-            else:
-                print(f'{prefix} quantization only supported on macOS, skipping...')
-        ct_model.save(f)
-
-        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-        return ct_model, f
-    except Exception as e:
-        LOGGER.info(f'\n{prefix} export failure: {e}')
-        return None, None
-
-
-def export_engine(model, im, file, train, half, dynamic, simplify, workspace=4, verbose=False):
+    check_requirements(('coremltools',))
+    import coremltools as ct
+
+    LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...')
+    f = file.with_suffix('.mlmodel')
+
+    ts = torch.jit.trace(model, im, strict=False)  # TorchScript model
+    ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])])
+    bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None)
+    if bits < 32:
+        if platform.system() == 'Darwin':  # quantization only supported on macOS
+            with warnings.catch_warnings():
+                warnings.filterwarnings("ignore", category=DeprecationWarning)  # suppress numpy==1.20 float warning
+                ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
+        else:
+            print(f'{prefix} quantization only supported on macOS, skipping...')
+    ct_model.save(f)
+    return f, ct_model
+
+
+@try_export
+def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')):
     # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt
-    prefix = colorstr('TensorRT:')
+    assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`'
     try:
-        assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`'
-        try:
-            import tensorrt as trt
-        except Exception:
-            if platform.system() == 'Linux':
-                check_requirements(('nvidia-tensorrt',), cmds=('-U --index-url https://pypi.ngc.nvidia.com',))
-            import tensorrt as trt
-
-        if trt.__version__[0] == '7':  # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012
-            grid = model.model[-1].anchor_grid
-            model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid]
-            export_onnx(model, im, file, 12, train, dynamic, simplify)  # opset 12
-            model.model[-1].anchor_grid = grid
-        else:  # TensorRT >= 8
-            check_version(trt.__version__, '8.0.0', hard=True)  # require tensorrt>=8.0.0
-            export_onnx(model, im, file, 13, train, dynamic, simplify)  # opset 13
-        onnx = file.with_suffix('.onnx')
-
-        LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
-        assert onnx.exists(), f'failed to export ONNX file: {onnx}'
-        f = file.with_suffix('.engine')  # TensorRT engine file
-        logger = trt.Logger(trt.Logger.INFO)
-        if verbose:
-            logger.min_severity = trt.Logger.Severity.VERBOSE
-
-        builder = trt.Builder(logger)
-        config = builder.create_builder_config()
-        config.max_workspace_size = workspace * 1 << 30
-        # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30)  # fix TRT 8.4 deprecation notice
-
-        flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
-        network = builder.create_network(flag)
-        parser = trt.OnnxParser(network, logger)
-        if not parser.parse_from_file(str(onnx)):
-            raise RuntimeError(f'failed to load ONNX file: {onnx}')
-
-        inputs = [network.get_input(i) for i in range(network.num_inputs)]
-        outputs = [network.get_output(i) for i in range(network.num_outputs)]
-        LOGGER.info(f'{prefix} Network Description:')
+        import tensorrt as trt
+    except Exception:
+        if platform.system() == 'Linux':
+            check_requirements(('nvidia-tensorrt',), cmds=('-U --index-url https://pypi.ngc.nvidia.com',))
+        import tensorrt as trt
+
+    if trt.__version__[0] == '7':  # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012
+        grid = model.model[-1].anchor_grid
+        model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid]
+        export_onnx(model, im, file, 12, False, dynamic, simplify)  # opset 12
+        model.model[-1].anchor_grid = grid
+    else:  # TensorRT >= 8
+        check_version(trt.__version__, '8.0.0', hard=True)  # require tensorrt>=8.0.0
+        export_onnx(model, im, file, 13, False, dynamic, simplify)  # opset 13
+    onnx = file.with_suffix('.onnx')
+
+    LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
+    assert onnx.exists(), f'failed to export ONNX file: {onnx}'
+    f = file.with_suffix('.engine')  # TensorRT engine file
+    logger = trt.Logger(trt.Logger.INFO)
+    if verbose:
+        logger.min_severity = trt.Logger.Severity.VERBOSE
+
+    builder = trt.Builder(logger)
+    config = builder.create_builder_config()
+    config.max_workspace_size = workspace * 1 << 30
+    # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30)  # fix TRT 8.4 deprecation notice
+
+    flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
+    network = builder.create_network(flag)
+    parser = trt.OnnxParser(network, logger)
+    if not parser.parse_from_file(str(onnx)):
+        raise RuntimeError(f'failed to load ONNX file: {onnx}')
+
+    inputs = [network.get_input(i) for i in range(network.num_inputs)]
+    outputs = [network.get_output(i) for i in range(network.num_outputs)]
+    LOGGER.info(f'{prefix} Network Description:')
+    for inp in inputs:
+        LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}')
+    for out in outputs:
+        LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}')
+
+    if dynamic:
+        if im.shape[0] <= 1:
+            LOGGER.warning(f"{prefix}WARNING: --dynamic model requires maximum --batch-size argument")
+        profile = builder.create_optimization_profile()
         for inp in inputs:
-            LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}')
-        for out in outputs:
-            LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}')
-
-        if dynamic:
-            if im.shape[0] <= 1:
-                LOGGER.warning(f"{prefix}WARNING: --dynamic model requires maximum --batch-size argument")
-            profile = builder.create_optimization_profile()
-            for inp in inputs:
-                profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape)
-            config.add_optimization_profile(profile)
-
-        LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine in {f}')
-        if builder.platform_has_fast_fp16 and half:
-            config.set_flag(trt.BuilderFlag.FP16)
-        with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
-            t.write(engine.serialize())
-        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-        return f
-    except Exception as e:
-        LOGGER.info(f'\n{prefix} export failure: {e}')
+            profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape)
+        config.add_optimization_profile(profile)
+
+    LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine in {f}')
+    if builder.platform_has_fast_fp16 and half:
+        config.set_flag(trt.BuilderFlag.FP16)
+    with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
+        t.write(engine.serialize())
+    return f, None


+@try_export
 def export_saved_model(model,
                        im,
                        file,
@@ -296,162 +295,142 @@ def export_saved_model(model,
                        keras=False,
                        prefix=colorstr('TensorFlow SavedModel:')):
     # YOLOv5 TensorFlow SavedModel export
-    try:
-        import tensorflow as tf
-        from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
-
-        from models.tf import TFDetect, TFModel
-
-        LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
-        f = str(file).replace('.pt', '_saved_model')
-        batch_size, ch, *imgsz = list(im.shape)  # BCHW
-
-        tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
-        im = tf.zeros((batch_size, *imgsz, ch))  # BHWC order for TensorFlow
-        _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
-        inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size)
-        outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
-        keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
-        keras_model.trainable = False
-        keras_model.summary()
-        if keras:
-            keras_model.save(f, save_format='tf')
-        else:
-            spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)
-            m = tf.function(lambda x: keras_model(x))  # full model
-            m = m.get_concrete_function(spec)
-            frozen_func = convert_variables_to_constants_v2(m)
-            tfm = tf.Module()
-            tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x)[0], [spec])
-            tfm.__call__(im)
-            tf.saved_model.save(tfm,
-                                f,
-                                options=tf.saved_model.SaveOptions(experimental_custom_gradients=False)
-                                if check_version(tf.__version__, '2.6') else tf.saved_model.SaveOptions())
-        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-        return keras_model, f
-    except Exception as e:
-        LOGGER.info(f'\n{prefix} export failure: {e}')
-        return None, None
+    import tensorflow as tf
+    from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
+
+    from models.tf import TFModel
+
+    LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
+    f = str(file).replace('.pt', '_saved_model')
+    batch_size, ch, *imgsz = list(im.shape)  # BCHW
+
+    tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
+    im = tf.zeros((batch_size, *imgsz, ch))  # BHWC order for TensorFlow
+    _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
+    inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size)
+    outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
+    keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
+    keras_model.trainable = False
+    keras_model.summary()
+    if keras:
+        keras_model.save(f, save_format='tf')
+    else:
+        spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)
+        m = tf.function(lambda x: keras_model(x))  # full model
+        m = m.get_concrete_function(spec)
+        frozen_func = convert_variables_to_constants_v2(m)
+        tfm = tf.Module()
+        tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x)[0], [spec])
+        tfm.__call__(im)
+        tf.saved_model.save(tfm,
+                            f,
+                            options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if check_version(
+                                tf.__version__, '2.6') else tf.saved_model.SaveOptions())
+    return f, keras_model


+@try_export
 def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')):
     # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow
-    try:
-        import tensorflow as tf
-        from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
+    import tensorflow as tf
+    from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2

-        LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
-        f = file.with_suffix('.pb')
+    LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
+    f = file.with_suffix('.pb')

-        m = tf.function(lambda x: keras_model(x))  # full model
-        m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
-        frozen_func = convert_variables_to_constants_v2(m)
-        frozen_func.graph.as_graph_def()
-        tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)
-
-        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-        return f
-    except Exception as e:
-        LOGGER.info(f'\n{prefix} export failure: {e}')
+    m = tf.function(lambda x: keras_model(x))  # full model
+    m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
+    frozen_func = convert_variables_to_constants_v2(m)
+    frozen_func.graph.as_graph_def()
+    tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)
+    return f, None


+@try_export
 def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')):
     # YOLOv5 TensorFlow Lite export
-    try:
-        import tensorflow as tf
-
-        LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
-        batch_size, ch, *imgsz = list(im.shape)  # BCHW
-        f = str(file).replace('.pt', '-fp16.tflite')
-
-        converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
-        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
-        converter.target_spec.supported_types = [tf.float16]
-        converter.optimizations = [tf.lite.Optimize.DEFAULT]
-        if int8:
-            from models.tf import representative_dataset_gen
-            dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False)
-            converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100)
-            converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
-            converter.target_spec.supported_types = []
-            converter.inference_input_type = tf.uint8  # or tf.int8
-            converter.inference_output_type = tf.uint8  # or tf.int8
-            converter.experimental_new_quantizer = True
-            f = str(file).replace('.pt', '-int8.tflite')
-        if nms or agnostic_nms:
-            converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS)
-
-        tflite_model = converter.convert()
-        open(f, "wb").write(tflite_model)
-        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-        return f
-    except Exception as e:
-        LOGGER.info(f'\n{prefix} export failure: {e}')
-
-
+    import tensorflow as tf
+
+    LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
+    batch_size, ch, *imgsz = list(im.shape)  # BCHW
+    f = str(file).replace('.pt', '-fp16.tflite')
+
+    converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
+    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
+    converter.target_spec.supported_types = [tf.float16]
+    converter.optimizations = [tf.lite.Optimize.DEFAULT]
+    if int8:
+        from models.tf import representative_dataset_gen
+        dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False)
+        converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100)
+        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
+        converter.target_spec.supported_types = []
+        converter.inference_input_type = tf.uint8  # or tf.int8
+        converter.inference_output_type = tf.uint8  # or tf.int8
+        converter.experimental_new_quantizer = True
+        f = str(file).replace('.pt', '-int8.tflite')
+    if nms or agnostic_nms:
+        converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS)
+
+    tflite_model = converter.convert()
+    open(f, "wb").write(tflite_model)
+    return f, None
+
+
+@try_export
 def export_edgetpu(file, prefix=colorstr('Edge TPU:')):
     # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/
-    try:
-        cmd = 'edgetpu_compiler --version'
-        help_url = 'https://coral.ai/docs/edgetpu/compiler/'
-        assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}'
-        if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0:
-            LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}')
-            sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0  # sudo installed on system
-            for c in (
-                    'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -',
-                    'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',
-                    'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'):
-                subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True)
-        ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]
-
-        LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...')
-        f = str(file).replace('.pt', '-int8_edgetpu.tflite')  # Edge TPU model
-        f_tfl = str(file).replace('.pt', '-int8.tflite')  # TFLite model
-
-        cmd = f"edgetpu_compiler -s -d -k 10 --out_dir {file.parent} {f_tfl}"
-        subprocess.run(cmd.split(), check=True)
-
-        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-        return f
-    except Exception as e:
-        LOGGER.info(f'\n{prefix} export failure: {e}')
-
-
+    cmd = 'edgetpu_compiler --version'
+    help_url = 'https://coral.ai/docs/edgetpu/compiler/'
+    assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}'
+    if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0:
+        LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}')
+        sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0  # sudo installed on system
+        for c in (
+                'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -',
+                'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',
+                'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'):
+            subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True)
+    ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]
+
+    LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...')
+    f = str(file).replace('.pt', '-int8_edgetpu.tflite')  # Edge TPU model
+    f_tfl = str(file).replace('.pt', '-int8.tflite')  # TFLite model
+
+    cmd = f"edgetpu_compiler -s -d -k 10 --out_dir {file.parent} {f_tfl}"
+    subprocess.run(cmd.split(), check=True)
+    return f, None
+
+
+@try_export
 def export_tfjs(file, prefix=colorstr('TensorFlow.js:')):
     # YOLOv5 TensorFlow.js export
-    try:
-        check_requirements(('tensorflowjs',))
-        import re
-
-        import tensorflowjs as tfjs
-
-        LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...')
-        f = str(file).replace('.pt', '_web_model')  # js dir
-        f_pb = file.with_suffix('.pb')  # *.pb path
-        f_json = f'{f}/model.json'  # *.json path
-
-        cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \
-              f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}'
-        subprocess.run(cmd.split())
-
-        json = Path(f_json).read_text()
-        with open(f_json, 'w') as j:  # sort JSON Identity_* in ascending order
-            subst = re.sub(
-                r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
-                r'"Identity.?.?": {"name": "Identity.?.?"}, '
-                r'"Identity.?.?": {"name": "Identity.?.?"}, '
-                r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, '
-                r'"Identity_1": {"name": "Identity_1"}, '
-                r'"Identity_2": {"name": "Identity_2"}, '
-                r'"Identity_3": {"name": "Identity_3"}}}', json)
-            j.write(subst)
-
-        LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
-        return f
-    except Exception as e:
-        LOGGER.info(f'\n{prefix} export failure: {e}')
+    check_requirements(('tensorflowjs',))
+    import re
+
+    import tensorflowjs as tfjs
+
+    LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...')
+    f = str(file).replace('.pt', '_web_model')  # js dir
+    f_pb = file.with_suffix('.pb')  # *.pb path
+    f_json = f'{f}/model.json'  # *.json path
+
+    cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \
+          f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}'
+    subprocess.run(cmd.split())
+
+    json = Path(f_json).read_text()
+    with open(f_json, 'w') as j:  # sort JSON Identity_* in ascending order
+        subst = re.sub(
+            r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
+            r'"Identity.?.?": {"name": "Identity.?.?"}, '
+            r'"Identity.?.?": {"name": "Identity.?.?"}, '
+            r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, '
+            r'"Identity_1": {"name": "Identity_1"}, '
+            r'"Identity_2": {"name": "Identity_2"}, '
+            r'"Identity_3": {"name": "Identity_3"}}}', json)
        j.write(subst)
+    return f, None


 @smart_inference_mode()
@@ -524,22 +503,22 @@ def run(
     f = [''] * 10  # exported filenames
     warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning)  # suppress TracerWarning
     if jit:
-        f[0] = export_torchscript(model, im, file, optimize)
+        f[0], _ = export_torchscript(model, im, file, optimize)
     if engine:  # TensorRT required before ONNX
-        f[1] = export_engine(model, im, file, train, half, dynamic, simplify, workspace, verbose)
+        f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose)
     if onnx or xml:  # OpenVINO requires ONNX
-        f[2] = export_onnx(model, im, file, opset, train, dynamic, simplify)
+        f[2], _ = export_onnx(model, im, file, opset, train, dynamic, simplify)
     if xml:  # OpenVINO
-        f[3] = export_openvino(model, file, half)
+        f[3], _ = export_openvino(model, file, half)
     if coreml:
-        _, f[4] = export_coreml(model, im, file, int8, half)
+        f[4], _ = export_coreml(model, im, file, int8, half)

     # TensorFlow Exports
     if any((saved_model, pb, tflite, edgetpu, tfjs)):
         if int8 or edgetpu:  # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707
            check_requirements(('flatbuffers==1.12',))  # required before `import tensorflow`
        assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.'
-        model, f[5] = export_saved_model(model.cpu(),
+        f[5], model = export_saved_model(model.cpu(),
                                          im,
                                          file,
                                          dynamic,
                                          tf_nms=nms or agnostic_nms or tfjs,
                                          agnostic_nms=agnostic_nms or tfjs,
                                          topk_per_class=topk_per_class,
                                          topk_all=topk_all,
                                          iou_thres=iou_thres,
                                          conf_thres=conf_thres,
                                          keras=keras)
         if pb or tfjs:  # pb prerequisite to tfjs
-            f[6] = export_pb(model, file)
+            f[6], _ = export_pb(model, file)
         if tflite or edgetpu:
-            f[7] = export_tflite(model, im, file, int8=int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms)
+            f[7], _ = export_tflite(model, im, file, int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms)
         if edgetpu:
-            f[8] = export_edgetpu(file)
+            f[8], _ = export_edgetpu(file)
         if tfjs:
-            f[9] = export_tfjs(file)
+            f[9], _ = export_tfjs(file)

     # Finish
     f = [str(x) for x in f if x]  # filter out '' and None
     if any(f):
         h = '--half' if half else ''  # --half FP16 inference arg
-        LOGGER.info(f'\nExport complete ({time.time() - t:.2f}s)'
+        LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)'
                     f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
                     f"\nDetect:          python detect.py --weights {f[-1]} {h}"
                     f"\nValidate:        python val.py --weights {f[-1]} {h}"
diff --git a/utils/general.py b/utils/general.py
index 3bc6fbc22d57..d8c90f10ac8f 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -148,6 +148,7 @@ def __init__(self, t=0.0):

     def __enter__(self):
         self.start = self.time()
+        return self

     def __exit__(self, type, value, traceback):
         self.dt = self.time() - self.start  # delta-time
@@ -220,10 +221,10 @@ def methods(instance):
     return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")]


-def print_args(args: Optional[dict] = None, show_file=True, show_fcn=False):
+def print_args(args: Optional[dict] = None, show_file=True, show_func=False):
     # Print function arguments (optional args dict)
     x = inspect.currentframe().f_back  # previous frame
-    file, _, fcn, _, _ = inspect.getframeinfo(x)
+    file, _, func, _, _ = inspect.getframeinfo(x)
     if args is None:  # get args automatically
         args, _, _, frm = inspect.getargvalues(x)
         args = {k: v for k, v in frm.items() if k in args}
@@ -231,7 +232,7 @@ def print_args(args: Optional[dict] = None, show_file=True, show_fcn=False):
         file = Path(file).resolve().relative_to(ROOT).with_suffix('')
     except ValueError:
         file = Path(file).stem
-    s = (f'{file}: ' if show_file else '') + (f'{fcn}: ' if show_fcn else '')
+    s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '')
     LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items()))


@@ -255,7 +256,13 @@ def init_seeds(seed=0, deterministic=False):

 def intersect_dicts(da, db, exclude=()):
     # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
-    return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}
+    return {k: v for k, v in da.items() if k in db and all(x not in k for x in exclude) and v.shape == db[k].shape}
+
+
+def get_default_args(func):
+    # Get func() default arguments
+    signature = inspect.signature(func)
+    return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}


 def get_latest_run(search_dir='.'):

From 48e56d3c9bede445d49e8f2af458d70955032e91 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Tue, 23 Aug 2022 14:37:46 +0200
Subject: [PATCH 057/326] Add optional `transforms` argument to LoadStreams()
 (#9105)

* Add optional `transforms` argument to LoadStreams()

Prepare for streaming classification support

Signed-off-by: Glenn Jocher

* Cleanup

Signed-off-by: Glenn Jocher

* fix

* batch size > 1 fix

Signed-off-by: Glenn Jocher
---
 utils/dataloaders.py | 54 ++++++++++++++++++++------------------------
 1 file changed, 25 insertions(+), 29 deletions(-)

diff --git a/utils/dataloaders.py b/utils/dataloaders.py
index e73b20a58915..675c2898e7d7 100755
--- a/utils/dataloaders.py
+++ b/utils/dataloaders.py
@@ -251,7 +251,7 @@ def __next__(self):

         s = f'image {self.count}/{self.nf} {path}: '
         if self.transforms:
-            im = self.transforms(cv2.cvtColor(im0, cv2.COLOR_BGR2RGB))  # classify transforms
+            im = self.transforms(cv2.cvtColor(im0, cv2.COLOR_BGR2RGB))  # transforms
         else:
             im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0]  # padded resize
             im = im.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
@@ -289,22 +289,20 @@ def __next__(self):
             raise StopIteration

         # Read frame
-        ret_val, img0 = self.cap.read()
-        img0 = cv2.flip(img0, 1)  # flip left-right
+        ret_val, im0 = self.cap.read()
+        im0 = cv2.flip(im0, 1)  # flip left-right

         # Print
         assert ret_val, f'Camera Error {self.pipe}'
         img_path = 'webcam.jpg'
         s = f'webcam {self.count}: '

-        # Padded resize
-        img = letterbox(img0, self.img_size, stride=self.stride)[0]
+        # Process
+        im = letterbox(im0, self.img_size, stride=self.stride)[0]  # resize
+        im = im.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
+        im = np.ascontiguousarray(im)  # contiguous

-        # Convert
-        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
-        img = np.ascontiguousarray(img)
-
-        return img_path, img, img0, None, s
+        return img_path, im, im0, None, s

     def __len__(self):
         return 0
@@ -312,7 +310,7 @@ def __len__(self):

 class LoadStreams:
     # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP streams`
-    def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):
+    def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, transforms=None):
         self.mode = 'stream'
         self.img_size = img_size
         self.stride = stride
@@ -326,7 +324,6 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):
         n = len(sources)
         self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
         self.sources = [clean_str(x) for x in sources]  # clean source names for later
-        self.auto = auto
         for i, s in enumerate(sources):  # index, source
             # Start thread to read frames from video stream
             st = f'{i + 1}/{n}: {s}... '
@@ -353,8 +350,10 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):
         LOGGER.info('')  # newline

         # check for common shapes
-        s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])
+        s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs])
         self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
+        self.auto = auto and self.rect
+        self.transforms = transforms  # optional
         if not self.rect:
             LOGGER.warning('WARNING: Stream shapes differ. For optimal performance supply similarly-shaped streams.')
@@ -385,18 +384,15 @@ def __next__(self):
             cv2.destroyAllWindows()
             raise StopIteration

-        # Letterbox
-        img0 = self.imgs.copy()
-        img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]
-
-        # Stack
-        img = np.stack(img, 0)
-
-        # Convert
-        img = img[..., ::-1].transpose((0, 3, 1, 2))  # BGR to RGB, BHWC to BCHW
-        img = np.ascontiguousarray(img)
+        im0 = self.imgs.copy()
+        if self.transforms:
+            im = np.stack([self.transforms(cv2.cvtColor(x, cv2.COLOR_BGR2RGB)) for x in im0])  # transforms
+        else:
+            im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0])  # resize
+            im = im[..., ::-1].transpose((0, 3, 1, 2))  # BGR to RGB, BHWC to BCHW
+            im = np.ascontiguousarray(im)  # contiguous

-        return self.sources, img, img0, None, ''
+        return self.sources, im, im0, None, ''

     def __len__(self):
         return len(self.sources)  # 1E12 frames = 32 streams at 30 FPS for 30 years
@@ -836,7 +832,7 @@ def collate_fn(batch):

     @staticmethod
     def collate_fn4(batch):
-        img, label, path, shapes = zip(*batch)  # transposed
+        im, label, path, shapes = zip(*batch)  # transposed
         n = len(shapes) // 4
         im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]

         for i in range(n):  # zidane torch.zeros(16,3,720,1280)  # BCHW
             i *= 4
             if random.random() < 0.5:
-                im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear',
-                                   align_corners=False)[0].type(img[i].type())
+                im1 = F.interpolate(im[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear',
+                                    align_corners=False)[0].type(im[i].type())
                 lb = label[i]
             else:
-                im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
+                im1 = torch.cat((torch.cat((im[i], im[i + 1]), 1), torch.cat((im[i + 2], im[i + 3]), 1)), 2)
                 lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
-            im4.append(im)
+            im4.append(im1)
             label4.append(lb)

         for i, lb in enumerate(label4):

From 51c9f9229731021f55a9ceb9f9504abfc979a54b Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Tue, 23 Aug 2022 17:54:51 +0200
Subject: [PATCH 058/326] Streaming Classification support (#9106)

* Streaming Classification support

* Streaming Classification support

* Streaming Classification support
---
 classify/predict.py    | 168 +++++++++++++++++++++++++++++++----------
 detect.py              |   2 +-
 utils/augmentations.py |   1 +
 3 files changed, 131 insertions(+), 40 deletions(-)

diff --git a/classify/predict.py b/classify/predict.py
index 135470fd36ed..b430c0645f21 100644
--- a/classify/predict.py
+++ b/classify/predict.py
@@ -1,12 +1,15 @@
 # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
 """
-Run YOLOv5 classification inference on images, videos, directories, and globs.
+Run YOLOv5 classification inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
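A minimal Python equivalent of this script's core inference step, sketched under the assumption of a local PyTorch checkpoint and the classify_transforms/DetectMultiBackend interfaces shown in these patches:

    import cv2
    import torch
    import torch.nn.functional as F

    from models.common import DetectMultiBackend
    from utils.augmentations import classify_transforms

    # Load a classification checkpoint (PyTorch backend assumed here)
    model = DetectMultiBackend('yolov5s-cls.pt', device=torch.device('cpu'))

    # classify_transforms expects an RGB ndarray and returns a normalized CHW tensor
    im = classify_transforms(224)(cv2.cvtColor(cv2.imread('bus.jpg'), cv2.COLOR_BGR2RGB))

    probs = F.softmax(model(im.unsqueeze(0).to(model.device)), dim=1)  # class probabilities
    print(probs.argmax(1))  # top-1 class index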
Usage - sources: - $ python classify/predict.py --weights yolov5s.pt --source img.jpg # image - vid.mp4 # video - path/ # directory - 'path/*.jpg' # glob + $ python classify/predict.py --weights yolov5s-cls.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + path/ # directory + 'path/*.jpg' # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream Usage - formats: $ python classify/predict.py --weights yolov5s-cls.pt # PyTorch @@ -23,9 +26,11 @@ import argparse import os +import platform import sys from pathlib import Path +import torch.backends.cudnn as cudnn import torch.nn.functional as F FILE = Path(__file__).resolve() @@ -36,45 +41,70 @@ from models.common import DetectMultiBackend from utils.augmentations import classify_transforms -from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages -from utils.general import LOGGER, Profile, check_file, check_requirements, colorstr, increment_path, print_args +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams +from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, + increment_path, print_args, strip_optimizer) +from utils.plots import Annotator from utils.torch_utils import select_device, smart_inference_mode @smart_inference_mode() def run( weights=ROOT / 'yolov5s-cls.pt', # model.pt path(s) - source=ROOT / 'data/images', # file/dir/URL/glob - imgsz=224, # inference size + source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam + data=ROOT / 'data/coco128.yaml', # dataset.yaml path + imgsz=(224, 224), # inference size (height, width) device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu + view_img=False, # show results + save_txt=False, # save results to *.txt + nosave=False, # do not save images/videos + augment=False, # augmented inference + visualize=False, # visualize features + update=False, # update all models + project=ROOT / 'runs/predict-cls', # save results to project/name + name='exp', # save results to project/name + exist_ok=False, # existing project/name ok, do not increment half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference - project=ROOT / 'runs/predict-cls', # save to project/name - name='exp', # save to project/name - exist_ok=False, # existing project/name ok, do not increment ): source = str(source) + save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) + webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) if is_url and is_file: source = check_file(source) # download - dt = Profile(), Profile(), Profile() - device = select_device(device) - # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run - save_dir.mkdir(parents=True, exist_ok=True) # make dir + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model - model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half) - model.warmup(imgsz=(1, 3, imgsz, imgsz)) # warmup - dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz)) - for seen, (path, im, im0s, vid_cap, s) in enumerate(dataset): - # Image + device = select_device(device) + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, names, pt = 
model.stride, model.names, model.pt + imgsz = check_img_size(imgsz, s=stride) # check image size + + # Dataloader + if webcam: + view_img = check_imshow() + cudnn.benchmark = True # set True to speed up constant image size inference + dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0])) + bs = len(dataset) # batch_size + else: + dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0])) + bs = 1 # batch_size + vid_path, vid_writer = [None] * bs, [None] * bs + + # Run inference + model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup + seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) + for path, im, im0s, vid_cap, s in dataset: with dt[0]: - im = im.unsqueeze(0).to(device) - im = im.half() if model.fp16 else im.float() + im = im.to(device) + im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 + if len(im.shape) == 3: + im = im[None] # expand for batch dim # Inference with dt[1]: @@ -82,33 +112,93 @@ def run( # Post-process with dt[2]: - p = F.softmax(results, dim=1) # probabilities - i = p.argsort(1, descending=True)[:, :5].squeeze().tolist() # top 5 indices - # if save: - # imshow_cls(im, f=save_dir / Path(path).name, verbose=True) - LOGGER.info( - f"{s}{imgsz}x{imgsz} {', '.join(f'{model.names[j]} {p[0, j]:.2f}' for j in i)}, {dt[1].dt * 1E3:.1f}ms") + pred = F.softmax(results, dim=1) # probabilities + + # Process predictions + for i, prob in enumerate(pred): # per image + seen += 1 + if webcam: # batch_size >= 1 + p, im0 = path[i], im0s[i].copy() + s += f'{i}: ' + else: + p, im0 = path, im0s.copy() + + p = Path(p) # to Path + save_path = str(save_dir / p.name) # im.jpg + s += '%gx%g ' % im.shape[2:] # print string + annotator = Annotator(im0, example=str(names), pil=True) + + # Print results + top5i = prob.argsort(0, descending=True)[:5].tolist() # top 5 indices + s += f"{', '.join(f'{names[j]} {prob[j]:.2f}' for j in top5i)}, " + + # Write results + if save_img or view_img: # Add bbox to image + text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i) + annotator.text((64, 64), text, txt_color=(255, 255, 255)) + + # Stream results + im0 = annotator.result() + if view_img: + if platform.system() == 'Linux' and p not in windows: + windows.append(p) + cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) + cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) + cv2.imshow(str(p), im0) + cv2.waitKey(1) # 1 millisecond + + # Save results (image with detections) + if save_img: + if dataset.mode == 'image': + cv2.imwrite(save_path, im0) + else: # 'video' or 'stream' + if vid_path[i] != save_path: # new video + vid_path[i] = save_path + if isinstance(vid_writer[i], cv2.VideoWriter): + vid_writer[i].release() # release previous video writer + if vid_cap: # video + fps = vid_cap.get(cv2.CAP_PROP_FPS) + w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + else: # stream + fps, w, h = 30, im0.shape[1], im0.shape[0] + save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos + vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + vid_writer[i].write(im0) + + # Print time (inference-only) + LOGGER.info(f"{s}{dt[1].dt * 1E3:.1f}ms") # Print results - t = tuple(x.t / (seen + 1) * 1E3 for x in dt) # speeds per image - shape = (1, 3, imgsz, imgsz) - LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % 
t) - LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") - return p + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) + if save_txt or save_img: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + if update: + strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning) def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model path(s)') - parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='train, val image size (pixels)') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[224], help='inference size h,w') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--view-img', action='store_true', help='show results') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--nosave', action='store_true', help='do not save images/videos') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--visualize', action='store_true', help='visualize features') + parser.add_argument('--update', action='store_true', help='update all models') + parser.add_argument('--project', default=ROOT / 'runs/predict-cls', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save results to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') - parser.add_argument('--project', default=ROOT / 'runs/predict-cls', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') opt = parser.parse_args() + opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand print_args(vars(opt)) return opt diff --git a/detect.py b/detect.py index 541ad90e051d..60a821b59a03 100644 --- a/detect.py +++ b/detect.py @@ -1,6 +1,6 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ -Run YOLOv5 detection inference on images, videos, directories, streams, etc. +Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc. 
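The classification post-processing in classify/predict.py above reduces to a softmax followed by a top-5 argsort. A self-contained sketch of that step, with a hypothetical three-class names dict standing in for model.names:

import torch
import torch.nn.functional as F

names = {0: 'cat', 1: 'dog', 2: 'bird'}  # hypothetical class-name mapping
logits = torch.randn(2, 3)  # stand-in for raw model output (batch=2, 3 classes)
pred = F.softmax(logits, dim=1)  # probabilities
for prob in pred:  # per image
    top5i = prob.argsort(0, descending=True)[:5].tolist()  # top-5 class indices
    print(', '.join(f'{names[j]} {prob[j]:.2f}' for j in top5i))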
Usage - sources: $ python detect.py --weights yolov5s.pt --source 0 # webcam diff --git a/utils/augmentations.py b/utils/augmentations.py index a55fefa68a76..c8499b3fc8ae 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -344,4 +344,5 @@ def classify_albumentations(augment=True, def classify_transforms(size=224): # Transforms to apply if albumentations not installed + assert isinstance(size, int), f'ERROR: classify_transforms size {size} must be integer, not (list, tuple)' return T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) From e6f54c5b32340278474e922d456fa3eb7f74599d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 23 Aug 2022 23:54:05 +0200 Subject: [PATCH 059/326] Fix numpy to torch cls streaming bug (#9112) * Fix numpy to torch cls streaming bug Resolves https://github.com/ultralytics/yolov5/issues/9111 Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- classify/predict.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/classify/predict.py b/classify/predict.py index b430c0645f21..b33b5bcc9933 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -30,6 +30,7 @@ import sys from pathlib import Path +import torch import torch.backends.cudnn as cudnn import torch.nn.functional as F @@ -101,7 +102,7 @@ def run( seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: with dt[0]: - im = im.to(device) + im = torch.Tensor(im).to(device) im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 if len(im.shape) == 3: im = im[None] # expand for batch dim From f8816f58b7f4bf018ec0fdf546430295e5719205 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Wed, 24 Aug 2022 15:45:37 +0530 Subject: [PATCH 060/326] Infer Loggers project name (#9117) * smart project name inference * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update __init__.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/loggers/__init__.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 006125edbcd9..59d4b566836a 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -252,7 +252,7 @@ def __init__(self, opt, console_logger, include=('tb', 'wandb')): self.tb = SummaryWriter(str(self.save_dir)) if wandb and 'wandb' in self.include: - self.wandb = wandb.init(project="YOLOv5-Classifier" if opt.project == "runs/train" else opt.project, + self.wandb = wandb.init(project=web_project_name(str(opt.project)), name=None if opt.name == "exp" else opt.name, config=opt) else: @@ -303,3 +303,11 @@ def log_tensorboard_graph(tb, model, imgsz=(640, 640)): tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), []) except Exception as e: print(f'WARNING: TensorBoard graph visualization failure {e}') + + +def web_project_name(project): + # Convert local project name to web project name + if not project.startswith('runs/train'): + return project + suffix = '-Classify' if project.endswith('-cls') else '-Segment' if project.endswith('-seg') else '' + return f'YOLOv5{suffix}' From f0e5a608f50ac647827bede88fded7908c7edeab Mon Sep 17 00:00:00 
2001 From: Glenn Jocher Date: Wed, 24 Aug 2022 12:31:50 +0200 Subject: [PATCH 061/326] Add CSV logging to GenericLogger (#9128) Enable CSV logging for Classify training. Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/loggers/__init__.py | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-)

diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 59d4b566836a..880039b1914c 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -242,9 +242,10 @@ class GenericLogger:
 def __init__(self, opt, console_logger, include=('tb', 'wandb')):
 # init default loggers
- self.save_dir = opt.save_dir
+ self.save_dir = Path(opt.save_dir)
 self.include = include
 self.console_logger = console_logger
+ self.csv = self.save_dir / 'results.csv' # CSV logger
 if 'tb' in self.include:
 prefix = colorstr('TensorBoard: ')
 self.console_logger.info(
@@ -258,14 +259,21 @@ def __init__(self, opt, console_logger, include=('tb', 'wandb')):
 else:
 self.wandb = None

- def log_metrics(self, metrics_dict, epoch):
+ def log_metrics(self, metrics, epoch):
 # Log metrics dictionary to all loggers
+ if self.csv:
+ keys, vals = list(metrics.keys()), list(metrics.values())
+ n = len(metrics) + 1 # number of cols
+ s = '' if self.csv.exists() else (('%23s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n') # header
+ with open(self.csv, 'a') as f:
+ f.write(s + ('%23.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n')
+
 if self.tb:
- for k, v in metrics_dict.items():
+ for k, v in metrics.items():
 self.tb.add_scalar(k, v, epoch)

 if self.wandb:
- self.wandb.log(metrics_dict, step=epoch)
+ self.wandb.log(metrics, step=epoch)

 def log_images(self, files, name='Images', epoch=0):
 # Log images to all loggers
@@ -291,6 +299,11 @@ def log_model(self, model_path, epoch=0, metadata={}):
 art.add_file(str(model_path))
 wandb.log_artifact(art)

+ def update_params(self, params):
+ # Update the parameters logged
+ if self.wandb:
+ wandb.run.config.update(params, allow_val_change=True)
+

 def log_tensorboard_graph(tb, model, imgsz=(640, 640)):
 # Log model graph to TensorBoard

From d07ddc69960ed71111457cbe41ab25ded1ab3155 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 25 Aug 2022 14:34:26 +0200 Subject: [PATCH 062/326] New TryExcept decorator (#9154) * New TryExcept decorator * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/__init__.py | 27 ++++++++++++++++++ utils/general.py | 27 ++---------------- utils/metrics.py | 73 ++++++++++++++++++++++++----------------------- utils/plots.py | 5 ++-- 4 files changed, 71 insertions(+), 61 deletions(-)

diff --git a/utils/__init__.py b/utils/__init__.py index a63c473a4340..7466a486caf4 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -3,6 +3,33 @@ utils/initialization """
+import contextlib
+import threading
+
+
+class TryExcept(contextlib.ContextDecorator):
+ # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager
+ def __init__(self, msg='default message here'):
+ self.msg = msg
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, value, traceback):
+ if value:
+ print(f'{self.msg}: {value}')
+ return True
+
+
+def threaded(func):
+ # Multi-threads a target function and returns thread.
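# A worked view of the fixed-width CSV rows GenericLogger.log_metrics now writes
# (patch 061 above): a header is emitted once when results.csv is new, then one row
# per epoch. Self-contained sketch; the metrics dict below is a hypothetical example.
from pathlib import Path

csv = Path('results.csv')
metrics = {'train/loss': 0.123, 'metrics/accuracy_top1': 0.85}  # hypothetical values
epoch = 0
keys, vals = list(metrics.keys()), list(metrics.values())
n = len(metrics) + 1  # number of cols, epoch included
s = '' if csv.exists() else (('%23s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n')  # header
with open(csv, 'a') as f:
    f.write(s + ('%23.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n')  # one row per epoch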
Usage: @threaded decorator + def wrapper(*args, **kwargs): + thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True) + thread.start() + return thread + + return wrapper + def notebook_init(verbose=True): # Check system software and hardware diff --git a/utils/general.py b/utils/general.py index d8c90f10ac8f..91b13f84a6c4 100755 --- a/utils/general.py +++ b/utils/general.py @@ -15,7 +15,6 @@ import shutil import signal import sys -import threading import time import urllib from datetime import datetime @@ -34,6 +33,7 @@ import torchvision import yaml +from utils import TryExcept from utils.downloads import gsutil_getsize from utils.metrics import box_iou, fitness @@ -195,27 +195,6 @@ def __exit__(self, exc_type, exc_val, exc_tb): os.chdir(self.cwd) -def try_except(func): - # try-except function. Usage: @try_except decorator - def handler(*args, **kwargs): - try: - func(*args, **kwargs) - except Exception as e: - print(e) - - return handler - - -def threaded(func): - # Multi-threads a target function and returns thread. Usage: @threaded decorator - def wrapper(*args, **kwargs): - thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True) - thread.start() - return thread - - return wrapper - - def methods(instance): # Get class/instance methods return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] @@ -319,7 +298,7 @@ def git_describe(path=ROOT): # path must be a directory return '' -@try_except +@TryExcept() @WorkingDirectory(ROOT) def check_git_status(repo='ultralytics/yolov5'): # YOLOv5 status check, recommend 'git pull' if code is out of date @@ -364,7 +343,7 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals return result -@try_except +@TryExcept() def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=()): # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages) prefix = colorstr('red', 'bold', 'requirements:') diff --git a/utils/metrics.py b/utils/metrics.py index 8fa3c7e217c7..de1bf05b326b 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -11,6 +11,8 @@ import numpy as np import torch +from utils import TryExcept, threaded + def fitness(x): # Model fitness as a weighted combination of metrics @@ -184,36 +186,35 @@ def tp_fp(self): # fn = self.matrix.sum(0) - tp # false negatives (missed detections) return tp[:-1], fp[:-1] # remove background class + @TryExcept('WARNING: ConfusionMatrix plot failure') def plot(self, normalize=True, save_dir='', names=()): - try: - import seaborn as sn - - array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns - array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) - - fig = plt.figure(figsize=(12, 9), tight_layout=True) - nc, nn = self.nc, len(names) # number of classes, names - sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size - labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels - with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered - sn.heatmap(array, - annot=nc < 30, - annot_kws={ - "size": 8}, - cmap='Blues', - fmt='.2f', - square=True, - vmin=0.0, - xticklabels=names + ['background FP'] if labels else "auto", - yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) - fig.axes[0].set_xlabel('True') - fig.axes[0].set_ylabel('Predicted') - 
plt.title('Confusion Matrix')
- fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
- plt.close()
- except Exception as e:
- print(f'WARNING: ConfusionMatrix plot failure: {e}')
+ import seaborn as sn
+
+ array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns
+ array[array < 0.005] = np.nan # don't annotate (would appear as 0.00)
+
+ fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True)
+ nc, nn = self.nc, len(names) # number of classes, names
+ sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size
+ labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels
+ with warnings.catch_warnings():
+ warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered
+ sn.heatmap(array,
+ ax=ax,
+ annot=nc < 30,
+ annot_kws={
+ "size": 8},
+ cmap='Blues',
+ fmt='.2f',
+ square=True,
+ vmin=0.0,
+ xticklabels=names + ['background FP'] if labels else "auto",
+ yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1))
+ ax.set_xlabel('True')
+ ax.set_ylabel('Predicted')
+ ax.set_title('Confusion Matrix')
+ fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
+ plt.close(fig)

 def print(self):
 for i in range(self.nc + 1):
@@ -320,6 +321,7 @@ def wh_iou(wh1, wh2, eps=1e-7):

 # Plots ----------------------------------------------------------------------------------------------------------------
+@threaded
 def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()):
 # Precision-recall curve
 fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
@@ -336,12 +338,13 @@ def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()):
 ax.set_ylabel('Precision')
 ax.set_xlim(0, 1)
 ax.set_ylim(0, 1)
- plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
- plt.title('Precision-Recall Curve')
+ ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
+ ax.set_title('Precision-Recall Curve')
 fig.savefig(save_dir, dpi=250)
- plt.close()
+ plt.close(fig)


+@threaded
 def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confidence', ylabel='Metric'):
 # Metric-confidence curve
 fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
@@ -358,7 +361,7 @@ def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confi
 ax.set_ylabel(ylabel)
 ax.set_xlim(0, 1)
 ax.set_ylim(0, 1)
- plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
- plt.title(f'{ylabel}-Confidence Curve')
+ ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
+ ax.set_title(f'{ylabel}-Confidence Curve')
 fig.savefig(save_dir, dpi=250)
- plt.close()
+ plt.close(fig)

diff --git a/utils/plots.py b/utils/plots.py index d35e2bdd168a..2aa163268336 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -19,8 +19,9 @@
 import torch
 from PIL import Image, ImageDraw, ImageFont

+from utils import TryExcept, threaded
 from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_coords, increment_path,
- is_ascii, threaded, try_except, xywh2xyxy, xyxy2xywh)
+ is_ascii, xywh2xyxy, xyxy2xywh)
 from utils.metrics import fitness

 # Settings
@@ -339,7 +340,7 @@ def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_
 plt.savefig(f, dpi=300)


-@try_except # known issue https://github.com/ultralytics/yolov5/issues/5395
+@TryExcept() # known issue https://github.com/ultralytics/yolov5/issues/5395
 def plot_labels(labels, names=(), save_dir=Path('')):
 # plot dataset labels
 LOGGER.info(f"Plotting labels to {save_dir /
'labels.jpg'}... ") From 729dc169baeab2eb55b79ef0c29e3174306c8a0e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 25 Aug 2022 15:04:27 +0200 Subject: [PATCH 063/326] Fixed Classify offsets (#9155) --- classify/predict.py | 2 +- utils/plots.py | 10 ++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index b33b5bcc9933..937704d0f080 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -136,7 +136,7 @@ def run( # Write results if save_img or view_img: # Add bbox to image text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i) - annotator.text((64, 64), text, txt_color=(255, 255, 255)) + annotator.text((32, 32), text, txt_color=(255, 255, 255)) # Stream results im0 = annotator.result() diff --git a/utils/plots.py b/utils/plots.py index 2aa163268336..0f322b6b5844 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -117,10 +117,12 @@ def rectangle(self, xy, fill=None, outline=None, width=1): # Add rectangle to image (PIL-only) self.draw.rectangle(xy, fill, outline, width) - def text(self, xy, text, txt_color=(255, 255, 255)): + def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'): # Add text to image (PIL-only) - w, h = self.font.getsize(text) # text width, height - self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font) + if anchor == 'bottom': # start y from font bottom + w, h = self.font.getsize(text) # text width, height + xy[1] += 1 - h + self.draw.text(xy, text, fill=txt_color, font=self.font) def result(self): # Return annotated image as array @@ -222,7 +224,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders if paths: - annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames + annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames if len(targets) > 0: ti = targets[targets[:, 0] == i] # image targets boxes = xywh2xyxy(ti[:, 2:6]).T From 30e674b14d6bb4e13ceea84a5ef67d08e6dd2f7d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 25 Aug 2022 15:06:20 +0200 Subject: [PATCH 064/326] New YOLOv5 v6.2 splash images (#9142) * New YOLOv5 v6.2 splash images @AyushExel @AlanDimmer Signed-off-by: Glenn Jocher * Created using Colaboratory * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README_cn.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/README_cn.md | 98 +++++++++++++++++++++--------------------- README.md | 100 +++++++++++++++++++++---------------------- tutorial.ipynb | 42 +++++++++--------- 3 files changed, 119 insertions(+), 121 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index 46aafd86ec9b..bb62714f003f 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -1,55 +1,55 @@
-

- - -

-
- -[English](../README.md) | 简体中文 -
- CI CPU testing - YOLOv5 Citation - Docker Pulls -
- Open In Colab - Open In Kaggle - Join Forum -
- -
-

-YOLOv5🚀是一个在COCO数据集上预训练的物体检测架构和模型系列,它代表了Ultralytics对未来视觉AI方法的公开研究,其中包含了在数千小时的研究和开发中所获得的经验和最佳实践。 -

- -
- - - - - - - - - - - - - - - - - - - - +

+ + +

+ +   + + +

+ + [English](../README.md) | 简体中文 +
+
+ CI CPU testing + YOLOv5 Citation + Docker Pulls +
+ Open In Colab + Open In Kaggle + Join Forum +
+ +
+

+ YOLOv5🚀是一个在COCO数据集上预训练的物体检测架构和模型系列,它代表了Ultralytics对未来视觉AI方法的公开研究,其中包含了在数千小时的研究和开发中所获得的经验和最佳实践。 +

+ +
+ + + + + + + + + + + + + + + + + + + + +
- - -
##
文件
diff --git a/README.md b/README.md index 89e4f1199cde..1d6b4e153d82 100644 --- a/README.md +++ b/README.md @@ -1,56 +1,56 @@
-

- - -

- -English | [简体中文](.github/README_cn.md) -
-
- CI CPU testing - YOLOv5 Citation - Docker Pulls -
- Open In Colab - Open In Kaggle - Join Forum -
- -
-

-YOLOv5 🚀 is a family of object detection architectures and models pretrained on the COCO dataset, and represents Ultralytics - open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. -

- -
- - - - - - - - - - - - - - - - - - - - +

+ + +

+ +   + + +

+ + English | [简体中文](.github/README_cn.md) +
+
+ CI CPU testing + YOLOv5 Citation + Docker Pulls +
+ Open In Colab + Open In Kaggle + Join Forum +
+ +
+

+ YOLOv5 🚀 is a family of object detection architectures and models pretrained on the COCO dataset, and represents Ultralytics + open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. +

+ +
+ + + + + + + + + + + + + + + + + + + + +
- - -
##
Documentation
diff --git a/tutorial.ipynb b/tutorial.ipynb index 5b7b1f287d7e..3af5517c9623 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -7,8 +7,7 @@ "provenance": [], "collapsed_sections": [], "machine_shape": "hm", - "toc_visible": true, - "include_colab_link": true + "toc_visible": true }, "kernelspec": { "name": "python3", @@ -381,27 +380,26 @@ } }, "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "view-in-github", - "colab_type": "text" - }, - "source": [ - "\"Open" - ] - }, { "cell_type": "markdown", "metadata": { "id": "t6MPjfT5NrKQ" }, "source": [ - "\n", - "\n", + "
\n", + "\n", + " \n", + " \n", + "\n", "\n", - "This is the **official YOLOv5 🚀 notebook** by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n", - "For more information please visit https://github.com/ultralytics/yolov5 and https://ultralytics.com. Thank you!" + "
\n", + " \"Open\n", + " \"Open\n", + "
\n", + "\n", + "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", + "\n", + "
" ] }, { @@ -412,7 +410,7 @@ "source": [ "# Setup\n", "\n", - "Clone repo, install dependencies and check PyTorch and GPU." + "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." ] }, { @@ -433,7 +431,7 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": 1, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -485,7 +483,7 @@ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": 2, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -555,7 +553,7 @@ "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" ], - "execution_count": 3, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -603,7 +601,7 @@ "# Validate YOLOv5x on COCO val\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], - "execution_count": 4, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -723,7 +721,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": 5, + "execution_count": null, "outputs": [ { "output_type": "stream", From f2b8f3fe3a3ae2b601706e5bea9f25265eb2fcd9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 25 Aug 2022 22:17:28 +0200 Subject: [PATCH 065/326] Created using Colaboratory --- tutorial.ipynb | 474 +++++++++++++++++++++++-------------------------- 1 file changed, 218 insertions(+), 256 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 3af5517c9623..12840063b1f1 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -16,121 +16,110 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "da0946bcefd9414fa282977f7f609e36": { + "9b8caa3522fc4cbab31e13b5dfc7808d": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", - "model_module_version": "2.0.0", + "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", + "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", + "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ - "IPY_MODEL_7838c0af44244ccc906c413cea0989d7", - "IPY_MODEL_309ea78b3e814198b4080beb878d5329", - "IPY_MODEL_b2d1d998e5db4ca1a36280902e1647c7" + "IPY_MODEL_574140e4c4bc48c9a171541a02cd0211", + "IPY_MODEL_35e03ce5090346c9ae602891470fc555", + "IPY_MODEL_c942c208e72d46568b476bb0f2d75496" ], - "layout": "IPY_MODEL_e7d7f56c77884717ba122f1d603c0852", - "tabbable": null, - "tooltip": null + "layout": "IPY_MODEL_65881db1db8a4e9c930fab9172d45143" } }, - "7838c0af44244ccc906c413cea0989d7": { + "574140e4c4bc48c9a171541a02cd0211": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", - "model_module_version": "2.0.0", + "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", + "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": 
null, "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", + "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_abf60d6b8ea847f9bb358ae2b045458b", + "description_tooltip": null, + "layout": "IPY_MODEL_60b913d755b34d638478e30705a2dde1", "placeholder": "​", - "style": "IPY_MODEL_379196a2761b4a29aca8ef088dc60c10", - "tabbable": null, - "tooltip": null, + "style": "IPY_MODEL_0856bea36ec148b68522ff9c9eb258d8", "value": "100%" } }, - "309ea78b3e814198b4080beb878d5329": { + "35e03ce5090346c9ae602891470fc555": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", - "model_module_version": "2.0.0", + "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", + "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", + "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_52b546a356e54174a95049b30cb52c81", + "description_tooltip": null, + "layout": "IPY_MODEL_76879f6f2aa54637a7a07faeea2bd684", "max": 818322941, "min": 0, "orientation": "horizontal", - "style": "IPY_MODEL_0889e134327e4aa0a8719d03a0d6941b", - "tabbable": null, - "tooltip": null, + "style": "IPY_MODEL_0ace3934ec6f4d36a1b3a9e086390926", "value": 818322941 } }, - "b2d1d998e5db4ca1a36280902e1647c7": { + "c942c208e72d46568b476bb0f2d75496": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", - "model_module_version": "2.0.0", + "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", + "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", + "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_30f22a3e42d24f10ad9851f40a6703f3", + "description_tooltip": null, + "layout": "IPY_MODEL_d6b7a2243e0c4beca714d99dceec23d6", "placeholder": "​", - "style": "IPY_MODEL_648b3512bb7d4ccca5d75af36c133e92", - "tabbable": null, - "tooltip": null, - "value": " 780M/780M [01:31<00:00, 12.3MB/s]" + "style": "IPY_MODEL_5966ba6e6f114d8c9d8d1d6b1bd4f4c7", + "value": " 780M/780M [02:19<00:00, 6.24MB/s]" } }, - "e7d7f56c77884717ba122f1d603c0852": { + "65881db1db8a4e9c930fab9172d45143": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", - "model_module_version": "2.0.0", + "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", + "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", + "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, + "border": null, "bottom": null, "display": null, "flex": null, @@ -158,6 +147,8 @@ "object_position": null, "order": null, "overflow": null, + "overflow_x": null, + "overflow_y": null, "padding": null, "right": null, "top": null, @@ 
-165,25 +156,22 @@ "width": null } }, - "abf60d6b8ea847f9bb358ae2b045458b": { + "60b913d755b34d638478e30705a2dde1": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", - "model_module_version": "2.0.0", + "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", + "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", + "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, + "border": null, "bottom": null, "display": null, "flex": null, @@ -211,6 +199,8 @@ "object_position": null, "order": null, "overflow": null, + "overflow_x": null, + "overflow_y": null, "padding": null, "right": null, "top": null, @@ -218,43 +208,37 @@ "width": null } }, - "379196a2761b4a29aca8ef088dc60c10": { + "0856bea36ec148b68522ff9c9eb258d8": { "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLStyleModel", - "model_module_version": "2.0.0", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", + "_view_module_version": "1.2.0", "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null + "description_width": "" } }, - "52b546a356e54174a95049b30cb52c81": { + "76879f6f2aa54637a7a07faeea2bd684": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", - "model_module_version": "2.0.0", + "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", + "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", + "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, + "border": null, "bottom": null, "display": null, "flex": null, @@ -282,6 +266,8 @@ "object_position": null, "order": null, "overflow": null, + "overflow_x": null, + "overflow_y": null, "padding": null, "right": null, "top": null, @@ -289,41 +275,38 @@ "width": null } }, - "0889e134327e4aa0a8719d03a0d6941b": { + "0ace3934ec6f4d36a1b3a9e086390926": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", - "model_module_version": "2.0.0", + "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", + "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", + "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, - "30f22a3e42d24f10ad9851f40a6703f3": { + "d6b7a2243e0c4beca714d99dceec23d6": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", - "model_module_version": "2.0.0", + "model_module_version": "1.2.0", 
"state": { "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", + "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", + "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, + "border": null, "bottom": null, "display": null, "flex": null, @@ -351,6 +334,8 @@ "object_position": null, "order": null, "overflow": null, + "overflow_x": null, + "overflow_y": null, "padding": null, "right": null, "top": null, @@ -358,22 +343,19 @@ "width": null } }, - "648b3512bb7d4ccca5d75af36c133e92": { + "5966ba6e6f114d8c9d8d1d6b1bd4f4c7": { "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLStyleModel", - "model_module_version": "2.0.0", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", + "_view_module_version": "1.2.0", "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null + "description_width": "" } } } @@ -420,7 +402,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "4200fd6f-c6f5-4505-a4f9-a918f3ed1f86" + "outputId": "0f9ee467-cea4-48e8-9050-7a76ae1b6141" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", @@ -431,13 +413,13 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-41-g8665d55 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n" + "YOLOv5 🚀 v6.2-56-g30e674b Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n" ] }, { @@ -477,29 +459,29 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "1af15107-bcd1-4e8f-b5bd-0ee1a737e051" + "outputId": "60647b99-e8d4-402c-f444-331bf6746da4" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": null, + "execution_count": 2, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False\n", - "YOLOv5 🚀 v6.2-41-g8665d55 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-56-g30e674b Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 41.7MB/s]\n", + "100% 14.1M/14.1M [00:00<00:00, 27.8MB/s]\n", "\n", "Fusing 
layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 14.5ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 18.9ms\n", - "Speed: 0.5ms pre-process, 16.7ms inference, 21.4ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 14.8ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 20.1ms\n", + "Speed: 0.6ms pre-process, 17.4ms inference, 21.6ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" ] } @@ -531,29 +513,29 @@ "id": "WQPtK1QYVaD_", "colab": { "base_uri": "https://localhost:8080/", - "height": 17, + "height": 49, "referenced_widgets": [ - "da0946bcefd9414fa282977f7f609e36", - "7838c0af44244ccc906c413cea0989d7", - "309ea78b3e814198b4080beb878d5329", - "b2d1d998e5db4ca1a36280902e1647c7", - "e7d7f56c77884717ba122f1d603c0852", - "abf60d6b8ea847f9bb358ae2b045458b", - "379196a2761b4a29aca8ef088dc60c10", - "52b546a356e54174a95049b30cb52c81", - "0889e134327e4aa0a8719d03a0d6941b", - "30f22a3e42d24f10ad9851f40a6703f3", - "648b3512bb7d4ccca5d75af36c133e92" + "9b8caa3522fc4cbab31e13b5dfc7808d", + "574140e4c4bc48c9a171541a02cd0211", + "35e03ce5090346c9ae602891470fc555", + "c942c208e72d46568b476bb0f2d75496", + "65881db1db8a4e9c930fab9172d45143", + "60b913d755b34d638478e30705a2dde1", + "0856bea36ec148b68522ff9c9eb258d8", + "76879f6f2aa54637a7a07faeea2bd684", + "0ace3934ec6f4d36a1b3a9e086390926", + "d6b7a2243e0c4beca714d99dceec23d6", + "5966ba6e6f114d8c9d8d1d6b1bd4f4c7" ] }, - "outputId": "5f129105-eca5-4f33-fb1d-981255f814ad" + "outputId": "102dabed-bc31-42fe-9133-d9ce28a2c01e" }, "source": [ "# Download COCO val\n", "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" ], - "execution_count": null, + "execution_count": 3, "outputs": [ { "output_type": "display_data", @@ -564,24 +546,7 @@ "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, - "model_id": "da0946bcefd9414fa282977f7f609e36" - }, - "application/json": { - "n": 0, - "total": 818322941, - "elapsed": 0.020366430282592773, - "ncols": null, - "nrows": null, - "prefix": "", - "ascii": false, - "unit": "B", - "unit_scale": true, - "rate": null, - "bar_format": null, - "postfix": null, - "unit_divisor": 1024, - "initial": 0, - "colour": null + "model_id": "9b8caa3522fc4cbab31e13b5dfc7808d" } }, "metadata": {} @@ -595,60 +560,57 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "40d5d000-abee-46a0-c07d-1066e1662e01" + "outputId": "daf60b1b-b098-4657-c863-584f4c9cf078" }, "source": [ - "# Validate YOLOv5x on COCO val\n", - "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" + "# Validate YOLOv5s on COCO val\n", + "!python val.py --weights yolov5s.pt --data coco.yaml --img 640 --half" ], - "execution_count": null, + "execution_count": 4, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, 
half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-41-g8665d55 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", - "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt to yolov5x.pt...\n", - "100% 166M/166M [00:10<00:00, 16.6MB/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n", + "YOLOv5 🚀 v6.2-56-g30e674b Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "Fusing layers... \n", - "YOLOv5x summary: 444 layers, 86705005 parameters, 0 gradients\n", + "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", "Downloading https://ultralytics.com/assets/Arial.ttf to /root/.config/Ultralytics/Arial.ttf...\n", - "100% 755k/755k [00:00<00:00, 1.39MB/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 10506.48it/s]\n", + "100% 755k/755k [00:00<00:00, 52.7MB/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 10509.20it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Instances P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:06<00:00, 2.36it/s]\n", - " all 5000 36335 0.743 0.625 0.683 0.504\n", - "Speed: 0.1ms pre-process, 4.6ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n", + " Class Images Instances P R mAP@.5 mAP@.5:.95: 100% 157/157 [00:50<00:00, 3.10it/s]\n", + " all 5000 36335 0.67 0.521 0.566 0.371\n", + "Speed: 0.1ms pre-process, 1.0ms inference, 1.5ms NMS per image at shape (32, 3, 640, 640)\n", "\n", - "Evaluating pycocotools mAP... saving runs/val/exp/yolov5x_predictions.json...\n", + "Evaluating pycocotools mAP... 
saving runs/val/exp/yolov5s_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.38s)\n", + "Done (t=0.81s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=5.49s)\n", + "DONE (t=5.62s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=72.10s).\n", + "DONE (t=77.03s).\n", "Accumulating evaluation results...\n", - "DONE (t=13.94s).\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.506\n", - " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n", - " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.549\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.340\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.558\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.651\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.382\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.631\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.684\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.528\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.737\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.833\n", + "DONE (t=14.63s).\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.374\n", + " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.572\n", + " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.402\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.211\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.423\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.489\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.311\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.516\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.566\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.378\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.625\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.724\n", "Results saved to \u001b[1mruns/val/exp\u001b[0m\n" ] } @@ -715,13 +677,13 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "f0ce0354-7f50-4546-f3f9-672b4b522d59" + "outputId": "baa6d4be-3379-4aab-844a-d5a5396c0e49" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": null, + "execution_count": 5, "outputs": [ { "output_type": "stream", @@ -729,7 +691,7 @@ "text": [ "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, 
local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-41-g8665d55 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-56-g30e674b Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", "\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases\n", @@ -738,8 +700,8 @@ "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", - "100% 6.66M/6.66M [00:00<00:00, 76.7MB/s]\n", - "Dataset download success ✅ (0.5s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.66M/6.66M [00:00<00:00, 41.1MB/s]\n", + "Dataset download success ✅ (0.8s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -773,11 +735,11 @@ "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 7984.87it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 9659.25it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 1018.19it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 951.31it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128/labels/train2017.cache' images and labels... 
128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00 Date: Fri, 26 Aug 2022 14:34:28 +0200 Subject: [PATCH 066/326] Rename onnx_dynamic -> dynamic (#9168) --- export.py | 2 +- models/yolo.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/export.py b/export.py index 1bb7ded8ab85..0f26e63e9adc 100644 --- a/export.py +++ b/export.py @@ -489,7 +489,7 @@ def run( for k, m in model.named_modules(): if isinstance(m, Detect): m.inplace = inplace - m.onnx_dynamic = dynamic + m.dynamic = dynamic m.export = True for _ in range(2): diff --git a/models/yolo.py b/models/yolo.py index e154b72685b4..7a7308312a14 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -37,7 +37,7 @@ class Detect(nn.Module): stride = None # strides computed during build - onnx_dynamic = False # ONNX export parameter + dynamic = False # force grid reconstruction export = False # export mode def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer @@ -60,7 +60,7 @@ def forward(self, x): x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() if not self.training: # inference - if self.onnx_dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: + if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) y = x[i].sigmoid() From 5d3d051c9b6bb25c45d254ceabab669c758ed72b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 26 Aug 2022 15:29:31 +0200 Subject: [PATCH 067/326] Inline `_make_grid()` meshgrid (#9170) * Inline _make_grid() meshgrid Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/yolo.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 7a7308312a14..fa05fcf9a8d9 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -81,10 +81,7 @@ def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version t = self.anchors[i].dtype shape = 1, self.na, ny, nx, 2 # grid shape y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t) - if torch_1_10: # torch>=1.10.0 meshgrid workaround for torch>=0.7 compatibility - yv, xv = torch.meshgrid(y, x, indexing='ij') - else: - yv, xv = torch.meshgrid(y, x) + yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x) # torch>=0.7 compatibility grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. 
y = 2.0 * x - 0.5 anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape) return grid, anchor_grid From cff9717d730710ad0f5e858ca54cb19731e6a6b5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 26 Aug 2022 20:06:26 +0200 Subject: [PATCH 068/326] Comment EMA assert (#9173) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/torch_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 5fbe8bbf10f6..abf0bbc19a98 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -422,7 +422,7 @@ def update(self, model): if v.dtype.is_floating_point: # true for FP16 and FP32 v *= d v += (1 - d) * msd[k].detach() - assert v.dtype == msd[k].detach().dtype == torch.float32, f'EMA {v.dtype} and model {msd[k]} must both be FP32' + # assert v.dtype == msd[k].dtype == torch.float32, f'{k}: EMA {v.dtype} and model {msd[k].dtype} must be FP32' def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): # Update EMA attributes From ffbce3858ae3d0d1d0978a5927daa2d4f94e55b6 Mon Sep 17 00:00:00 2001 From: HighMans <42877729+HighMans@users.noreply.github.com> Date: Fri, 26 Aug 2022 19:39:11 -0400 Subject: [PATCH 069/326] Fix confidence threshold for ClearML debug images (#9174) * Fix confidence threshold The confidence is converted to a percentage on line 144, but it is being compared to a default conf_threshold value of a decimal value instead of percent value. Signed-off-by: HighMans <42877729+HighMans@users.noreply.github.com> * Revert "Fix confidence threshold" This reverts commit f84a09967f83d70626ca8dfe7625dce60fb0102e. * Fix confidence comparison Fix the confidence percentage is being compared to a decimal value. Signed-off-by: HighMans <42877729+HighMans@users.noreply.github.com> --- utils/loggers/clearml/clearml_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index 52320c090ddd..1e136907367d 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -141,10 +141,10 @@ def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_thres color = colors(i) class_name = class_names[int(class_nr)] - confidence = round(float(conf) * 100, 2) - label = f"{class_name}: {confidence}%" + confidence_percentage = round(float(conf) * 100, 2) + label = f"{class_name}: {confidence_percentage}%" - if confidence > conf_threshold: + if conf > conf_threshold: annotator.rectangle(box.cpu().numpy(), outline=color) annotator.box_label(box.cpu().numpy(), label=label, color=color) From f58fe6b6c12f1b0d25d95ab07a6656b87ac31b25 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 28 Aug 2022 21:36:05 +0200 Subject: [PATCH 070/326] Update Dockerfile-cpu (#9184) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile-cpu | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index d61dfeffe22c..5dc75d83c20f 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -18,7 +18,8 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1- COPY requirements.txt . 
RUN python3 -m pip install --upgrade pip wheel RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ - coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu tensorflowjs \ + coremltools onnx onnx-simplifier onnxruntime tensorflow-cpu tensorflowjs \ + # openvino-dev \ --extra-index-url https://download.pytorch.org/whl/cpu # Create working directory From 985e000d813c739fe6e4c05b8df6f80f40ca3c7a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 28 Aug 2022 21:48:58 +0200 Subject: [PATCH 071/326] Update Dockerfile-cpu to libpython3-dev (#9185) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile-cpu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 5dc75d83c20f..d6fac645dba1 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -11,7 +11,7 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria # Install linux packages RUN apt update RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3.8-dev +RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev # RUN alias python=python3 # Install pip packages From 53711bacea004389a603697e02c5aa8f7cd4b78e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 28 Aug 2022 22:14:21 +0200 Subject: [PATCH 072/326] Update Dockerfile-arm64 to libpython3-dev (#9187) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile-arm64 | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index fe92c8d56146..6e8ff77545c5 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -11,8 +11,7 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria # Install linux packages RUN apt update RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc \ - libgl1-mesa-glx libglib2.0-0 libpython3.8-dev +RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1-mesa-glx libglib2.0-0 libpython3-dev # RUN alias python=python3 # Install pip packages From 13530402f8b960544aed45db4f71d7056a3ffdfc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 29 Aug 2022 01:51:41 +0200 Subject: [PATCH 073/326] Fix AutoAnchor MPS bug (#9188) Resolves https://github.com/ultralytics/yolov5/issues/8862 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/autoanchor.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/utils/autoanchor.py b/utils/autoanchor.py index f2222203e24c..ac17c6cafc90 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -10,6 +10,7 @@ import yaml from tqdm import tqdm +from utils import TryExcept from utils.general import LOGGER, colorstr PREFIX = colorstr('AutoAnchor: ') @@ -25,6 +26,7 @@ def check_anchor_order(m): m.anchors[:] = m.anchors.flip(0) +@TryExcept(f'{PREFIX}ERROR:') def check_anchors(dataset, model, thr=4.0, imgsz=640): # Check anchor fit to data, recompute if necessary m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() @@ -49,10 +51,7 @@ def metric(k): # compute metric else: LOGGER.info(f'{s}Anchors are a poor fit to 
dataset ⚠️, attempting to improve...') na = m.anchors.numel() // 2 # number of anchors - try: - anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) - except Exception as e: - LOGGER.info(f'{PREFIX}ERROR: {e}') + anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) new_bpr = metric(anchors)[0] if new_bpr > bpr: # replace anchors anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) @@ -124,7 +123,7 @@ def print_results(k, verbose=True): i = (wh0 < 3.0).any(1).sum() if i: LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found: {i} of {len(wh0)} labels are < 3 pixels in size') - wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels + wh = wh0[(wh0 >= 2.0).any(1)].astype(np.float32) # filter > 2 pixels # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 # Kmeans init @@ -167,4 +166,4 @@ def print_results(k, verbose=True): if verbose: print_results(k, verbose) - return print_results(k) + return print_results(k).astype(np.float32) From e57275a2d8713ec6b6fe88fd341d24c6c6e2d29d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 29 Aug 2022 02:00:42 +0200 Subject: [PATCH 074/326] Skip AMP check on MPS (#9189) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 91b13f84a6c4..842f28c60886 100755 --- a/utils/general.py +++ b/utils/general.py @@ -535,8 +535,8 @@ def amp_allclose(model, im): prefix = colorstr('AMP: ') device = next(model.parameters()).device # get model device - if device.type == 'cpu': - return False # AMP disabled on CPU + if device.type in ('cpu', 'mps'): + return False # AMP only used on CUDA devices f = ROOT / 'data' / 'images' / 'bus.jpg' # image to check im = f if f.exists() else 'https://ultralytics.com/images/bus.jpg' if check_online() else np.ones((640, 640, 3)) try: From cf5d9cbc33ed6849801311765d0c90cdce8ddfd9 Mon Sep 17 00:00:00 2001 From: HighMans <42877729+HighMans@users.noreply.github.com> Date: Mon, 29 Aug 2022 08:58:55 -0400 Subject: [PATCH 075/326] ClearML's set_report_period's time is defined in minutes not seconds. (#9186) * ClearML's set_report_period's time is defined in minutes not seconds. https://clear.ml/docs/latest/docs/references/sdk/hpo_optimization_hyperparameteroptimizer/#set_report_period set_report_period function takes in time in terms of minutes, not seconds. 
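In sketch form, the fix is just a unit conversion (a minimal illustration using the `optimizer` object already defined in hpo.py; the 10-second target comes from the existing comment in the diff below):

    # set_report_period() interprets its argument as minutes,
    # so a 10-second reporting interval must be passed as 10 / 60 of a minute
    report_seconds = 10  # desired reporting interval in seconds (illustrative)
    optimizer.set_report_period(report_seconds / 60)  # ~0.167 minutes
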
Signed-off-by: HighMans <42877729+HighMans@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: HighMans <42877729+HighMans@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/loggers/clearml/hpo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/loggers/clearml/hpo.py b/utils/loggers/clearml/hpo.py index 96c2c544c84c..ee518b0fbfc8 100644 --- a/utils/loggers/clearml/hpo.py +++ b/utils/loggers/clearml/hpo.py @@ -69,7 +69,7 @@ ) # report every 10 seconds, this is way too often, but we are testing here -optimizer.set_report_period(10) +optimizer.set_report_period(10 / 60) # You can also use the line below instead to run all the optimizer tasks locally, without using queues or agent # an_optimizer.start_locally(job_complete_callback=job_complete_callback) # set the time limit for the optimization process (2 hours) From f65081c4360887ead430e44ee3eb23566a8145ef Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 29 Aug 2022 15:00:55 +0200 Subject: [PATCH 076/326] Add `check_git_status(..., branch='master')` argument (#9199) Add check_git_status(branch='master') argument Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/general.py b/utils/general.py index 842f28c60886..ac9b0cefd7c8 100755 --- a/utils/general.py +++ b/utils/general.py @@ -300,7 +300,7 @@ def git_describe(path=ROOT): # path must be a directory @TryExcept() @WorkingDirectory(ROOT) -def check_git_status(repo='ultralytics/yolov5'): +def check_git_status(repo='ultralytics/yolov5', branch='master'): # YOLOv5 status check, recommend 'git pull' if code is out of date url = f'https://github.com/{repo}' msg = f', for updates see {url}' @@ -316,10 +316,10 @@ def check_git_status(repo='ultralytics/yolov5'): remote = 'ultralytics' check_output(f'git remote add {remote} {url}', shell=True) check_output(f'git fetch {remote}', shell=True, timeout=5) # git fetch - branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out - n = int(check_output(f'git rev-list {branch}..{remote}/master --count', shell=True)) # commits behind + local_branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out + n = int(check_output(f'git rev-list {local_branch}..{remote}/{branch} --count', shell=True)) # commits behind if n > 0: - pull = 'git pull' if remote == 'origin' else f'git pull {remote} master' + pull = 'git pull' if remote == 'origin' else f'git pull {remote} {branch}' s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `{pull}` or `git clone {url}` to update." 
else: s += f'up to date with {url} ✅' From fef1913d288a170a19df33493e241b593de99e41 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 29 Aug 2022 15:12:15 +0200 Subject: [PATCH 077/326] `check_font()` on notebook init (#9200) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/utils/__init__.py b/utils/__init__.py index 7466a486caf4..8bdffd47b3b2 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -38,10 +38,12 @@ def notebook_init(verbose=True): import os import shutil - from utils.general import check_requirements, emojis, is_colab + from utils.general import check_font, check_requirements, emojis, is_colab from utils.torch_utils import select_device # imports check_requirements(('psutil', 'IPython')) + check_font() + import psutil from IPython import display # to display images and clear console output From bd5fd78411115f8f9ebed4c95a26f0d3da316ac5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 29 Aug 2022 17:13:55 +0200 Subject: [PATCH 078/326] Comment `protobuf` in requirements.txt (#9207) The low package version is causing conflicts among other dependencies, commenting it causes no ill effects in CI so this should be fine. Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 10620566ca66..44fe1ce697b7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,7 +12,7 @@ scipy>=1.4.1 torch>=1.7.0 torchvision>=0.8.1 tqdm>=4.64.0 -protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 +# protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 # Logging ------------------------------------- tensorboard>=2.4.1 From da22e01a6818199d9222a13e58aa145b0477c342 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 29 Aug 2022 18:10:19 +0200 Subject: [PATCH 079/326] `check_font()` fstring update (#9208) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index ac9b0cefd7c8..3e42e887283c 100755 --- a/utils/general.py +++ b/utils/general.py @@ -456,7 +456,7 @@ def check_font(font=FONT, progress=False): font = Path(font) file = CONFIG_DIR / font.name if not font.exists() and not file.exists(): - url = "https://ultralytics.com/assets/" + font.name + url = f'https://ultralytics.com/assets/{font.name}' LOGGER.info(f'Downloading {url} to {file}...') torch.hub.download_url_to_file(url, str(file), progress=progress) From 3c64d891043643cede117c8e54e30e35aecf2e56 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 29 Aug 2022 20:06:00 +0200 Subject: [PATCH 080/326] AutoBatch protect from extreme batch sizes (#9209) If < 1 or > 1024 set output to default batch size 16. 
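Sketched as standalone logic (variable names mirror the autobatch.py diff below; this illustration is not extra code in the patch):

    # b = estimated optimal batch size, batch_size = caller-supplied default (16)
    if b < 1 or b > 1024:  # estimate outside the safe range, likely a CUDA anomaly
        b = batch_size  # fall back to the default rather than trust the fit
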
May partially address https://github.com/ultralytics/yolov5/issues/9156 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/autobatch.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/autobatch.py b/utils/autobatch.py index 8d12e46f0f09..01152055196d 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -60,8 +60,8 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): i = results.index(None) # first fail index if b >= batch_sizes[i]: # y intercept above failure point b = batch_sizes[max(i - 1, 0)] # select prior safe point - if b < 1: # zero or negative batch size - b = 16 + if b < 1 or b > 1024: # b outside of safe range + b = batch_size LOGGER.warning(f'{prefix}WARNING: ⚠️ CUDA anomaly detected, recommend restart environment and retry command.') fraction = np.polyval(p, b) / t # actual fraction predicted From 91a81d48fa4e34dbdbaf0e45a1f841c11216aab5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 29 Aug 2022 20:41:54 +0200 Subject: [PATCH 081/326] Default AutoBatch 0.8 fraction (#9212) --- hubconf.py | 2 +- utils/autobatch.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hubconf.py b/hubconf.py index 33fc87930582..bffe2d588b4f 100644 --- a/hubconf.py +++ b/hubconf.py @@ -47,7 +47,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model if autoshape: if model.pt and isinstance(model.model, ClassificationModel): - LOGGER.warning('WARNING: YOLOv5 v6.2 ClassificationModel is not yet AutoShape compatible. ' + LOGGER.warning('WARNING: ⚠️ YOLOv5 v6.2 ClassificationModel is not yet AutoShape compatible. ' 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') else: model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS diff --git a/utils/autobatch.py b/utils/autobatch.py index 01152055196d..641b055b9fe3 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -18,7 +18,7 @@ def check_train_batch_size(model, imgsz=640, amp=True): return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size -def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): +def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): # Automatically estimate best batch size to use `fraction` of available CUDA memory # Usage: # import torch From f37ac8d611c0972851831fdf534cdb2b7f126cff Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 30 Aug 2022 11:36:38 +0200 Subject: [PATCH 082/326] Delete rebase.yml (#9202) * Delete rebase.yml No longer required with new built-in GitHub PR merge master feature Signed-off-by: Glenn Jocher * Update CONTRIBUTING.md Signed-off-by: Glenn Jocher * Update greetings.yml Signed-off-by: Glenn Jocher * Update CONTRIBUTING.md Signed-off-by: Glenn Jocher * cleanup Signed-off-by: Glenn Jocher --- .github/workflows/greetings.yml | 14 ++++---------- .github/workflows/rebase.yml | 21 --------------------- CONTRIBUTING.md | 23 +++++++++-------------- 3 files changed, 13 insertions(+), 45 deletions(-) delete mode 100644 .github/workflows/rebase.yml diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index d5dad7a25559..91bf190eb727 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -17,16 +17,10 @@ jobs: repo-token: ${{ secrets.GITHUB_TOKEN }} pr-message: | 👋 Hello @${{ github.actor }}, thank you for submitting a YOLOv5 🚀 PR! 
To allow your work to be integrated as seamlessly as possible, we advise you to: - - ✅ Verify your PR is **up-to-date with upstream/master.** If your PR is behind upstream/master an automatic [GitHub Actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) merge may be attempted by writing /rebase in a new comment, or by running the following code, replacing 'feature' with the name of your local branch: - ```bash - git remote add upstream https://github.com/ultralytics/yolov5.git - git fetch upstream - # git checkout feature # <--- replace 'feature' with local branch name - git merge upstream/master - git push -u origin -f - ``` - - ✅ Verify all Continuous Integration (CI) **checks are passing**. - - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ -Bruce Lee + + - ✅ Verify your PR is **up-to-date** with `ultralytics/yolov5` `master` branch. If your PR is behind you can update your code by clicking the 'Update branch' button or by running `git pull` and `git merge master` locally. + - ✅ Verify all YOLOv5 Continuous Integration (CI) **checks are passing**. + - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee issue-message: | 👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://github.com/ultralytics/yolov5/wiki#tutorials) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) all the way to advanced concepts like [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607). diff --git a/.github/workflows/rebase.yml b/.github/workflows/rebase.yml deleted file mode 100644 index a4dc9e5092fd..000000000000 --- a/.github/workflows/rebase.yml +++ /dev/null @@ -1,21 +0,0 @@ -# https://github.com/marketplace/actions/automatic-rebase - -name: Automatic Rebase -on: - issue_comment: - types: [created] -jobs: - rebase: - name: Rebase - if: github.event.issue.pull_request != '' && contains(github.event.comment.body, '/rebase') - runs-on: ubuntu-latest - steps: - - name: Checkout the latest code - uses: actions/checkout@v3 - with: - token: ${{ secrets.ACTIONS_TOKEN }} - fetch-depth: 0 # otherwise, you will fail to push refs to dest repo - - name: Automatic Rebase - uses: cirrus-actions/rebase@1.7 - env: - GITHUB_TOKEN: ${{ secrets.ACTIONS_TOKEN }} diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 13b9b73b50cc..7498f8995d40 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -45,20 +45,15 @@ changes** button. 
All done, your PR is now submitted to YOLOv5 for review and approval. To allow your work to be integrated as seamlessly as possible, we advise you to:
-- ✅ Verify your PR is **up-to-date with upstream/master.** If your PR is behind upstream/master an
- automatic [GitHub Actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) merge may
- be attempted by writing /rebase in a new comment, or by running the following code, replacing 'feature' with the name
- of your local branch:
-
-```bash
-git remote add upstream https://github.com/ultralytics/yolov5.git
-git fetch upstream
-# git checkout feature # <--- replace 'feature' with local branch name
-git merge upstream/master
-git push -u origin -f
-```
-
-- ✅ Verify all Continuous Integration (CI) **checks are passing**.
+- ✅ Verify your PR is **up-to-date** with `ultralytics/yolov5` `master` branch. If your PR is behind you can update
+  your code by clicking the 'Update branch' button or by running `git pull` and `git merge master` locally.
+
+

[image: Screenshot 2022-08-29 at 22 47 15]

+ +- ✅ Verify all YOLOv5 Continuous Integration (CI) **checks are passing**. + +

[image: Screenshot 2022-08-29 at 22 47 03]

+ - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee From 5fb267f3e5dc86675d508e1b08d20fc0e2e84003 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 30 Aug 2022 13:40:51 +0200 Subject: [PATCH 083/326] Duplicate segment verification fix (#9225) Solution by @Laughing-q to resolve duplicate segment verification bug in https://github.com/ultralytics/yolov5/pull/9052#issuecomment-1231426638 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 675c2898e7d7..f027307ccb94 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -963,7 +963,7 @@ def verify_image_label(args): if len(i) < nl: # duplicate row check lb = lb[i] # remove duplicates if segments: - segments = segments[i] + segments = [segments[x] for x in i] msg = f'{prefix}WARNING: {im_file}: {nl - len(i)} duplicate labels removed' else: ne = 1 # label empty From 6e7a7ae7edee8f66d7ce5617f9f75724bb7d6992 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 30 Aug 2022 15:17:58 +0200 Subject: [PATCH 084/326] New `LetterBox(size)` `CenterCrop(size)`, `ToTensor()` transforms (#9213) * New LetterBox transform YOLOv5 LetterBox class for image preprocessing, i.e. T.Compose([T.ToTensor(), LetterBox(size)]) Signed-off-by: Glenn Jocher * Update augmentations.py Signed-off-by: Glenn Jocher * Update augmentations.py Signed-off-by: Glenn Jocher * Update augmentations.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * cleanup * cleanup * cleanup * cleanup * cleanup Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/augmentations.py | 50 +++++++++++++++++++++++++++++++++++++++++- utils/dataloaders.py | 22 +++++++++---------- 2 files changed, 60 insertions(+), 12 deletions(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index c8499b3fc8ae..a5587351f75b 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -8,6 +8,7 @@ import cv2 import numpy as np +import torch import torchvision.transforms as T import torchvision.transforms.functional as TF @@ -345,4 +346,51 @@ def classify_albumentations(augment=True, def classify_transforms(size=224): # Transforms to apply if albumentations not installed assert isinstance(size, int), f'ERROR: classify_transforms size {size} must be integer, not (list, tuple)' - return T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) + # T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) + return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) + + +class LetterBox: + # YOLOv5 LetterBox class for image preprocessing, i.e. 
T.Compose([LetterBox(size), ToTensor()]) + def __init__(self, size=(640, 640), auto=False, stride=32): + super().__init__() + self.h, self.w = (size, size) if isinstance(size, int) else size + self.auto = auto # pass max size integer, automatically solve for short side using stride + self.stride = stride # used with auto + + def __call__(self, im): # im = np.array HWC + imh, imw = im.shape[:2] + r = min(self.h / imh, self.w / imw) # ratio of new/old + h, w = round(imh * r), round(imw * r) # resized image + hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else self.h, self.w + top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1) + im_out = np.full((self.h, self.w, 3), 114, dtype=im.dtype) + im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR) + return im_out + + +class CenterCrop: + # YOLOv5 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()]) + def __init__(self, size=640): + super().__init__() + self.h, self.w = (size, size) if isinstance(size, int) else size + + def __call__(self, im): # im = np.array HWC + imh, imw = im.shape[:2] + m = min(imh, imw) # min dimension + top, left = (imh - m) // 2, (imw - m) // 2 + return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR) + + +class ToTensor: + # YOLOv5 ToTensor class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()]) + def __init__(self, half=False): + super().__init__() + self.half = half + + def __call__(self, im): # im = np.array HWC in BGR order + im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1]) # HWC to CHW -> BGR to RGB -> contiguous + im = torch.from_numpy(im) # to torch + im = im.half() if self.half else im.float() # uint8 to fp16/32 + im /= 255.0 # 0-255 to 0.0-1.0 + return im diff --git a/utils/dataloaders.py b/utils/dataloaders.py index f027307ccb94..d4ab592bbea7 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -251,7 +251,7 @@ def __next__(self): s = f'image {self.count}/{self.nf} {path}: ' if self.transforms: - im = self.transforms(cv2.cvtColor(im0, cv2.COLOR_BGR2RGB)) # transforms + im = self.transforms(im0) # transforms else: im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB @@ -386,7 +386,7 @@ def __next__(self): im0 = self.imgs.copy() if self.transforms: - im = np.stack([self.transforms(cv2.cvtColor(x, cv2.COLOR_BGR2RGB)) for x in im0]) # transforms + im = np.stack([self.transforms(x) for x in im0]) # transforms else: im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0]) # resize im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW @@ -1113,18 +1113,18 @@ def __init__(self, root, augment, imgsz, cache=False): def __getitem__(self, i): f, j, fn, im = self.samples[i] # filename, index, filename.with_suffix('.npy'), image + if self.cache_ram and im is None: + im = self.samples[i][3] = cv2.imread(f) + elif self.cache_disk: + if not fn.exists(): # load npy + np.save(fn.as_posix(), cv2.imread(f)) + im = np.load(fn) + else: # read image + im = cv2.imread(f) # BGR if self.album_transforms: - if self.cache_ram and im is None: - im = self.samples[i][3] = cv2.imread(f) - elif self.cache_disk: - if not fn.exists(): # load npy - np.save(fn.as_posix(), cv2.imread(f)) - im = np.load(fn) - else: # read image - im = cv2.imread(f) # BGR sample = 
self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))["image"] else: - sample = self.torch_transforms(self.loader(f)) + sample = self.torch_transforms(im) return sample, j From 4a37381ee8f9b650dde21fe352a94ff932c5b08d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 30 Aug 2022 16:18:01 +0200 Subject: [PATCH 085/326] Add ClassificationModel TF export assert (#9226) * Add ClassificationModel TF export assert Export to TF not yet supported, warning alerts users. Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/export.py b/export.py index 0f26e63e9adc..4d0144af9efb 100644 --- a/export.py +++ b/export.py @@ -65,7 +65,7 @@ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative from models.experimental import attempt_load -from models.yolo import Detect +from models.yolo import ClassificationModel, Detect from utils.dataloaders import LoadImages from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version, check_yaml, colorstr, file_size, get_default_args, print_args, url2file) @@ -518,6 +518,7 @@ def run( if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 check_requirements(('flatbuffers==1.12',)) # required before `import tensorflow` assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.' + assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.' f[5], model = export_saved_model(model.cpu(), im, file, From 5f1000a499dc8de5f9083412796324ebe091ba10 Mon Sep 17 00:00:00 2001 From: Yannick Merkli Date: Tue, 30 Aug 2022 21:57:36 +0200 Subject: [PATCH 086/326] Remove usage of `pathlib.Path.unlink(missing_ok=...)` (#9227) remove usage of pathlib.Path.unlink(missing_ok=...) 
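The replacement pattern, as a small self-contained sketch (`unlink_if_exists` is an illustrative helper name, not one added by this patch; `Path.unlink(missing_ok=True)` requires Python >= 3.8, hence the explicit existence check):

    from pathlib import Path

    def unlink_if_exists(p: Path):
        # portable stand-in for p.unlink(missing_ok=True) on Python < 3.8
        if p.exists():
            p.unlink()
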
Co-authored-by: Yannick Merkli --- utils/dataloaders.py | 4 +++- utils/downloads.py | 18 ++++++++++++------ 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index d4ab592bbea7..c61068ea316f 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -917,7 +917,9 @@ def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), ann indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files - [(path.parent / x).unlink(missing_ok=True) for x in txt] # remove existing + for x in txt: + if (path.parent / x).exists(): + (path.parent / x).unlink() # remove existing print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only) for i, img in tqdm(zip(indices, files), total=n): diff --git a/utils/downloads.py b/utils/downloads.py index 69887a579966..b56fc28c3bde 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -44,12 +44,14 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): torch.hub.download_url_to_file(url, str(file), progress=LOGGER.level <= logging.INFO) assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check except Exception as e: # url2 - file.unlink(missing_ok=True) # remove partial downloads + if file.exists(): + file.unlink() # remove partial downloads LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') os.system(f"curl -# -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail finally: if not file.exists() or file.stat().st_size < min_bytes: # check - file.unlink(missing_ok=True) # remove partial downloads + if file.exists(): + file.unlink() # remove partial downloads LOGGER.info(f"ERROR: {assert_msg}\n{error_msg}") LOGGER.info('') @@ -112,8 +114,10 @@ def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): file = Path(file) cookie = Path('cookie') # gdrive cookie print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... 
', end='') - file.unlink(missing_ok=True) # remove existing file - cookie.unlink(missing_ok=True) # remove existing cookie + if file.exists(): + file.unlink() # remove existing file + if cookie.exists(): + cookie.unlink() # remove existing cookie # Attempt file download out = "NUL" if platform.system() == "Windows" else "/dev/null" @@ -123,11 +127,13 @@ def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): else: # small file s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' r = os.system(s) # execute, capture return - cookie.unlink(missing_ok=True) # remove existing cookie + if cookie.exists(): + cookie.unlink() # remove existing cookie # Error check if r != 0: - file.unlink(missing_ok=True) # remove partial + if file.exists(): + file.unlink() # remove partial print('Download error ') # raise Exception('Download error') return r From 79e181a83badd31c5013fffa0b80b55ff090c761 Mon Sep 17 00:00:00 2001 From: spacewalk01 Date: Thu, 1 Sep 2022 00:31:13 +0900 Subject: [PATCH 087/326] Add support for *`.pfm` images (#9230) add support for pfm image --- utils/dataloaders.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index c61068ea316f..84215925284e 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -36,7 +36,7 @@ # Parameters HELP_URL = 'See https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' -IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp' # include image suffixes +IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}' # tqdm bar format LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html From 55b009616b4701f73311d1272cc87057d84a93e6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 31 Aug 2022 18:53:46 +0200 Subject: [PATCH 088/326] Python check warning emoji (#9238) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 3e42e887283c..bc978ea221f3 100755 --- a/utils/general.py +++ b/utils/general.py @@ -335,7 +335,7 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals # Check version vs. 
required version current, minimum = (pkg.parse_version(x) for x in (current, minimum)) result = (current == minimum) if pinned else (current >= minimum) # bool - s = f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' # string + s = f'WARNING: ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed' # string if hard: assert result, s # assert min requirements met if verbose and not result: From 223c59dbe07357a0bf760ea49cef6e1d7b66df91 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 1 Sep 2022 12:13:53 +0200 Subject: [PATCH 089/326] Add `url_getsize()` function (#9247) * Add `url_getsize()` function Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update downloads.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/downloads.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/utils/downloads.py b/utils/downloads.py index b56fc28c3bde..dd2698f995a4 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -33,6 +33,12 @@ def gsutil_getsize(url=''): return eval(s.split(' ')[0]) if len(s) else 0 # bytes +def url_getsize(url='https://ultralytics.com/images/bus.jpg'): + # Return downloadable file size in bytes + response = requests.head(url, allow_redirects=True) + return int(response.headers.get('content-length', -1)) + + def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes from utils.general import LOGGER From c91d1db7161f4cffe70535378b81faf3ff4549b4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 1 Sep 2022 14:30:21 +0200 Subject: [PATCH 090/326] Update dataloaders.py (#9250) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 84215925284e..a4e6c0cfef18 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -40,6 +40,7 @@ VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}' # tqdm bar format LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true' # global pin_memory for dataloaders # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): @@ -83,7 +84,7 @@ def exif_transpose(image): 5: Image.TRANSPOSE, 6: Image.ROTATE_270, 7: Image.TRANSVERSE, - 8: Image.ROTATE_90,}.get(orientation) + 8: Image.ROTATE_90}.get(orientation) if method is not None: image = image.transpose(method) del exif[0x0112] @@ -144,7 +145,7 @@ def create_dataloader(path, shuffle=shuffle and sampler is None, num_workers=nw, sampler=sampler, - pin_memory=True, + pin_memory=PIN_MEMORY, collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn, worker_init_fn=seed_worker, generator=generator), dataset @@ -1152,6 +1153,6 @@ def create_classification_dataloader(path, shuffle=shuffle and sampler is None, num_workers=nw, sampler=sampler, - pin_memory=True, + pin_memory=PIN_MEMORY, worker_init_fn=seed_worker, generator=generator) # or DataLoader(persistent_workers=True) From 2d082a07bd28952159bf534c8728865ba577a449 Mon Sep 17 00:00:00 2001 
From: Ayush Chaurasia Date: Thu, 1 Sep 2022 22:47:36 +0530 Subject: [PATCH 091/326] Refactor Loggers : Move code outside train.py (#9241) * update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- train.py | 11 +++++------ utils/loggers/__init__.py | 11 +++++++++++ 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/train.py b/train.py index 0cd4a7f065a6..29293aa612cf 100644 --- a/train.py +++ b/train.py @@ -91,17 +91,16 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio data_dict = None if RANK in {-1, 0}: loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance - if loggers.clearml: - data_dict = loggers.clearml.data_dict # None if no ClearML dataset or filled in by ClearML - if loggers.wandb: - data_dict = loggers.wandb.data_dict - if resume: - weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size # Register actions for k in methods(loggers): callbacks.register_action(k, callback=getattr(loggers, k)) + # Process custom dataset artifact link + data_dict = loggers.remote_dataset + if resume: # If resuming runs from remote artifact + weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size + # Config plots = not evolve and not opt.noplots # create plots cuda = device.type != 'cpu' diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 880039b1914c..1aa8427f9127 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -107,6 +107,17 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, else: self.clearml = None + @property + def remote_dataset(self): + # Get data_dict if custom dataset artifact link is provided + data_dict = None + if self.clearml: + data_dict = self.clearml.data_dict + if self.wandb: + data_dict = self.wandb.data_dict + + return data_dict + def on_train_start(self): # Callback runs on train start pass From ea98199041088a378b4f13316ba96afc637dfb83 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 1 Sep 2022 19:36:27 +0200 Subject: [PATCH 092/326] Update general.py (#9252) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index bc978ea221f3..ba6d9e165901 100755 --- a/utils/general.py +++ b/utils/general.py @@ -337,7 +337,7 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals result = (current == minimum) if pinned else (current >= minimum) # bool s = f'WARNING: ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed' # string if hard: - assert result, s # assert min requirements met + assert result, emojis(s) # assert min requirements met if verbose and not result: LOGGER.warning(s) return result From 9da6d0f9f5bc37fa386b7b82d2a963f94650949a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 1 Sep 2022 22:30:26 +0200 Subject: [PATCH 093/326] Add LoadImages._cv2_rotate() (#9249) Optional manual rotation code per iPhone rotation issue in https://github.com/ultralytics/yolov5/issues/8493 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 20 +++++++++++++++++--- 1 file changed, 17 
insertions(+), 3 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index a4e6c0cfef18..5f86f83786db 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -213,7 +213,7 @@ def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None): self.auto = auto self.transforms = transforms # optional if any(videos): - self.new_video(videos[0]) # new video + self._new_video(videos[0]) # new video else: self.cap = None assert self.nf > 0, f'No images or videos found in {p}. ' \ @@ -238,10 +238,11 @@ def __next__(self): if self.count == self.nf: # last video raise StopIteration path = self.files[self.count] - self.new_video(path) + self._new_video(path) ret_val, im0 = self.cap.read() self.frame += 1 + # im0 = self._cv2_rotate(im0) # for use if cv2 auto rotation is False s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' else: @@ -260,10 +261,23 @@ def __next__(self): return path, im, im0, self.cap, s - def new_video(self, path): + def _new_video(self, path): + # Create a new video capture object self.frame = 0 self.cap = cv2.VideoCapture(path) self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) + self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees + # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493 + + def _cv2_rotate(self, im): + # Rotate a cv2 video manually + if self.orientation == 0: + return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE) + elif self.orientation == 180: + return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE) + elif self.orientation == 90: + return cv2.rotate(im, cv2.ROTATE_180) + return im def __len__(self): return self.nf # number of files From ffdb58b0e07d964eb2d148a6814d22a4a26d47cc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Sep 2022 14:12:10 +0200 Subject: [PATCH 094/326] Move `cudnn.benchmarks(True)` to LoadStreams (#9258) * Move cudnn.benchmarks(True) to LoadStreams * Update dataloaders.py Signed-off-by: Glenn Jocher * Move cudnn.benchmarks(True) to LoadStreams Signed-off-by: Glenn Jocher --- classify/predict.py | 2 -- detect.py | 2 -- utils/dataloaders.py | 54 ++++---------------------------------------- 3 files changed, 4 insertions(+), 54 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 937704d0f080..76115c75029f 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -31,7 +31,6 @@ from pathlib import Path import torch -import torch.backends.cudnn as cudnn import torch.nn.functional as F FILE = Path(__file__).resolve() @@ -89,7 +88,6 @@ def run( # Dataloader if webcam: view_img = check_imshow() - cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0])) bs = len(dataset) # batch_size else: diff --git a/detect.py b/detect.py index 60a821b59a03..cf75d0f11c92 100644 --- a/detect.py +++ b/detect.py @@ -31,7 +31,6 @@ from pathlib import Path import torch -import torch.backends.cudnn as cudnn FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory @@ -97,7 +96,6 @@ def run( # Dataloader if webcam: view_img = check_imshow() - cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 5f86f83786db..38ae3399ce26 100755 --- a/utils/dataloaders.py +++ 
b/utils/dataloaders.py @@ -283,62 +283,17 @@ def __len__(self): return self.nf # number of files -class LoadWebcam: # for inference - # YOLOv5 local webcam dataloader, i.e. `python detect.py --source 0` - def __init__(self, pipe='0', img_size=640, stride=32): - self.img_size = img_size - self.stride = stride - self.pipe = eval(pipe) if pipe.isnumeric() else pipe - self.cap = cv2.VideoCapture(self.pipe) # video capture object - self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size - - def __iter__(self): - self.count = -1 - return self - - def __next__(self): - self.count += 1 - if cv2.waitKey(1) == ord('q'): # q to quit - self.cap.release() - cv2.destroyAllWindows() - raise StopIteration - - # Read frame - ret_val, im0 = self.cap.read() - im0 = cv2.flip(im0, 1) # flip left-right - - # Print - assert ret_val, f'Camera Error {self.pipe}' - img_path = 'webcam.jpg' - s = f'webcam {self.count}: ' - - # Process - im = letterbox(im0, self.img_size, stride=self.stride)[0] # resize - im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB - im = np.ascontiguousarray(im) # contiguous - - return img_path, im, im0, None, s - - def __len__(self): - return 0 - - class LoadStreams: # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, transforms=None): + torch.backends.cudnn.benchmark = True # faster for fixed-size inference self.mode = 'stream' self.img_size = img_size self.stride = stride - - if os.path.isfile(sources): - with open(sources) as f: - sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())] - else: - sources = [sources] - + sources = Path(sources).read_text().rsplit() if Path(sources).is_file() else [sources] n = len(sources) - self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n self.sources = [clean_str(x) for x in sources] # clean source names for later + self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n for i, s in enumerate(sources): # index, source # Start thread to read frames from video stream st = f'{i + 1}/{n}: {s}... 
' @@ -377,8 +332,7 @@ def update(self, i, cap, stream): n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame while cap.isOpened() and n < f: n += 1 - # _, self.imgs[index] = cap.read() - cap.grab() + cap.grab() # .read() = .grab() followed by .retrieve() if n % read == 0: success, im = cap.retrieve() if success: From 5d4787baabea694369ad95c7d762139eb9f04e56 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Sep 2022 16:05:23 +0200 Subject: [PATCH 095/326] `cudnn.benchmark = True` on Seed 0 (#9259) * `cudnn.benchmark = True` on Seed 0 Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/utils/general.py b/utils/general.py index ba6d9e165901..25a1a1456009 100755 --- a/utils/general.py +++ b/utils/general.py @@ -217,20 +217,17 @@ def print_args(args: Optional[dict] = None, show_file=True, show_func=False): def init_seeds(seed=0, deterministic=False): # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html - # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible - import torch.backends.cudnn as cudnn - - if deterministic and check_version(torch.__version__, '1.12.0'): # https://github.com/ultralytics/yolov5/pull/8213 - torch.use_deterministic_algorithms(True) - os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' - os.environ['PYTHONHASHSEED'] = str(seed) - random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) - cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) # for Multi-GPU, exception safe + torch.backends.cudnn.benchmark = True # for faster training + if deterministic and check_version(torch.__version__, '1.12.0'): # https://github.com/ultralytics/yolov5/pull/8213 + torch.use_deterministic_algorithms(True) + torch.backends.cudnn.deterministic = True + os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' + os.environ['PYTHONHASHSEED'] = str(seed) def intersect_dicts(da, db, exclude=()): From 15e82d296720d4be344bf42a34d60ffd57b3eb28 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Sep 2022 16:24:30 +0200 Subject: [PATCH 096/326] Update `TryExcept(msg='...')`` (#9261) --- utils/__init__.py | 4 ++-- utils/autoanchor.py | 2 +- utils/metrics.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/__init__.py b/utils/__init__.py index 8bdffd47b3b2..46225c2208ce 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -9,7 +9,7 @@ class TryExcept(contextlib.ContextDecorator): # YOLOv5 TryExcept class. 
Usage: @TryExcept() decorator or 'with TryExcept():' context manager - def __init__(self, msg='default message here'): + def __init__(self, msg=''): self.msg = msg def __enter__(self): @@ -17,7 +17,7 @@ def __enter__(self): def __exit__(self, exc_type, value, traceback): if value: - print(f'{self.msg}: {value}') + print(f'{self.msg}{value}') return True diff --git a/utils/autoanchor.py b/utils/autoanchor.py index ac17c6cafc90..0b49ab3319c0 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -26,7 +26,7 @@ def check_anchor_order(m): m.anchors[:] = m.anchors.flip(0) -@TryExcept(f'{PREFIX}ERROR:') +@TryExcept(f'{PREFIX}ERROR: ') def check_anchors(dataset, model, thr=4.0, imgsz=640): # Check anchor fit to data, recompute if necessary m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() diff --git a/utils/metrics.py b/utils/metrics.py index de1bf05b326b..ee7d33982cfc 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -186,7 +186,7 @@ def tp_fp(self): # fn = self.matrix.sum(0) - tp # false negatives (missed detections) return tp[:-1], fp[:-1] # remove background class - @TryExcept('WARNING: ConfusionMatrix plot failure') + @TryExcept('WARNING: ConfusionMatrix plot failure: ') def plot(self, normalize=True, save_dir='', names=()): import seaborn as sn From 5cb9fe612a215e0b7f6d99bf39e91cc52ab13c53 Mon Sep 17 00:00:00 2001 From: Victor Sonck Date: Sat, 3 Sep 2022 20:49:25 +0200 Subject: [PATCH 097/326] Make sure best.pt model file is preserved ClearML (#9265) * Make sure best.pt model file is preserved ClearML * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/loggers/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 1aa8427f9127..3aee35844f52 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -233,7 +233,9 @@ def on_train_end(self, last, best, epoch, results): self.wandb.finish_run() if self.clearml and not self.opt.evolve: - self.clearml.task.update_output_model(model_path=str(best if best.exists() else last), name='Best Model') + self.clearml.task.update_output_model(model_path=str(best if best.exists() else last), + name='Best Model', + auto_delete_file=False) def on_params_update(self, params: dict): # Update hyperparams or configs of the experiment From 63ecce60eab055bd5fec3223ee2b8d8a3d099349 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 4 Sep 2022 01:33:38 +0200 Subject: [PATCH 098/326] DetectMultiBackend improvements (#9269) * Update common.py Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- models/common.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/models/common.py b/models/common.py index d308244c4a44..2e5d5a198e33 100644 --- a/models/common.py +++ b/models/common.py @@ -354,6 +354,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, import onnxruntime providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] session = onnxruntime.InferenceSession(w, providers=providers) + output_names = [x.name for x in session.get_outputs()] meta = session.get_modelmeta().custom_metadata_map # metadata if 'stride' in 
meta: stride, names = int(meta['stride']), eval(meta['names']) @@ -372,9 +373,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, batch_size = batch_dim.get_length() executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2 output_layer = next(iter(executable_network.outputs)) - meta = Path(w).with_suffix('.yaml') - if meta.exists(): - stride, names = self._load_metadata(meta) # load metadata + stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata elif engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download @@ -476,7 +475,7 @@ def forward(self, im, augment=False, visualize=False, val=False): y = self.net.forward() elif self.onnx: # ONNX Runtime im = im.cpu().numpy() # torch to numpy - y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0] + y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})[0] elif self.xml: # OpenVINO im = im.cpu().numpy() # FP32 y = self.executable_network([im])[self.output_layer] @@ -524,7 +523,7 @@ def forward(self, im, augment=False, visualize=False, val=False): y[..., :4] *= [w, h, w, h] # xywh normalized to pixels if isinstance(y, np.ndarray): - y = torch.tensor(y, device=self.device) + y = torch.from_numpy(y).to(self.device) return (y, []) if val else y def warmup(self, imgsz=(1, 3, 640, 640)): @@ -548,10 +547,12 @@ def _model_type(p='path/to/model.pt'): return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs @staticmethod - def _load_metadata(f='path/to/meta.yaml'): + def _load_metadata(f=Path('path/to/meta.yaml')): # Load metadata from meta.yaml if it exists - d = yaml_load(f) - return d['stride'], d['names'] # assign stride, names + if f.exists(): + d = yaml_load(f) + return d['stride'], d['names'] # assign stride, names + return None, None class AutoShape(nn.Module): From 96c3c7f71d6af51819c270e2752603665680ced7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 4 Sep 2022 14:01:43 +0200 Subject: [PATCH 099/326] Update DetectMultiBackend for tuple outputs (#9274) Update --- models/common.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/models/common.py b/models/common.py index 2e5d5a198e33..5c82b18f102c 100644 --- a/models/common.py +++ b/models/common.py @@ -465,17 +465,15 @@ def forward(self, im, augment=False, visualize=False, val=False): if self.pt: # PyTorch y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im) - if isinstance(y, tuple): - y = y[0] elif self.jit: # TorchScript - y = self.model(im)[0] + y = self.model(im) elif self.dnn: # ONNX OpenCV DNN im = im.cpu().numpy() # torch to numpy self.net.setInput(im) y = self.net.forward() elif self.onnx: # ONNX Runtime im = im.cpu().numpy() # torch to numpy - y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})[0] + y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im}) elif self.xml: # OpenVINO im = im.cpu().numpy() # FP32 y = self.executable_network([im])[self.output_layer] @@ -522,6 +520,8 @@ def forward(self, im, augment=False, visualize=False, val=False): y = (y.astype(np.float32) - zero_point) * scale # re-scale y[..., :4] *= [w, h, w, h] # xywh normalized to pixels + if isinstance(y, (list, tuple)): + y = y[0] if isinstance(y, np.ndarray): y = 
torch.from_numpy(y).to(self.device) return (y, []) if val else y From 7aa263c5f2f526472435babf86ddd33eed1dbd78 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 4 Sep 2022 15:39:57 +0200 Subject: [PATCH 100/326] Update DetectMultiBackend for tuple outputs 2 (#9275) * Update DetectMultiBackend for tuple outputs 2 Signed-off-by: Glenn Jocher * Update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update * Update * Update Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/common.py | 12 +++++++----- utils/general.py | 3 +++ val.py | 4 ++-- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/models/common.py b/models/common.py index 5c82b18f102c..7ac3a4a29672 100644 --- a/models/common.py +++ b/models/common.py @@ -457,7 +457,7 @@ def wrap_frozen_graph(gd, inputs, outputs): self.__dict__.update(locals()) # assign all variables to self - def forward(self, im, augment=False, visualize=False, val=False): + def forward(self, im, augment=False, visualize=False): # YOLOv5 MultiBackend inference b, ch, h, w = im.shape # batch, channel, height, width if self.fp16 and im.dtype != torch.float16: @@ -521,10 +521,12 @@ def forward(self, im, augment=False, visualize=False, val=False): y[..., :4] *= [w, h, w, h] # xywh normalized to pixels if isinstance(y, (list, tuple)): - y = y[0] - if isinstance(y, np.ndarray): - y = torch.from_numpy(y).to(self.device) - return (y, []) if val else y + return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y] + else: + return self.from_numpy(y) + + def from_numpy(self, x): + return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x def warmup(self, imgsz=(1, 3, 640, 640)): # Warmup model by running inference once diff --git a/utils/general.py b/utils/general.py index 25a1a1456009..cae63fd9dd21 100755 --- a/utils/general.py +++ b/utils/general.py @@ -813,6 +813,9 @@ def non_max_suppression(prediction, list of detections, on (n,6) tensor per image [xyxy, conf, cls] """ + if isinstance(prediction, (list, tuple)): # YOLOv5 model in validation model, output = (inference_out, loss_out) + prediction = prediction[0] # select only inference output + bs = prediction.shape[0] # batch size nc = prediction.shape[2] - 5 # number of classes xc = prediction[..., 4] > conf_thres # candidates diff --git a/val.py b/val.py index 58b9c9e1bec0..5427ee7b3619 100644 --- a/val.py +++ b/val.py @@ -204,11 +204,11 @@ def run( # Inference with dt[1]: - out, train_out = model(im) if training else model(im, augment=augment, val=True) # inference, loss outputs + out, train_out = model(im) if compute_loss else (model(im, augment=augment), None) # Loss if compute_loss: - loss += compute_loss([x.float() for x in train_out], targets)[1] # box, obj, cls + loss += compute_loss(train_out, targets)[1] # box, obj, cls # NMS targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels From e45d335bbc4a891a2a9f49311f4448e252d3d88f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 4 Sep 2022 16:35:16 +0200 Subject: [PATCH 101/326] Update benchmarks CI with `--hard-fail` min metric floor (#9276) * Update benchmarks CI with min metric floor Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update benchmarks.py Signed-off-by: Glenn Jocher * Update benchmarks.py Signed-off-by: Glenn Jocher * Update benchmarks.py Signed-off-by: Glenn Jocher * Update benchmarks.py 
Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 2 +- utils/benchmarks.py | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 4ef930c61233..6fb277676959 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -39,7 +39,7 @@ jobs: pip list - name: Run benchmarks run: | - python utils/benchmarks.py --weights ${{ matrix.model }}.pt --img 320 --hard-fail + python utils/benchmarks.py --weights ${{ matrix.model }}.pt --img 320 --hard-fail 0.29 Tests: timeout-minutes: 60 diff --git a/utils/benchmarks.py b/utils/benchmarks.py index d412653c866f..d5f4c1d61fbe 100644 --- a/utils/benchmarks.py +++ b/utils/benchmarks.py @@ -92,10 +92,14 @@ def run( LOGGER.info('\n') parse_opt() notebook_init() # print system info - c = ['Format', 'Size (MB)', 'mAP@0.5:0.95', 'Inference time (ms)'] if map else ['Format', 'Export', '', ''] + c = ['Format', 'Size (MB)', 'mAP50-95', 'Inference time (ms)'] if map else ['Format', 'Export', '', ''] py = pd.DataFrame(y, columns=c) LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)') LOGGER.info(str(py if map else py.iloc[:, :2])) + if hard_fail and isinstance(hard_fail, str): + metrics = py['mAP50-95'].array # values to compare to floor + floor = eval(hard_fail) # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n + assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: mAP50-95 < floor {floor}' return py @@ -141,7 +145,7 @@ def parse_opt(): parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--test', action='store_true', help='test exports only') parser.add_argument('--pt-only', action='store_true', help='test PyTorch only') - parser.add_argument('--hard-fail', action='store_true', help='throw error on benchmark failure') + parser.add_argument('--hard-fail', nargs='?', const=True, default=False, help='Exception on error or < min metric') opt = parser.parse_args() opt.data = check_yaml(opt.data) # check YAML print_args(vars(opt)) From 1aea74cddbc78e7f79dac07090cb157dfc24dbcc Mon Sep 17 00:00:00 2001 From: VELC Date: Sun, 4 Sep 2022 17:15:53 +0200 Subject: [PATCH 102/326] Add new `--vid-stride` inference parameter for videos (#9256) * fps feature/skip frame added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * predict.py updates * Update dataloaders.py Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * remove unused attribute Signed-off-by: Glenn Jocher * Cleanup Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update predict.py Signed-off-by: Glenn Jocher * Update detect.py Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * Rename skip_frame to vid_stride * cleanup * cleanup2 Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- classify/predict.py | 6 ++++-- detect.py | 6 ++++-- utils/dataloaders.py | 15 +++++++++------ 3 files changed, 17 insertions(+), 10 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 76115c75029f..701b5b1ac92d 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -66,6 +66,7 @@ def run( exist_ok=False, # existing project/name ok, do not increment half=False, # use 
FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference + vid_stride=1, # video frame-rate stride ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images @@ -88,10 +89,10 @@ def run( # Dataloader if webcam: view_img = check_imshow() - dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0])) + dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) bs = len(dataset) # batch_size else: - dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0])) + dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs @@ -196,6 +197,7 @@ def parse_opt(): parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand print_args(vars(opt)) diff --git a/detect.py b/detect.py index cf75d0f11c92..69a1bf13aac6 100644 --- a/detect.py +++ b/detect.py @@ -74,6 +74,7 @@ def run( hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference + vid_stride=1, # video frame-rate stride ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images @@ -96,10 +97,10 @@ def run( # Dataloader if webcam: view_img = check_imshow() - dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) + dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) bs = len(dataset) # batch_size else: - dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) + dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs @@ -236,6 +237,7 @@ def parse_opt(): parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand print_args(vars(opt)) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 38ae3399ce26..c1ad1f1a4b83 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -187,7 +187,7 @@ def __iter__(self): class LoadImages: # YOLOv5 image/video dataloader, i.e. 
`python detect.py --source image.jpg/vid.mp4` - def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None): + def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): files = [] for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: p = str(Path(p).resolve()) @@ -212,6 +212,7 @@ def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None): self.mode = 'image' self.auto = auto self.transforms = transforms # optional + self.vid_stride = vid_stride # video frame-rate stride if any(videos): self._new_video(videos[0]) # new video else: @@ -232,6 +233,7 @@ def __next__(self): # Read video self.mode = 'video' ret_val, im0 = self.cap.read() + self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.vid_stride * (self.frame + 1)) # read at vid_stride while not ret_val: self.count += 1 self.cap.release() @@ -242,7 +244,7 @@ def __next__(self): ret_val, im0 = self.cap.read() self.frame += 1 - # im0 = self._cv2_rotate(im0) # for use if cv2 auto rotation is False + # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' else: @@ -265,7 +267,7 @@ def _new_video(self, path): # Create a new video capture object self.frame = 0 self.cap = cv2.VideoCapture(path) - self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) + self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride) self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493 @@ -285,11 +287,12 @@ def __len__(self): class LoadStreams: # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` - def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, transforms=None): + def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): torch.backends.cudnn.benchmark = True # faster for fixed-size inference self.mode = 'stream' self.img_size = img_size self.stride = stride + self.vid_stride = vid_stride # video frame-rate stride sources = Path(sources).read_text().rsplit() if Path(sources).is_file() else [sources] n = len(sources) self.sources = [clean_str(x) for x in sources] # clean source names for later @@ -329,11 +332,11 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, tr def update(self, i, cap, stream): # Read stream `i` frames in daemon thread - n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame + n, f = 0, self.frames[i] # frame number, frame array while cap.isOpened() and n < f: n += 1 cap.grab() # .read() = .grab() followed by .retrieve() - if n % read == 0: + if n % self.vid_stride == 0: success, im = cap.retrieve() if success: self.imgs[i] = im From 32794c130bc0de9cbd1fe34819b7032138bbd81d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 6 Sep 2022 19:00:26 +0300 Subject: [PATCH 103/326] [pre-commit.ci] pre-commit suggestions (#9295) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/executablebooks/mdformat: 0.7.14 → 0.7.16](https://github.com/executablebooks/mdformat/compare/0.7.14...0.7.16) - [github.com/asottile/yesqa: v1.3.0 → v1.4.0](https://github.com/asottile/yesqa/compare/v1.3.0...v1.4.0) - 
[github.com/PyCQA/flake8: 5.0.2 → 5.0.4](https://github.com/PyCQA/flake8/compare/5.0.2...5.0.4) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 43aca019feb1..ba8005535397 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -43,7 +43,7 @@ repos: name: YAPF formatting - repo: https://github.com/executablebooks/mdformat - rev: 0.7.14 + rev: 0.7.16 hooks: - id: mdformat name: MD formatting @@ -53,12 +53,12 @@ repos: exclude: "README.md|README_cn.md" - repo: https://github.com/asottile/yesqa - rev: v1.3.0 + rev: v1.4.0 hooks: - id: yesqa - repo: https://github.com/PyCQA/flake8 - rev: 5.0.2 + rev: 5.0.4 hooks: - id: flake8 name: PEP8 From 5a134e06530a8c24fdb9774c2c4ab0b513b08260 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 7 Sep 2022 10:11:30 +0300 Subject: [PATCH 104/326] Replace deprecated `np.int` with `int` (#9307) Per ``` /content/yolov5/utils/dataloaders.py:458: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations ``` Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index c1ad1f1a4b83..d8ef11fd94b4 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -455,7 +455,7 @@ def __init__(self, self.im_files = list(cache.keys()) # update self.label_files = img2label_paths(cache.keys()) # update n = len(shapes) # number of images - bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index + bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index nb = bi[-1] + 1 # number of batches self.batch = bi # batch index of image self.n = n @@ -497,7 +497,7 @@ def __init__(self, elif mini > 1: shapes[i] = [1, 1 / mini] - self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride + self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride # Cache images into RAM/disk for faster training (WARNING: large datasets may exceed system resources) self.ims = [None] * n @@ -867,7 +867,7 @@ def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.dataloaders impo b = x[1:] * [w, h, w, h] # box # b[2:] = b[2:].max() # rectangle to square b[2:] = b[2:] * 1.2 + 3 # pad - b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int) + b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int) b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image b[[1, 3]] = np.clip(b[[1, 3]], 0, h) From 903b239f1338e7ad8b12dd8e4a3c53f4f362e07f Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Wed, 7 Sep 2022 11:28:46 -0400 Subject: [PATCH 105/326] Comet Logging and Visualization Integration (#9232) * add comet to logger interface * add comet logger * add support for updated parameters * clean up offline logger creation * update callback args for comet logger * add comet optimizer * add optimizer config * add comet README * update 
tutorial notebook with Comet section
* add option to log class level metrics
* add support for class level metrics and confusion matrix
* handle errors when adding files to artifacts
* fix typo
* clean resume workflow
* updates for HPO
* update comet README
* fix typo in comet README
* update code snippets in comet README
* update comet links in tutorial
* updated links
* change optimizer batch size param and update comet README image
* update comet section in tutorial
* use pre-existing cmd line flags to configure logger
* update artifact upload/download flow
* remove comet logger specific cmd line args
* move downloading weights into comet logger code
* remove extra argparse
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci
* change checkpoint logging flow to follow offline logger
* update resume flow
* add comet logger to remote dataset property
* update cmd line args in hpo
* set types for integer/float env variables
* update README
* fix typo in README
* default to always logging model predictions
* Update tutorial.ipynb
* Update train.py
* Add Comet to Integrations table
* Update README.md
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci

Co-authored-by: Ayush Chaurasia
Co-authored-by: Glenn Jocher
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 README.md                                 |  59 +--
 train.py                                  |  17 +-
 tutorial.ipynb                            |  39 +-
 utils/loggers/__init__.py                 |  77 +++-
 utils/loggers/comet/README.md             | 256 +++++++++++
 utils/loggers/comet/__init__.py           | 496 ++++++++++++++++++++++
 utils/loggers/comet/comet_utils.py        | 150 +++++++
 utils/loggers/comet/hpo.py                | 118 +++++
 utils/loggers/comet/optimizer_config.json | 209 +++++++++
 val.py                                    |   4 +-
 10 files changed, 1376 insertions(+), 49 deletions(-)
 create mode 100644 utils/loggers/comet/README.md
 create mode 100644 utils/loggers/comet/__init__.py
 create mode 100644 utils/loggers/comet/comet_utils.py
 create mode 100644 utils/loggers/comet/hpo.py
 create mode 100644 utils/loggers/comet/optimizer_config.json

diff --git a/README.md b/README.md
index 1d6b4e153d82..7763d174f92b 100644
--- a/README.md
+++ b/README.md
@@ -160,46 +160,31 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12
-## Environments
-
-Get started in seconds with our verified environments. Click each icon below for details.
-
-<!-- grid of environment icon links (HTML stripped in extraction) -->

## Integrations

+<!-- grid of integration logo links, Comet added (HTML stripped in extraction) -->

-|Deci ⭐ NEW|ClearML ⭐ NEW|Roboflow|Weights & Biases
-|:-:|:-:|:-:|:-:|
-|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)
+|Comet ⭐ NEW|Deci ⭐ NEW|ClearML ⭐ NEW|Roboflow|Weights & Biases
+|:-:|:-:|:-:|:-:|:-:|
+|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://www.comet.com/site/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)
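The tutorial notebook updated later in this patch series enables each of these loggers from a single cell; condensed to plain Python, the Comet branch is roughly the sketch below (it assumes `comet_ml` has already been pip-installed):

```python
logger = 'Comet'  # one of: 'TensorBoard', 'Comet', 'ClearML', 'W&B'

if logger == 'Comet':
    import comet_ml
    comet_ml.init()  # prompts for an API key if COMET_API_KEY is not set
```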
## Why YOLOv5

@@ -323,6 +308,28 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu

+## Environments
+
+Get started in seconds with our verified environments. Click each icon below for details.
+
+<!-- grid of environment icon links (HTML stripped in extraction) -->
+

## Contribute
We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors! diff --git a/train.py b/train.py index 29293aa612cf..e16c17c499f0 100644 --- a/train.py +++ b/train.py @@ -52,6 +52,7 @@ init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) from utils.loggers import Loggers +from utils.loggers.comet.comet_utils import check_comet_resume from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.loss import ComputeLoss from utils.metrics import fitness @@ -330,7 +331,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) pbar.set_description(('%11s' * 2 + '%11.4g' * 5) % (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) - callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths) + callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths, list(mloss)) if callbacks.stop_training: return # end batch ------------------------------------------------------------------------------------------------ @@ -465,11 +466,11 @@ def parse_opt(known=False): parser.add_argument('--seed', type=int, default=0, help='Global training seed') parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') - # Weights & Biases arguments - parser.add_argument('--entity', default=None, help='W&B: Entity') - parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option') - parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') - parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') + # Logger arguments + parser.add_argument('--entity', default=None, help='Entity') + parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='Upload data, "val" option') + parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval') + parser.add_argument('--artifact_alias', type=str, default='latest', help='Version of dataset artifact to use') return parser.parse_known_args()[0] if known else parser.parse_args() @@ -481,8 +482,8 @@ def main(opt, callbacks=Callbacks()): check_git_status() check_requirements() - # Resume - if opt.resume and not (check_wandb_resume(opt) or opt.evolve): # resume from specified or most recent last.pt + # Resume (from specified or most recent last.pt) + if opt.resume and not check_wandb_resume(opt) and not check_comet_resume(opt) or opt.evolve: last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run()) opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml opt_data = opt.data # original dataset diff --git a/tutorial.ipynb b/tutorial.ipynb index 12840063b1f1..957437b2be6d 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -413,7 +413,7 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": 1, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -465,7 +465,7 @@ 
"!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": 2, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -535,7 +535,7 @@ "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" ], - "execution_count": 3, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -566,7 +566,7 @@ "# Validate YOLOv5s on COCO val\n", "!python val.py --weights yolov5s.pt --data coco.yaml --img 640 --half" ], - "execution_count": 4, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -653,11 +653,14 @@ "cell_type": "code", "source": [ "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", - "logger = 'TensorBoard' #@param ['TensorBoard', 'ClearML', 'W&B']\n", + "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML', 'W&B']\n", "\n", "if logger == 'TensorBoard':\n", " %load_ext tensorboard\n", " %tensorboard --logdir runs/train\n", + "elif logger == 'Comet':\n", + " %pip install -q comet_ml\n", + " import comet_ml; comet_ml.init()\n", "elif logger == 'ClearML':\n", " %pip install -q clearml && clearml-init\n", "elif logger == 'W&B':\n", @@ -683,7 +686,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": 5, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -857,6 +860,28 @@ "# 4. Visualize" ] }, + { + "cell_type": "markdown", + "source": [ + "## Comet Logging and Visualization 🌟 NEW\n", + "[Comet](https://www.comet.com/site/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "\n", + "Getting started is easy:\n", + "```shell\n", + "pip install comet_ml # 1. install\n", + "export COMET_API_KEY= # 2. paste API key\n", + "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", + "```\n", + "\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration). 
Get started by trying out the Comet Colab Notebook:\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", + "\n", + "\"yolo-ui\"" + ], + "metadata": { + "id": "nWOsI5wJR1o3" + } + }, { "cell_type": "markdown", "source": [ @@ -1096,4 +1121,4 @@ "outputs": [] } ] -} \ No newline at end of file +} diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 3aee35844f52..f29debb76907 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -17,7 +17,7 @@ from utils.plots import plot_images, plot_labels, plot_results from utils.torch_utils import de_parallel -LOGGERS = ('csv', 'tb', 'wandb', 'clearml') # *.csv, TensorBoard, Weights & Biases, ClearML +LOGGERS = ('csv', 'tb', 'wandb', 'clearml', 'comet') # *.csv, TensorBoard, Weights & Biases, ClearML RANK = int(os.getenv('RANK', -1)) try: @@ -41,6 +41,18 @@ except (ImportError, AssertionError): clearml = None +try: + if RANK not in [0, -1]: + comet_ml = None + else: + import comet_ml + + assert hasattr(comet_ml, '__version__') # verify package import not local dir + from utils.loggers.comet import CometLogger + +except (ModuleNotFoundError, ImportError, AssertionError): + comet_ml = None + class Loggers(): # YOLOv5 Loggers class @@ -80,7 +92,10 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, prefix = colorstr('ClearML: ') s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML" self.logger.info(s) - + if not comet_ml: + prefix = colorstr('Comet: ') + s = f"{prefix}run 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet" + self.logger.info(s) # TensorBoard s = self.save_dir if 'tb' in self.include and not self.opt.evolve: @@ -107,6 +122,18 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, else: self.clearml = None + # Comet + if comet_ml and 'comet' in self.include: + if isinstance(self.opt.resume, str) and self.opt.resume.startswith("comet://"): + run_id = self.opt.resume.split("/")[-1] + self.comet_logger = CometLogger(self.opt, self.hyp, run_id=run_id) + + else: + self.comet_logger = CometLogger(self.opt, self.hyp) + + else: + self.comet_logger = None + @property def remote_dataset(self): # Get data_dict if custom dataset artifact link is provided @@ -115,12 +142,18 @@ def remote_dataset(self): data_dict = self.clearml.data_dict if self.wandb: data_dict = self.wandb.data_dict + if self.comet_logger: + data_dict = self.comet_logger.data_dict return data_dict def on_train_start(self): - # Callback runs on train start - pass + if self.comet_logger: + self.comet_logger.on_train_start() + + def on_pretrain_routine_start(self): + if self.comet_logger: + self.comet_logger.on_pretrain_routine_start() def on_pretrain_routine_end(self, labels, names): # Callback runs on pre-train routine end @@ -131,8 +164,11 @@ def on_pretrain_routine_end(self, labels, names): self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) # if self.clearml: # pass # ClearML saves these images automatically using hooks + if self.comet_logger: + self.comet_logger.on_pretrain_routine_end(paths) - def on_train_batch_end(self, model, ni, imgs, targets, paths): + def on_train_batch_end(self, model, ni, imgs, targets, paths, vals): + log_dict = dict(zip(self.keys[0:3], vals)) # Callback runs on train batch end # ni: number integrated batches (since train 
start) if self.plots: @@ -148,11 +184,21 @@ def on_train_batch_end(self, model, ni, imgs, targets, paths): if self.clearml: self.clearml.log_debug_samples(files, title='Mosaics') + if self.comet_logger: + self.comet_logger.on_train_batch_end(log_dict, step=ni) + def on_train_epoch_end(self, epoch): # Callback runs on train epoch end if self.wandb: self.wandb.current_epoch = epoch + 1 + if self.comet_logger: + self.comet_logger.on_train_epoch_end(epoch) + + def on_val_start(self): + if self.comet_logger: + self.comet_logger.on_val_start() + def on_val_image_end(self, pred, predn, path, names, im): # Callback runs on val image end if self.wandb: @@ -160,7 +206,11 @@ def on_val_image_end(self, pred, predn, path, names, im): if self.clearml: self.clearml.log_image_with_boxes(path, pred, names, im) - def on_val_end(self): + def on_val_batch_end(self, batch_i, im, targets, paths, shapes, out): + if self.comet_logger: + self.comet_logger.on_val_batch_end(batch_i, im, targets, paths, shapes, out) + + def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix): # Callback runs on val end if self.wandb or self.clearml: files = sorted(self.save_dir.glob('val*.jpg')) @@ -169,6 +219,9 @@ def on_val_end(self): if self.clearml: self.clearml.log_debug_samples(files, title='Validation') + if self.comet_logger: + self.comet_logger.on_val_end(nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) + def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): # Callback runs at the end of each fit (train+val) epoch x = dict(zip(self.keys, vals)) @@ -199,6 +252,9 @@ def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): self.clearml.current_epoch_logged_images = set() # reset epoch image limit self.clearml.current_epoch += 1 + if self.comet_logger: + self.comet_logger.on_fit_epoch_end(x, epoch=epoch) + def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): # Callback runs on model save event if (epoch + 1) % self.opt.save_period == 0 and not final_epoch and self.opt.save_period != -1: @@ -209,6 +265,9 @@ def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): model_name='Latest Model', auto_delete_file=False) + if self.comet_logger: + self.comet_logger.on_model_save(last, epoch, final_epoch, best_fitness, fi) + def on_train_end(self, last, best, epoch, results): # Callback runs on training end, i.e. saving best model if self.plots: @@ -237,10 +296,16 @@ def on_train_end(self, last, best, epoch, results): name='Best Model', auto_delete_file=False) + if self.comet_logger: + final_results = dict(zip(self.keys[3:10], results)) + self.comet_logger.on_train_end(files, self.save_dir, last, best, epoch, final_results) + def on_params_update(self, params: dict): # Update hyperparams or configs of the experiment if self.wandb: self.wandb.wandb_run.config.update(params, allow_val_change=True) + if self.comet_logger: + self.comet_logger.on_params_update(params) class GenericLogger: diff --git a/utils/loggers/comet/README.md b/utils/loggers/comet/README.md new file mode 100644 index 000000000000..7b0b8e0e2f09 --- /dev/null +++ b/utils/loggers/comet/README.md @@ -0,0 +1,256 @@ + + +# YOLOv5 with Comet + +This guide will cover how to use YOLOv5 with [Comet](https://www.comet.com/site/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) + +# About Comet + +Comet builds tools that help data scientists, engineers, and team leaders accelerate and optimize machine learning and deep learning models. 
+
+Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)!
+Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!
+
+# Getting Started
+
+## Install Comet
+
+```shell
+pip install comet_ml
+```
+
+## Configure Comet Credentials
+
+There are two ways to configure Comet with YOLOv5.
+
+You can either set your credentials through environment variables
+
+**Environment Variables**
+
+```shell
+export COMET_API_KEY=
+export COMET_PROJECT_NAME= # This will default to 'yolov5'
+```
+
+Or create a `.comet.config` file in your working directory and set your credentials there.
+
+**Comet Configuration File**
+
+```
+[comet]
+api_key=
+project_name= # This will default to 'yolov5'
+```
+
+## Run the Training Script
+
+```shell
+# Train YOLOv5s on COCO128 for 5 epochs
+python train.py --img 640 --batch 16 --epochs 5 --data coco128.yaml --weights yolov5s.pt
+```
+
+That's it! Comet will automatically log your hyperparameters, command line arguments, and training and validation metrics. You can visualize and analyze your runs in the Comet UI.
+
+<!-- screenshot: yolo-ui -->
+
+# Try out an Example!
+Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)
+
+Or better yet, try it out yourself in this Colab Notebook
+
+[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)
+
+# Log automatically
+
+By default, Comet will log the following items
+
+## Metrics
+- Box Loss, Object Loss, Classification Loss for the training and validation data
+- mAP_0.5, mAP_0.5:0.95 metrics for the validation data.
+- Precision and Recall for the validation data
+
+## Parameters
+
+- Model Hyperparameters
+- All parameters passed through the command line options
+
+## Visualizations
+
+- Confusion Matrix of the model predictions on the validation data
+- Plots for the PR and F1 curves across all classes
+- Correlogram of the Class Labels
+
+# Configure Comet Logging
+
+Comet can be configured to log additional data either through command line flags passed to the training script
+or through environment variables.
+
+```shell
+export COMET_MODE=online # Set whether to run Comet in 'online' or 'offline' mode. Defaults to online
+export COMET_MODEL_NAME= # Set the name for the saved model. Defaults to yolov5
+export COMET_LOG_CONFUSION_MATRIX=false # Set to disable logging a Comet Confusion Matrix. Defaults to true
+export COMET_MAX_IMAGE_UPLOADS= # Controls how many total image predictions to log to Comet. Defaults to 100.
+export COMET_LOG_PER_CLASS_METRICS=true # Set to log evaluation metrics for each detected class at the end of training. Defaults to false
+export COMET_DEFAULT_CHECKPOINT_FILENAME= # Set this if you would like to resume training from a different checkpoint. Defaults to 'last.pt'
+export COMET_LOG_BATCH_LEVEL_METRICS=true # Set this if you would like to log training metrics at the batch level. Defaults to false.
+export COMET_LOG_PREDICTIONS=true # Set this to false to disable logging model predictions
+```
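These variables are plain strings read once at import time by `utils/loggers/comet/__init__.py` (added later in this patch). A minimal sketch of how they are interpreted there, using only the standard library: boolean flags are lowercase string comparisons, and numeric settings are cast from their string values.

```python
import os

# Boolean flags: the strings "true", "True" and "TRUE" all enable the feature
COMET_LOG_PREDICTIONS = os.getenv("COMET_LOG_PREDICTIONS", "true").lower() == "true"
COMET_LOG_PER_CLASS_METRICS = os.getenv("COMET_LOG_PER_CLASS_METRICS", "false").lower() == "true"

# Numeric settings: cast to int from the environment's string value
COMET_MAX_IMAGE_UPLOADS = int(os.getenv("COMET_MAX_IMAGE_UPLOADS", 100))
```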
+## Logging Checkpoints with Comet
+
+Logging Models to Comet is disabled by default. To enable it, pass the `save-period` argument to the training script. This will save the
+logged checkpoints to Comet based on the interval value provided by `save-period`
+
+```shell
+python train.py \
+--img 640 \
+--batch 16 \
+--epochs 5 \
+--data coco128.yaml \
+--weights yolov5s.pt \
+--save-period 1
+```
+
+## Logging Model Predictions
+
+By default, model predictions (images, ground truth labels and bounding boxes) will be logged to Comet.
+
+You can control the frequency of logged predictions and the associated images by passing the `bbox_interval` command line argument. Predictions can be visualized using Comet's Object Detection Custom Panel. This frequency corresponds to every Nth batch of data per epoch. In the example below, we are logging every 2nd batch of data for each epoch.
+
+**Note:** The YOLOv5 validation dataloader will default to a batch size of 32, so you will have to set the logging frequency accordingly.
+
+Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)
+
+```shell
+python train.py \
+--img 640 \
+--batch 16 \
+--epochs 5 \
+--data coco128.yaml \
+--weights yolov5s.pt \
+--bbox_interval 2
+```
+
+### Controlling the number of Prediction Images logged to Comet
+
+When logging predictions from YOLOv5, Comet will log the images associated with each set of predictions. By default a maximum of 100 validation images are logged. You can increase or decrease this number using the `COMET_MAX_IMAGE_UPLOADS` environment variable.
+
+```shell
+env COMET_MAX_IMAGE_UPLOADS=200 python train.py \
+--img 640 \
+--batch 16 \
+--epochs 5 \
+--data coco128.yaml \
+--weights yolov5s.pt \
+--bbox_interval 1
+```
+
+### Logging Class Level Metrics
+
+Use the `COMET_LOG_PER_CLASS_METRICS` environment variable to log mAP, precision, recall, and f1 for each class.
+
+```shell
+env COMET_LOG_PER_CLASS_METRICS=true python train.py \
+--img 640 \
+--batch 16 \
+--epochs 5 \
+--data coco128.yaml \
+--weights yolov5s.pt
+```
+
+## Uploading a Dataset to Comet Artifacts
+
+If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration), you can do so using the `upload_dataset` flag.
+
+The dataset should be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/tutorials/train-custom-datasets/#3-organize-directories). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file.
+
+```shell
+python train.py \
+--img 640 \
+--batch 16 \
+--epochs 5 \
+--data coco128.yaml \
+--weights yolov5s.pt \
+--upload_dataset
+```
+
+You can find the uploaded dataset in the Artifacts tab in your Comet Workspace.
+<!-- screenshot: artifact-1 -->
+
+You can preview the data directly in the Comet UI.
+<!-- screenshot: artifact-2 -->
+
+Artifacts are versioned and also support adding metadata about the dataset. Comet will automatically log the metadata from your dataset `yaml` file.
+<!-- screenshot: artifact-3 -->
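One detail the flag does not surface: the Artifact's name comes from an optional `dataset_name` key in the data `yaml`, with a generic fallback. A minimal sketch of the naming and creation step, mirroring `upload_dataset_artifact` in `utils/loggers/comet/__init__.py` (the dict below stands in for a parsed dataset yaml):

```python
import comet_ml  # assumes comet_ml is installed; uploading also requires configured credentials

data_dict = {"path": "../datasets/coco128", "nc": 80}  # illustrative parsed dataset yaml
dataset_name = data_dict.get("dataset_name", "yolov5-dataset")  # optional key, generic default
artifact = comet_ml.Artifact(name=dataset_name, artifact_type="dataset", metadata=data_dict)
```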
+### Using a saved Artifact
+
+If you would like to use a dataset from Comet Artifacts, set the `path` variable in your dataset `yaml` file to point to the following Artifact resource URL.
+
+```
+# contents of artifact.yaml file
+path: "comet://<workspace>/<artifact_name>:<artifact_version_or_alias>"
+```
+Then pass this file to your training script in the following way
+
+```shell
+python train.py \
+--img 640 \
+--batch 16 \
+--epochs 5 \
+--data artifact.yaml \
+--weights yolov5s.pt
+```
+
+Artifacts also allow you to track the lineage of data as it flows through your Experimentation workflow. Here you can see a graph that shows you all the experiments that have used your uploaded dataset.
+<!-- screenshot: artifact-4 -->
+
+## Resuming a Training Run
+
+If your training run is interrupted for any reason, e.g. disrupted internet connection, you can resume the run using the `resume` flag and the Comet Run Path.
+
+The Run Path has the following format `comet://<workspace>/<project>/<experiment_id>`.
+
+This will restore the run to its state before the interruption, which includes restoring the model from a checkpoint, restoring all hyperparameters and training arguments and downloading Comet dataset Artifacts if they were used in the original run. The resumed run will continue logging to the existing Experiment in the Comet UI
+
+```shell
+python train.py \
+--resume "comet://<workspace>/<project>/<experiment_id>"
+```
+
+## Hyperparameter Search with the Comet Optimizer
+
+YOLOv5 is also integrated with Comet's Optimizer, making it simple to visualize hyperparameter sweeps in the Comet UI.
+
+### Configuring an Optimizer Sweep
+
+To configure the Comet Optimizer, you will have to create a JSON file with the information about the sweep. An example file has been provided in `utils/loggers/comet/optimizer_config.json`
+
+```shell
+python utils/loggers/comet/hpo.py \
+  --comet_optimizer_config "utils/loggers/comet/optimizer_config.json"
+```
+
+The `hpo.py` script accepts the same arguments as `train.py`. If you wish to pass additional arguments to your sweep simply add them after
+the script.
+
+```shell
+python utils/loggers/comet/hpo.py \
+  --comet_optimizer_config "utils/loggers/comet/optimizer_config.json" \
+  --save-period 1 \
+  --bbox_interval 1
+```
+
+### Running a Sweep in Parallel
+
+```shell
+comet optimizer -j <number of workers> utils/loggers/comet/hpo.py \
+  utils/loggers/comet/optimizer_config.json
+```
+
+### Visualizing Results
+
+Comet provides a number of ways to visualize the results of your sweep; the runs in a sweep can also be inspected programmatically, as sketched below.
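As a rough sketch (not part of this patch), the same REST wrapper that `comet_utils.py` uses for resuming can also list a run's logged checkpoints; the run path and model name below are placeholders:

```python
import comet_ml  # requires a configured Comet API key

api = comet_ml.API()
experiment = api.get("my-workspace/yolov5/abc123")  # <workspace>/<project>/<experiment_id>
for asset in experiment.get_model_asset_list("yolov5"):  # checkpoints saved under COMET_MODEL_NAME
    print(asset["fileName"], asset["step"])
```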
Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) + +hyperparameter-yolo \ No newline at end of file diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py new file mode 100644 index 000000000000..b168687dd7b2 --- /dev/null +++ b/utils/loggers/comet/__init__.py @@ -0,0 +1,496 @@ +import glob +import json +import logging +import os +import sys +from pathlib import Path + +logger = logging.getLogger(__name__) + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[3] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH + +try: + import comet_ml + + # Project Configuration + config = comet_ml.config.get_config() + COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") +except (ModuleNotFoundError, ImportError): + comet_ml = None + COMET_PROJECT_NAME = None + +import torch +import torchvision.transforms as T +import yaml + +from utils.dataloaders import img2label_paths +from utils.general import check_dataset, scale_coords, xywh2xyxy +from utils.metrics import box_iou + +COMET_PREFIX = "comet://" + +COMET_MODE = os.getenv("COMET_MODE", "online") + +# Model Saving Settings +COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") + +# Dataset Artifact Settings +COMET_UPLOAD_DATASET = os.getenv("COMET_UPLOAD_DATASET", "false").lower() == "true" + +# Evaluation Settings +COMET_LOG_CONFUSION_MATRIX = os.getenv("COMET_LOG_CONFUSION_MATRIX", "true").lower() == "true" +COMET_LOG_PREDICTIONS = os.getenv("COMET_LOG_PREDICTIONS", "true").lower() == "true" +COMET_MAX_IMAGE_UPLOADS = int(os.getenv("COMET_MAX_IMAGE_UPLOADS", 100)) + +# Confusion Matrix Settings +CONF_THRES = float(os.getenv("CONF_THRES", 0.001)) +IOU_THRES = float(os.getenv("IOU_THRES", 0.6)) + +# Batch Logging Settings +COMET_LOG_BATCH_METRICS = os.getenv("COMET_LOG_BATCH_METRICS", "false").lower() == "true" +COMET_BATCH_LOGGING_INTERVAL = os.getenv("COMET_BATCH_LOGGING_INTERVAL", 1) +COMET_PREDICTION_LOGGING_INTERVAL = os.getenv("COMET_PREDICTION_LOGGING_INTERVAL", 1) +COMET_LOG_PER_CLASS_METRICS = os.getenv("COMET_LOG_PER_CLASS_METRICS", "false").lower() == "true" + +RANK = int(os.getenv("RANK", -1)) + +to_pil = T.ToPILImage() + + +class CometLogger: + """Log metrics, parameters, source code, models and much more + with Comet + """ + + def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwargs) -> None: + self.job_type = job_type + self.opt = opt + self.hyp = hyp + + # Comet Flags + self.comet_mode = COMET_MODE + + self.save_model = opt.save_period > -1 + self.model_name = COMET_MODEL_NAME + + # Batch Logging Settings + self.log_batch_metrics = COMET_LOG_BATCH_METRICS + self.comet_log_batch_interval = COMET_BATCH_LOGGING_INTERVAL + + # Dataset Artifact Settings + self.upload_dataset = self.opt.upload_dataset if self.opt.upload_dataset else COMET_UPLOAD_DATASET + self.resume = self.opt.resume + + # Default parameters to pass to Experiment objects + self.default_experiment_kwargs = { + "log_code": False, + "log_env_gpu": True, + "log_env_cpu": True, + "project_name": COMET_PROJECT_NAME,} + self.default_experiment_kwargs.update(experiment_kwargs) + self.experiment = self._get_experiment(self.comet_mode, run_id) + + self.data_dict = self.check_dataset(self.opt.data) + self.class_names = 
self.data_dict["names"] + self.num_classes = self.data_dict["nc"] + + self.logged_images_count = 0 + self.max_images = COMET_MAX_IMAGE_UPLOADS + + if run_id is None: + self.experiment.log_other("Created from", "YOLOv5") + if not isinstance(self.experiment, comet_ml.OfflineExperiment): + workspace, project_name, experiment_id = self.experiment.url.split("/")[-3:] + self.experiment.log_other( + "Run Path", + f"{workspace}/{project_name}/{experiment_id}", + ) + self.log_parameters(vars(opt)) + self.log_parameters(self.opt.hyp) + self.log_asset_data( + self.opt.hyp, + name="hyperparameters.json", + metadata={"type": "hyp-config-file"}, + ) + self.log_asset( + f"{self.opt.save_dir}/opt.yaml", + metadata={"type": "opt-config-file"}, + ) + + self.comet_log_confusion_matrix = COMET_LOG_CONFUSION_MATRIX + + if hasattr(self.opt, "conf_thres"): + self.conf_thres = self.opt.conf_thres + else: + self.conf_thres = CONF_THRES + if hasattr(self.opt, "iou_thres"): + self.iou_thres = self.opt.iou_thres + else: + self.iou_thres = IOU_THRES + + self.comet_log_predictions = COMET_LOG_PREDICTIONS + if self.opt.bbox_interval == -1: + self.comet_log_prediction_interval = self.opt.epochs // 10 if self.opt.epochs < 10 else 1 + else: + self.comet_log_prediction_interval = self.opt.bbox_interval + + if self.comet_log_predictions: + self.metadata_dict = {} + + self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS + + self.experiment.log_others({ + "comet_mode": COMET_MODE, + "comet_max_image_uploads": COMET_MAX_IMAGE_UPLOADS, + "comet_log_per_class_metrics": COMET_LOG_PER_CLASS_METRICS, + "comet_log_batch_metrics": COMET_LOG_BATCH_METRICS, + "comet_log_confusion_matrix": COMET_LOG_CONFUSION_MATRIX, + "comet_model_name": COMET_MODEL_NAME,}) + + # Check if running the Experiment with the Comet Optimizer + if hasattr(self.opt, "comet_optimizer_id"): + self.experiment.log_other("optimizer_id", self.opt.comet_optimizer_id) + self.experiment.log_other("optimizer_objective", self.opt.comet_optimizer_objective) + self.experiment.log_other("optimizer_metric", self.opt.comet_optimizer_metric) + self.experiment.log_other("optimizer_parameters", json.dumps(self.hyp)) + + def _get_experiment(self, mode, experiment_id=None): + if mode == "offline": + if experiment_id is not None: + return comet_ml.ExistingOfflineExperiment( + previous_experiment=experiment_id, + **self.default_experiment_kwargs, + ) + + return comet_ml.OfflineExperiment(**self.default_experiment_kwargs,) + + else: + try: + if experiment_id is not None: + return comet_ml.ExistingExperiment( + previous_experiment=experiment_id, + **self.default_experiment_kwargs, + ) + + return comet_ml.Experiment(**self.default_experiment_kwargs) + + except ValueError: + logger.warning("COMET WARNING: " + "Comet credentials have not been set. " + "Comet will default to offline logging. 
" + "Please set your credentials to enable online logging.") + return self._get_experiment("offline", experiment_id) + + return + + def log_metrics(self, log_dict, **kwargs): + self.experiment.log_metrics(log_dict, **kwargs) + + def log_parameters(self, log_dict, **kwargs): + self.experiment.log_parameters(log_dict, **kwargs) + + def log_asset(self, asset_path, **kwargs): + self.experiment.log_asset(asset_path, **kwargs) + + def log_asset_data(self, asset, **kwargs): + self.experiment.log_asset_data(asset, **kwargs) + + def log_image(self, img, **kwargs): + self.experiment.log_image(img, **kwargs) + + def log_model(self, path, opt, epoch, fitness_score, best_model=False): + if not self.save_model: + return + + model_metadata = { + "fitness_score": fitness_score[-1], + "epochs_trained": epoch + 1, + "save_period": opt.save_period, + "total_epochs": opt.epochs,} + + model_files = glob.glob(f"{path}/*.pt") + for model_path in model_files: + name = Path(model_path).name + + self.experiment.log_model( + self.model_name, + file_or_folder=model_path, + file_name=name, + metadata=model_metadata, + overwrite=True, + ) + + def check_dataset(self, data_file): + with open(data_file) as f: + data_config = yaml.safe_load(f) + + if data_config['path'].startswith(COMET_PREFIX): + path = data_config['path'].replace(COMET_PREFIX, "") + data_dict = self.download_dataset_artifact(path) + + return data_dict + + self.log_asset(self.opt.data, metadata={"type": "data-config-file"}) + + return check_dataset(data_file) + + def log_predictions(self, image, labelsn, path, shape, predn): + if self.logged_images_count >= self.max_images: + return + detections = predn[predn[:, 4] > self.conf_thres] + iou = box_iou(labelsn[:, 1:], detections[:, :4]) + mask, _ = torch.where(iou > self.iou_thres) + if len(mask) == 0: + return + + filtered_detections = detections[mask] + filtered_labels = labelsn[mask] + + processed_image = (image * 255).to(torch.uint8) + + image_id = path.split("/")[-1].split(".")[0] + image_name = f"{image_id}_curr_epoch_{self.experiment.curr_epoch}" + self.log_image(to_pil(processed_image), name=image_name) + + metadata = [] + for cls, *xyxy in filtered_labels.tolist(): + metadata.append({ + "label": f"{self.class_names[int(cls)]}-gt", + "score": 100, + "box": { + "x": xyxy[0], + "y": xyxy[1], + "x2": xyxy[2], + "y2": xyxy[3]},}) + for *xyxy, conf, cls in filtered_detections.tolist(): + metadata.append({ + "label": f"{self.class_names[int(cls)]}", + "score": conf * 100, + "box": { + "x": xyxy[0], + "y": xyxy[1], + "x2": xyxy[2], + "y2": xyxy[3]},}) + + self.metadata_dict[image_name] = metadata + self.logged_images_count += 1 + + return + + def preprocess_prediction(self, image, labels, shape, pred): + nl, _ = labels.shape[0], pred.shape[0] + + # Predictions + if self.opt.single_cls: + pred[:, 5] = 0 + + predn = pred.clone() + scale_coords(image.shape[1:], predn[:, :4], shape[0], shape[1]) + + labelsn = None + if nl: + tbox = xywh2xyxy(labels[:, 1:5]) # target boxes + scale_coords(image.shape[1:], tbox, shape[0], shape[1]) # native-space labels + labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels + scale_coords(image.shape[1:], predn[:, :4], shape[0], shape[1]) # native-space pred + + return predn, labelsn + + def add_assets_to_artifact(self, artifact, path, asset_path, split): + img_paths = sorted(glob.glob(f"{asset_path}/*")) + label_paths = img2label_paths(img_paths) + + for image_file, label_file in zip(img_paths, label_paths): + image_logical_path, label_logical_path = map(lambda x: 
os.path.relpath(x, path), [image_file, label_file]) + + try: + artifact.add(image_file, logical_path=image_logical_path, metadata={"split": split}) + artifact.add(label_file, logical_path=label_logical_path, metadata={"split": split}) + except ValueError as e: + logger.error('COMET ERROR: Error adding file to Artifact. Skipping file.') + logger.error(f"COMET ERROR: {e}") + continue + + return artifact + + def upload_dataset_artifact(self): + dataset_name = self.data_dict.get("dataset_name", "yolov5-dataset") + path = str((ROOT / Path(self.data_dict["path"])).resolve()) + + metadata = self.data_dict.copy() + for key in ["train", "val", "test"]: + split_path = metadata.get(key) + if split_path is not None: + metadata[key] = split_path.replace(path, "") + + artifact = comet_ml.Artifact(name=dataset_name, artifact_type="dataset", metadata=metadata) + for key in metadata.keys(): + if key in ["train", "val", "test"]: + if isinstance(self.upload_dataset, str) and (key != self.upload_dataset): + continue + + asset_path = self.data_dict.get(key) + if asset_path is not None: + artifact = self.add_assets_to_artifact(artifact, path, asset_path, key) + + self.experiment.log_artifact(artifact) + + return + + def download_dataset_artifact(self, artifact_path): + logged_artifact = self.experiment.get_artifact(artifact_path) + artifact_save_dir = str(Path(self.opt.save_dir) / logged_artifact.name) + logged_artifact.download(artifact_save_dir) + + metadata = logged_artifact.metadata + data_dict = metadata.copy() + data_dict["path"] = artifact_save_dir + data_dict["names"] = {int(k): v for k, v in metadata.get("names").items()} + + data_dict = self.update_data_paths(data_dict) + return data_dict + + def update_data_paths(self, data_dict): + path = data_dict.get("path", "") + + for split in ["train", "val", "test"]: + if data_dict.get(split): + split_path = data_dict.get(split) + data_dict[split] = (f"{path}/{split_path}" if isinstance(split, str) else [ + f"{path}/{x}" for x in split_path]) + + return data_dict + + def on_pretrain_routine_end(self, paths): + if self.opt.resume: + return + + for path in paths: + self.log_asset(str(path)) + + if self.upload_dataset: + if not self.resume: + self.upload_dataset_artifact() + + return + + def on_train_start(self): + self.log_parameters(self.hyp) + + def on_train_epoch_start(self): + return + + def on_train_epoch_end(self, epoch): + self.experiment.curr_epoch = epoch + + return + + def on_train_batch_start(self): + return + + def on_train_batch_end(self, log_dict, step): + self.experiment.curr_step = step + if self.log_batch_metrics and (step % self.comet_log_batch_interval == 0): + self.log_metrics(log_dict, step=step) + + return + + def on_train_end(self, files, save_dir, last, best, epoch, results): + if self.comet_log_predictions: + curr_epoch = self.experiment.curr_epoch + self.experiment.log_asset_data(self.metadata_dict, "image-metadata.json", epoch=curr_epoch) + + for f in files: + self.log_asset(f, metadata={"epoch": epoch}) + self.log_asset(f"{save_dir}/results.csv", metadata={"epoch": epoch}) + + if not self.opt.evolve: + model_path = str(best if best.exists() else last) + name = Path(model_path).name + if self.save_model: + self.experiment.log_model( + self.model_name, + file_or_folder=model_path, + file_name=name, + overwrite=True, + ) + + # Check if running Experiment with Comet Optimizer + if hasattr(self.opt, 'comet_optimizer_id'): + metric = results.get(self.opt.comet_optimizer_metric) + self.experiment.log_other('optimizer_metric_value', metric) + 
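+            # record the final value of the sweep's objective metric on the Experiment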
+ self.finish_run() + + def on_val_start(self): + return + + def on_val_batch_start(self): + return + + def on_val_batch_end(self, batch_i, images, targets, paths, shapes, outputs): + if not (self.comet_log_predictions and ((batch_i + 1) % self.comet_log_prediction_interval == 0)): + return + + for si, pred in enumerate(outputs): + if len(pred) == 0: + continue + + image = images[si] + labels = targets[targets[:, 0] == si, 1:] + shape = shapes[si] + path = paths[si] + predn, labelsn = self.preprocess_prediction(image, labels, shape, pred) + if labelsn is not None: + self.log_predictions(image, labelsn, path, shape, predn) + + return + + def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix): + if self.comet_log_per_class_metrics: + if self.num_classes > 1: + for i, c in enumerate(ap_class): + class_name = self.class_names[c] + self.experiment.log_metrics( + { + 'mAP@.5': ap50[i], + 'mAP@.5:.95': ap[i], + 'precision': p[i], + 'recall': r[i], + 'f1': f1[i], + 'true_positives': tp[i], + 'false_positives': fp[i], + 'support': nt[c]}, + prefix=class_name) + + if self.comet_log_confusion_matrix: + epoch = self.experiment.curr_epoch + class_names = list(self.class_names.values()) + class_names.append("background") + num_classes = len(class_names) + + self.experiment.log_confusion_matrix( + matrix=confusion_matrix.matrix, + max_categories=num_classes, + labels=class_names, + epoch=epoch, + column_label='Actual Category', + row_label='Predicted Category', + file_name=f"confusion-matrix-epoch-{epoch}.json", + ) + + def on_fit_epoch_end(self, result, epoch): + self.log_metrics(result, epoch=epoch) + + def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): + if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1: + self.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) + + def on_params_update(self, params): + self.log_parameters(params) + + def finish_run(self): + self.experiment.end() diff --git a/utils/loggers/comet/comet_utils.py b/utils/loggers/comet/comet_utils.py new file mode 100644 index 000000000000..3cbd45156b57 --- /dev/null +++ b/utils/loggers/comet/comet_utils.py @@ -0,0 +1,150 @@ +import logging +import os +from urllib.parse import urlparse + +try: + import comet_ml +except (ModuleNotFoundError, ImportError): + comet_ml = None + +import yaml + +logger = logging.getLogger(__name__) + +COMET_PREFIX = "comet://" +COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") +COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv("COMET_DEFAULT_CHECKPOINT_FILENAME", "last.pt") + + +def download_model_checkpoint(opt, experiment): + model_dir = f"{opt.project}/{experiment.name}" + os.makedirs(model_dir, exist_ok=True) + + model_name = COMET_MODEL_NAME + model_asset_list = experiment.get_model_asset_list(model_name) + + if len(model_asset_list) == 0: + logger.error(f"COMET ERROR: No checkpoints found for model name : {model_name}") + return + + model_asset_list = sorted( + model_asset_list, + key=lambda x: x["step"], + reverse=True, + ) + logged_checkpoint_map = {asset["fileName"]: asset["assetId"] for asset in model_asset_list} + + resource_url = urlparse(opt.weights) + checkpoint_filename = resource_url.query + + if checkpoint_filename: + asset_id = logged_checkpoint_map.get(checkpoint_filename) + else: + asset_id = logged_checkpoint_map.get(COMET_DEFAULT_CHECKPOINT_FILENAME) + checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME + + if asset_id is None: + logger.error(f"COMET ERROR: Checkpoint 
{checkpoint_filename} not found in the given Experiment") + return + + try: + logger.info(f"COMET INFO: Downloading checkpoint {checkpoint_filename}") + asset_filename = checkpoint_filename + + model_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) + model_download_path = f"{model_dir}/{asset_filename}" + with open(model_download_path, "wb") as f: + f.write(model_binary) + + opt.weights = model_download_path + + except Exception as e: + logger.warning("COMET WARNING: Unable to download checkpoint from Comet") + logger.exception(e) + + +def set_opt_parameters(opt, experiment): + """Update the opts Namespace with parameters + from Comet's ExistingExperiment when resuming a run + + Args: + opt (argparse.Namespace): Namespace of command line options + experiment (comet_ml.APIExperiment): Comet API Experiment object + """ + asset_list = experiment.get_asset_list() + resume_string = opt.resume + + for asset in asset_list: + if asset["fileName"] == "opt.yaml": + asset_id = asset["assetId"] + asset_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) + opt_dict = yaml.safe_load(asset_binary) + for key, value in opt_dict.items(): + setattr(opt, key, value) + opt.resume = resume_string + + # Save hyperparameters to YAML file + # Necessary to pass checks in training script + save_dir = f"{opt.project}/{experiment.name}" + os.makedirs(save_dir, exist_ok=True) + + hyp_yaml_path = f"{save_dir}/hyp.yaml" + with open(hyp_yaml_path, "w") as f: + yaml.dump(opt.hyp, f) + opt.hyp = hyp_yaml_path + + +def check_comet_weights(opt): + """Downloads model weights from Comet and updates the + weights path to point to saved weights location + + Args: + opt (argparse.Namespace): Command Line arguments passed + to YOLOv5 training script + + Returns: + None/bool: Return True if weights are successfully downloaded + else return None + """ + if comet_ml is None: + return + + if isinstance(opt.weights, str): + if opt.weights.startswith(COMET_PREFIX): + api = comet_ml.API() + resource = urlparse(opt.weights) + experiment_path = f"{resource.netloc}{resource.path}" + experiment = api.get(experiment_path) + download_model_checkpoint(opt, experiment) + return True + + return None + + +def check_comet_resume(opt): + """Restores run parameters to its original state based on the model checkpoint + and logged Experiment parameters. 
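
    Example (a sketch; workspace, project and experiment id are placeholders):
        python train.py --resume comet://your_workspace/your_project/experiment_id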
+ + Args: + opt (argparse.Namespace): Command Line arguments passed + to YOLOv5 training script + + Returns: + None/bool: Return True if the run is restored successfully + else return None + """ + if comet_ml is None: + return + + if isinstance(opt.resume, str): + if opt.resume.startswith(COMET_PREFIX): + api = comet_ml.API() + resource = urlparse(opt.resume) + experiment_path = f"{resource.netloc}{resource.path}" + experiment = api.get(experiment_path) + set_opt_parameters(opt, experiment) + download_model_checkpoint(opt, experiment) + + return True + + return None diff --git a/utils/loggers/comet/hpo.py b/utils/loggers/comet/hpo.py new file mode 100644 index 000000000000..eab4df9978cf --- /dev/null +++ b/utils/loggers/comet/hpo.py @@ -0,0 +1,118 @@ +import argparse +import json +import logging +import os +import sys +from pathlib import Path + +import comet_ml + +logger = logging.getLogger(__name__) + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[3] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH + +from train import parse_opt, train +from utils.callbacks import Callbacks +from utils.general import increment_path +from utils.torch_utils import select_device + +# Project Configuration +config = comet_ml.config.get_config() +COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") + + +def get_args(known=False): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path') + parser.add_argument('--cfg', type=str, default='', help='model.yaml path') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') + parser.add_argument('--epochs', type=int, default=300, help='total training epochs') + parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') + parser.add_argument('--rect', action='store_true', help='rectangular training') + parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') + parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') + parser.add_argument('--noval', action='store_true', help='only validate final epoch') + parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') + parser.add_argument('--noplots', action='store_true', help='save no plot files') + parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') + parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') + parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') + parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') + parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') + parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--quad', action='store_true', help='quad dataloader') + parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') + parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') + parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') + parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') + parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') + parser.add_argument('--seed', type=int, default=0, help='Global training seed') + parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') + + # Weights & Biases arguments + parser.add_argument('--entity', default=None, help='W&B: Entity') + parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option') + parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') + parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') + + # Comet Arguments + parser.add_argument("--comet_optimizer_config", type=str, help="Comet: Path to a Comet Optimizer Config File.") + parser.add_argument("--comet_optimizer_id", type=str, help="Comet: ID of the Comet Optimizer sweep.") + parser.add_argument("--comet_optimizer_objective", type=str, help="Comet: Set to 'minimize' or 'maximize'.") + parser.add_argument("--comet_optimizer_metric", type=str, help="Comet: Metric to Optimize.") + parser.add_argument("--comet_optimizer_workers", + type=int, + default=1, + help="Comet: Number of Parallel Workers to use with the Comet Optimizer.") + + return parser.parse_known_args()[0] if known else parser.parse_args() + + +def run(parameters, opt): + hyp_dict = {k: v for k, v in parameters.items() if k not in ["epochs", "batch_size"]} + + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) + opt.batch_size = parameters.get("batch_size") + opt.epochs = parameters.get("epochs") + + device = select_device(opt.device, batch_size=opt.batch_size) + train(hyp_dict, opt, device, callbacks=Callbacks()) + + +if __name__ == "__main__": + opt = get_args(known=True) + + opt.weights = str(opt.weights) + opt.cfg = str(opt.cfg) + opt.data = str(opt.data) + opt.project = str(opt.project) + + optimizer_id = os.getenv("COMET_OPTIMIZER_ID") + if optimizer_id is None: + with open(opt.comet_optimizer_config) as f: + optimizer_config = json.load(f) + optimizer = 
comet_ml.Optimizer(optimizer_config) + else: + optimizer = comet_ml.Optimizer(optimizer_id) + + opt.comet_optimizer_id = optimizer.id + status = optimizer.status() + + opt.comet_optimizer_objective = status["spec"]["objective"] + opt.comet_optimizer_metric = status["spec"]["metric"] + + logger.info("COMET INFO: Starting Hyperparameter Sweep") + for parameter in optimizer.get_parameters(): + run(parameter["parameters"], opt) diff --git a/utils/loggers/comet/optimizer_config.json b/utils/loggers/comet/optimizer_config.json new file mode 100644 index 000000000000..83ddddab6f20 --- /dev/null +++ b/utils/loggers/comet/optimizer_config.json @@ -0,0 +1,209 @@ +{ + "algorithm": "random", + "parameters": { + "anchor_t": { + "type": "discrete", + "values": [ + 2, + 8 + ] + }, + "batch_size": { + "type": "discrete", + "values": [ + 16, + 32, + 64 + ] + }, + "box": { + "type": "discrete", + "values": [ + 0.02, + 0.2 + ] + }, + "cls": { + "type": "discrete", + "values": [ + 0.2 + ] + }, + "cls_pw": { + "type": "discrete", + "values": [ + 0.5 + ] + }, + "copy_paste": { + "type": "discrete", + "values": [ + 1 + ] + }, + "degrees": { + "type": "discrete", + "values": [ + 0, + 45 + ] + }, + "epochs": { + "type": "discrete", + "values": [ + 5 + ] + }, + "fl_gamma": { + "type": "discrete", + "values": [ + 0 + ] + }, + "fliplr": { + "type": "discrete", + "values": [ + 0 + ] + }, + "flipud": { + "type": "discrete", + "values": [ + 0 + ] + }, + "hsv_h": { + "type": "discrete", + "values": [ + 0 + ] + }, + "hsv_s": { + "type": "discrete", + "values": [ + 0 + ] + }, + "hsv_v": { + "type": "discrete", + "values": [ + 0 + ] + }, + "iou_t": { + "type": "discrete", + "values": [ + 0.7 + ] + }, + "lr0": { + "type": "discrete", + "values": [ + 1e-05, + 0.1 + ] + }, + "lrf": { + "type": "discrete", + "values": [ + 0.01, + 1 + ] + }, + "mixup": { + "type": "discrete", + "values": [ + 1 + ] + }, + "momentum": { + "type": "discrete", + "values": [ + 0.6 + ] + }, + "mosaic": { + "type": "discrete", + "values": [ + 0 + ] + }, + "obj": { + "type": "discrete", + "values": [ + 0.2 + ] + }, + "obj_pw": { + "type": "discrete", + "values": [ + 0.5 + ] + }, + "optimizer": { + "type": "categorical", + "values": [ + "SGD", + "Adam", + "AdamW" + ] + }, + "perspective": { + "type": "discrete", + "values": [ + 0 + ] + }, + "scale": { + "type": "discrete", + "values": [ + 0 + ] + }, + "shear": { + "type": "discrete", + "values": [ + 0 + ] + }, + "translate": { + "type": "discrete", + "values": [ + 0 + ] + }, + "warmup_bias_lr": { + "type": "discrete", + "values": [ + 0, + 0.2 + ] + }, + "warmup_epochs": { + "type": "discrete", + "values": [ + 5 + ] + }, + "warmup_momentum": { + "type": "discrete", + "values": [ + 0, + 0.95 + ] + }, + "weight_decay": { + "type": "discrete", + "values": [ + 0, + 0.001 + ] + } + }, + "spec": { + "maxCombo": 0, + "metric": "metrics/mAP_0.5", + "objective": "maximize" + }, + "trials": 1 +} diff --git a/val.py b/val.py index 5427ee7b3619..665d92f9286d 100644 --- a/val.py +++ b/val.py @@ -259,7 +259,7 @@ def run( plot_images(im, targets, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) # labels plot_images(im, output_to_target(out), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred - callbacks.run('on_val_batch_end') + callbacks.run('on_val_batch_end', batch_i, im, targets, paths, shapes, out) # Compute metrics stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy @@ -289,7 +289,7 @@ def run( # Plots if plots: confusion_matrix.plot(save_dir=save_dir, 
names=list(names.values()))
-        callbacks.run('on_val_end')
+        callbacks.run('on_val_end', nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix)

     # Save JSON
     if save_json and len(jdict):

From 5f075eedf221852aab85b4d2b5d98289e13077b4 Mon Sep 17 00:00:00 2001
From: Dhruv Nair
Date: Thu, 8 Sep 2022 11:17:14 -0400
Subject: [PATCH 106/326] Comet changes (#9328)

* add link to comet tutorial from main README

* fix prediction interval bug

---
 README.md                       | 1 +
 utils/loggers/comet/__init__.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 7763d174f92b..da8bf1dad862 100644
--- a/README.md
+++ b/README.md
@@ -157,6 +157,7 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12
 - [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW
 - [ClearML Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) 🌟 NEW
 - [Deci Platform](https://github.com/ultralytics/yolov5/wiki/Deci-Platform) 🌟 NEW
+- [Comet Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet) 🌟 NEW
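
Note: a minimal way to exercise the Comet integration linked above (a sketch,
assuming a Comet account; the API key value is a placeholder):

    pip install comet_ml                 # 1. install the logger
    export COMET_API_KEY=<your_api_key>  # 2. authenticate
    python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt  # 3. metrics stream to Comet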
diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index b168687dd7b2..4ee86dd70d6e 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -133,7 +133,7 @@ def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwar self.comet_log_predictions = COMET_LOG_PREDICTIONS if self.opt.bbox_interval == -1: - self.comet_log_prediction_interval = self.opt.epochs // 10 if self.opt.epochs < 10 else 1 + self.comet_log_prediction_interval = 1 if self.opt.epochs < 10 else self.opt.epochs // 10 else: self.comet_log_prediction_interval = self.opt.bbox_interval From 3cd66b1c3863a8524c6cc564029c29ac783f7310 Mon Sep 17 00:00:00 2001 From: robinned <78896580+robinned@users.noreply.github.com> Date: Thu, 8 Sep 2022 12:00:54 -0700 Subject: [PATCH 107/326] Train.py line 486 typo fix (#9330) fixed issue Signed-off-by: robinned <78896580+robinned@users.noreply.github.com> Signed-off-by: robinned <78896580+robinned@users.noreply.github.com> Co-authored-by: Ayush Chaurasia --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index e16c17c499f0..4eff6e5d645a 100644 --- a/train.py +++ b/train.py @@ -483,7 +483,7 @@ def main(opt, callbacks=Callbacks()): check_requirements() # Resume (from specified or most recent last.pt) - if opt.resume and not check_wandb_resume(opt) and not check_comet_resume(opt) or opt.evolve: + if opt.resume and not check_wandb_resume(opt) and not check_comet_resume(opt) and not opt.evolve: last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run()) opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml opt_data = opt.data # original dataset From 8aa196ce08007aa1033b0e42931c247e1e491321 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=BB=84=E4=B8=8D=E7=9B=88?= <33193090+YellowAndGreen@users.noreply.github.com> Date: Sat, 10 Sep 2022 05:16:07 +0800 Subject: [PATCH 108/326] Add dilated conv support (#9347) * added dilate conv support * added dilate conv support * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update common.py * Update common.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- models/common.py | 16 +++++++++------- utils/torch_utils.py | 1 + 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/models/common.py b/models/common.py index 7ac3a4a29672..c30c8ee94777 100644 --- a/models/common.py +++ b/models/common.py @@ -28,18 +28,20 @@ from utils.torch_utils import copy_attr, smart_inference_mode -def autopad(k, p=None): # kernel, padding - # Pad to 'same' +def autopad(k, p=None, d=1): # kernel, padding, dilation + # Pad to 'same' shape outputs + if d > 1: + k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size if p is None: p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad return p class Conv(nn.Module): - # Standard convolution - def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + # Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation) + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True): super().__init__() - self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) + self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False) self.bn = nn.BatchNorm2d(c2) self.act = 
nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) @@ -51,13 +53,13 @@ def forward_fuse(self, x): class DWConv(Conv): - # Depth-wise convolution class + # Depth-wise convolution def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act) class DWConvTranspose2d(nn.ConvTranspose2d): - # Depth-wise transpose convolution class + # Depth-wise transpose convolution def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0): # ch_in, ch_out, kernel, stride, padding, padding_out super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2)) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index abf0bbc19a98..8a3366ca3e27 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -251,6 +251,7 @@ def fuse_conv_and_bn(conv, bn): kernel_size=conv.kernel_size, stride=conv.stride, padding=conv.padding, + dilation=conv.dilation, groups=conv.groups, bias=True).requires_grad_(False).to(conv.weight.device) From 24bf9cceb406a7e380bdb9e100417318615a78a1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 10 Sep 2022 11:11:56 +0300 Subject: [PATCH 109/326] Update `check_requirements()` single install (#9353) * Update `check_requirements()` single install Faster install and better conflict resolution with single installation Signed-off-by: Glenn Jocher * Update * Update * Update Signed-off-by: Glenn Jocher --- export.py | 12 ++++++------ models/common.py | 4 ++-- utils/general.py | 48 +++++++++++++++++++++++------------------------- val.py | 4 ++-- 4 files changed, 33 insertions(+), 35 deletions(-) diff --git a/export.py b/export.py index 4d0144af9efb..8fed4d3e3661 100644 --- a/export.py +++ b/export.py @@ -126,7 +126,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' @try_export def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')): # YOLOv5 ONNX export - check_requirements(('onnx',)) + check_requirements('onnx') import onnx LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') @@ -182,7 +182,7 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst @try_export def export_openvino(model, file, half, prefix=colorstr('OpenVINO:')): # YOLOv5 OpenVINO export - check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ + check_requirements('openvino-dev') # requires openvino-dev: https://pypi.org/project/openvino-dev/ import openvino.inference_engine as ie LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') @@ -198,7 +198,7 @@ def export_openvino(model, file, half, prefix=colorstr('OpenVINO:')): @try_export def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): # YOLOv5 CoreML export - check_requirements(('coremltools',)) + check_requirements('coremltools') import coremltools as ct LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...') @@ -226,7 +226,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose import tensorrt as trt except Exception: if platform.system() == 'Linux': - check_requirements(('nvidia-tensorrt',), cmds=('-U --index-url https://pypi.ngc.nvidia.com',)) + check_requirements('nvidia-tensorrt', cmds=['-U --index-url https://pypi.ngc.nvidia.com']) import tensorrt as trt if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 @@ -405,7 +405,7 @@ def 
export_edgetpu(file, prefix=colorstr('Edge TPU:')): @try_export def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): # YOLOv5 TensorFlow.js export - check_requirements(('tensorflowjs',)) + check_requirements('tensorflowjs') import re import tensorflowjs as tfjs @@ -516,7 +516,7 @@ def run( # TensorFlow Exports if any((saved_model, pb, tflite, edgetpu, tfjs)): if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 - check_requirements(('flatbuffers==1.12',)) # required before `import tensorflow` + check_requirements('flatbuffers==1.12') # required before `import tensorflow` assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.' assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.' f[5], model = export_saved_model(model.cpu(), diff --git a/models/common.py b/models/common.py index c30c8ee94777..0e01b60e81e5 100644 --- a/models/common.py +++ b/models/common.py @@ -347,7 +347,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, stride, names = int(d['stride']), d['names'] elif dnn: # ONNX OpenCV DNN LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') - check_requirements(('opencv-python>=4.5.4',)) + check_requirements('opencv-python>=4.5.4') net = cv2.dnn.readNetFromONNX(w) elif onnx: # ONNX Runtime LOGGER.info(f'Loading {w} for ONNX Runtime inference...') @@ -362,7 +362,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, stride, names = int(meta['stride']), eval(meta['names']) elif xml: # OpenVINO LOGGER.info(f'Loading {w} for OpenVINO inference...') - check_requirements(('openvino',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ + check_requirements('openvino') # requires openvino-dev: https://pypi.org/project/openvino-dev/ from openvino.runtime import Core, Layout, get_batch ie = Core() if not Path(w).is_file(): # if not *.xml diff --git a/utils/general.py b/utils/general.py index cae63fd9dd21..629df32ebc54 100755 --- a/utils/general.py +++ b/utils/general.py @@ -342,39 +342,37 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals @TryExcept() def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=()): - # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages) + # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages or single package str) prefix = colorstr('red', 'bold', 'requirements:') check_python() # check python version - if isinstance(requirements, (str, Path)): # requirements.txt file - file = Path(requirements) + if isinstance(requirements, Path): # requirements.txt file + file = requirements assert file.exists(), f"{prefix} {file.resolve()} not found, check failed." 
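        # Note (illustrative, not part of this diff): after this rewrite the call
        # styles used elsewhere in this patch series are all accepted:
        #   check_requirements('onnx')                            # single package string
        #   check_requirements(('paddlepaddle', 'x2paddle'))      # tuple/list of packages
        #   check_requirements(exclude=('tensorboard', 'thop'))   # default requirements.txt file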
with file.open() as f: requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] - else: # list or tuple of packages - requirements = [x for x in requirements if x not in exclude] + elif isinstance(requirements, str): + requirements = [requirements] - n = 0 # number of packages updates - for i, r in enumerate(requirements): + s = '' + n = 0 + for r in requirements: try: pkg.require(r) - except Exception: # DistributionNotFound or VersionConflict if requirements not met - s = f"{prefix} {r} not found and is required by YOLOv5" - if install and AUTOINSTALL: # check environment variable - LOGGER.info(f"{s}, attempting auto-update...") - try: - assert check_online(), f"'pip install {r}' skipped (offline)" - LOGGER.info(check_output(f'pip install "{r}" {cmds[i] if cmds else ""}', shell=True).decode()) - n += 1 - except Exception as e: - LOGGER.warning(f'{prefix} {e}') - else: - LOGGER.info(f'{s}. Please install and rerun your command.') - - if n: # if packages updated - source = file.resolve() if 'file' in locals() else requirements - s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ - f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" - LOGGER.info(s) + except (pkg.VersionConflict, pkg.DistributionNotFound): # exception if requirements not met + s += f'"{r}" ' + n += 1 + + if s and install and AUTOINSTALL: # check environment variable + LOGGER.info(f"{prefix} YOLOv5 requirements {s}not found, attempting AutoUpdate...") + try: + assert check_online(), "AutoUpdate skipped (offline)" + LOGGER.info(check_output(f'pip install {s} {" ".join(cmds) if cmds else ""}', shell=True).decode()) + source = file.resolve() if 'file' in locals() else requirements + s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ + f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" + LOGGER.info(s) + except Exception as e: + LOGGER.warning(f'{prefix} {e}') def check_img_size(imgsz, s=32, floor=0): diff --git a/val.py b/val.py index 665d92f9286d..fed5e21577e5 100644 --- a/val.py +++ b/val.py @@ -301,7 +301,7 @@ def run( json.dump(jdict, f) try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb - check_requirements(['pycocotools']) + check_requirements('pycocotools') from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval @@ -360,7 +360,7 @@ def parse_opt(): def main(opt): - check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) + check_requirements(exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 From e9ddc5b5274be1d795a28542159d7c9293efccea Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 10 Sep 2022 12:00:16 +0300 Subject: [PATCH 110/326] Update `check_requirements(args, cmds='')` (#9355) * Update `check_requirements(args, cmds='')` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 2 +- utils/general.py | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/export.py b/export.py index 8fed4d3e3661..cdf5dcddd07a 100644 --- a/export.py +++ b/export.py @@ -226,7 +226,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose import 
tensorrt as trt except Exception: if platform.system() == 'Linux': - check_requirements('nvidia-tensorrt', cmds=['-U --index-url https://pypi.ngc.nvidia.com']) + check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com') import tensorrt as trt if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 diff --git a/utils/general.py b/utils/general.py index 629df32ebc54..187d2c6b2d4a 100755 --- a/utils/general.py +++ b/utils/general.py @@ -341,13 +341,13 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals @TryExcept() -def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=()): +def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''): # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages or single package str) prefix = colorstr('red', 'bold', 'requirements:') check_python() # check python version if isinstance(requirements, Path): # requirements.txt file - file = requirements - assert file.exists(), f"{prefix} {file.resolve()} not found, check failed." + file = requirements.resolve() + assert file.exists(), f"{prefix} {file} not found, check failed." with file.open() as f: requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] elif isinstance(requirements, str): @@ -366,8 +366,8 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta LOGGER.info(f"{prefix} YOLOv5 requirements {s}not found, attempting AutoUpdate...") try: assert check_online(), "AutoUpdate skipped (offline)" - LOGGER.info(check_output(f'pip install {s} {" ".join(cmds) if cmds else ""}', shell=True).decode()) - source = file.resolve() if 'file' in locals() else requirements + LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode()) + source = file if 'file' in locals() else requirements s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" LOGGER.info(s) From 57ef676af2358d70bd5902059531655789135510 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 10 Sep 2022 12:14:31 +0300 Subject: [PATCH 111/326] Update `check_requirements()` multiple string (#9356) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 187d2c6b2d4a..33232efac9fd 100755 --- a/utils/general.py +++ b/utils/general.py @@ -363,7 +363,7 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta n += 1 if s and install and AUTOINSTALL: # check environment variable - LOGGER.info(f"{prefix} YOLOv5 requirements {s}not found, attempting AutoUpdate...") + LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") try: assert check_online(), "AutoUpdate skipped (offline)" LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode()) From e3e5122f82b0d1f24c11a90b2377fbb5a1673274 Mon Sep 17 00:00:00 2001 From: Katteria <39751846+kisaragychihaya@users.noreply.github.com> Date: Sat, 10 Sep 2022 17:20:46 +0800 Subject: [PATCH 112/326] Add PaddlePaddle export and inference (#9240) * Add PaddlePaddle Model Export Test on Yolov5 DockerEnviroment with paddlepaddle-gpu v2.2 Signed-off-by: Katteria <39751846+kisaragychihaya@users.noreply.github.com> * 
[pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup Paddle Export Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update common.py Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update export.py Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher * Use PyTorch2Paddle Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Paddle no longer requires ONNX Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update benchmarks.py Signed-off-by: Glenn Jocher * Add inference code of PaddlePaddle Signed-off-by: Katteria <39751846+kisaragychihaya@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update common.py Signed-off-by: Katteria <39751846+kisaragychihaya@users.noreply.github.com> * Update common.py Signed-off-by: Glenn Jocher * Add paddlepaddle-gpu install if cuda Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update common.py Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher Signed-off-by: Katteria <39751846+kisaragychihaya@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- export.py | 72 +++++++++++++++++----------- models/common.py | 114 ++++++++++++++++++++++++++------------------ utils/benchmarks.py | 2 +- 3 files changed, 112 insertions(+), 76 deletions(-) diff --git a/export.py b/export.py index cdf5dcddd07a..262b11a1a268 100644 --- a/export.py +++ b/export.py @@ -15,6 +15,7 @@ TensorFlow Lite | `tflite` | yolov5s.tflite TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite TensorFlow.js | `tfjs` | yolov5s_web_model/ +PaddlePaddle | `paddle` | yolov5s_paddle_model/ Requirements: $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU @@ -54,7 +55,6 @@ import pandas as pd import torch -import yaml from torch.utils.mobile_optimizer import optimize_for_mobile FILE = Path(__file__).resolve() @@ -68,7 +68,7 @@ from models.yolo import ClassificationModel, Detect from utils.dataloaders import LoadImages from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version, - check_yaml, colorstr, file_size, get_default_args, print_args, url2file) + check_yaml, colorstr, file_size, get_default_args, print_args, url2file, yaml_save) from utils.torch_utils import select_device, smart_inference_mode @@ -85,7 +85,8 @@ def export_formats(): ['TensorFlow GraphDef', 'pb', '.pb', True, True], ['TensorFlow Lite', 'tflite', '.tflite', True, False], ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False], - ['TensorFlow.js', 'tfjs', '_web_model', False, False],] + ['TensorFlow.js', 'tfjs', '_web_model', False, False], + ['PaddlePaddle', 'paddle', 
'_paddle_model', True, True],] return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU']) @@ -180,7 +181,7 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst @try_export -def export_openvino(model, file, half, prefix=colorstr('OpenVINO:')): +def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')): # YOLOv5 OpenVINO export check_requirements('openvino-dev') # requires openvino-dev: https://pypi.org/project/openvino-dev/ import openvino.inference_engine as ie @@ -189,9 +190,23 @@ def export_openvino(model, file, half, prefix=colorstr('OpenVINO:')): f = str(file).replace('.pt', f'_openvino_model{os.sep}') cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}" - subprocess.check_output(cmd.split()) # export - with open(Path(f) / file.with_suffix('.yaml').name, 'w') as g: - yaml.dump({'stride': int(max(model.stride)), 'names': model.names}, g) # add metadata.yaml + subprocess.run(cmd.split(), check=True, env=os.environ) # export + yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml + return f, None + + +@try_export +def export_paddle(model, im, file, metadata, prefix=colorstr('PaddlePaddle:')): + # YOLOv5 Paddle export + check_requirements(('paddlepaddle', 'x2paddle')) + import x2paddle + from x2paddle.convert import pytorch2paddle + + LOGGER.info(f'\n{prefix} starting export with X2Paddle {x2paddle.__version__}...') + f = str(file).replace('.pt', f'_paddle_model{os.sep}') + + pytorch2paddle(module=model, save_dir=f, jit_type='trace', input_examples=[im]) # export + yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml return f, None @@ -464,7 +479,7 @@ def run( fmts = tuple(export_formats()['Argument'][1:]) # --include arguments flags = [x in include for x in fmts] assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}' - jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = flags # export booleans + jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = flags # export booleans file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # PyTorch weights # Load PyTorch model @@ -497,47 +512,48 @@ def run( if half and not coreml: im, model = im.half(), model.half() # to FP16 shape = tuple((y[0] if isinstance(y, tuple) else y).shape) # model output shape + metadata = {'stride': int(max(model.stride)), 'names': model.names} # model metadata LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)") # Exports - f = [''] * 10 # exported filenames + f = [''] * len(fmts) # exported filenames warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning - if jit: + if jit: # TorchScript f[0], _ = export_torchscript(model, im, file, optimize) if engine: # TensorRT required before ONNX f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose) if onnx or xml: # OpenVINO requires ONNX f[2], _ = export_onnx(model, im, file, opset, train, dynamic, simplify) if xml: # OpenVINO - f[3], _ = export_openvino(model, file, half) - if coreml: + f[3], _ = export_openvino(file, metadata, half) + if coreml: # CoreML f[4], _ = export_coreml(model, im, file, int8, half) - - # TensorFlow Exports - if any((saved_model, pb, tflite, edgetpu, tfjs)): + if any((saved_model, pb, tflite, edgetpu, 
tfjs)): # TensorFlow formats if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 check_requirements('flatbuffers==1.12') # required before `import tensorflow` assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.' assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.' - f[5], model = export_saved_model(model.cpu(), - im, - file, - dynamic, - tf_nms=nms or agnostic_nms or tfjs, - agnostic_nms=agnostic_nms or tfjs, - topk_per_class=topk_per_class, - topk_all=topk_all, - iou_thres=iou_thres, - conf_thres=conf_thres, - keras=keras) + f[5], s_model = export_saved_model(model.cpu(), + im, + file, + dynamic, + tf_nms=nms or agnostic_nms or tfjs, + agnostic_nms=agnostic_nms or tfjs, + topk_per_class=topk_per_class, + topk_all=topk_all, + iou_thres=iou_thres, + conf_thres=conf_thres, + keras=keras) if pb or tfjs: # pb prerequisite to tfjs - f[6], _ = export_pb(model, file) + f[6], _ = export_pb(s_model, file) if tflite or edgetpu: - f[7], _ = export_tflite(model, im, file, int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms) + f[7], _ = export_tflite(s_model, im, file, int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms) if edgetpu: f[8], _ = export_edgetpu(file) if tfjs: f[9], _ = export_tfjs(file) + if paddle: # PaddlePaddle + f[10], _ = export_paddle(model, im, file, metadata) # Finish f = [str(x) for x in f if x] # filter out '' and None diff --git a/models/common.py b/models/common.py index 0e01b60e81e5..396b5de0b505 100644 --- a/models/common.py +++ b/models/common.py @@ -320,14 +320,16 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, # TensorFlow GraphDef: *.pb # TensorFlow Lite: *.tflite # TensorFlow Edge TPU: *_edgetpu.tflite + # PaddlePaddle: *_paddle_model from models.experimental import attempt_download, attempt_load # scoped to avoid circular import super().__init__() w = str(weights[0] if isinstance(weights, list) else weights) - pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = self._model_type(w) # get backend + pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = self._model_type(w) # type w = attempt_download(w) # download if not local fp16 &= pt or jit or onnx or engine # FP16 stride = 32 # default stride + cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA if pt: # PyTorch model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse) @@ -351,7 +353,6 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, net = cv2.dnn.readNetFromONNX(w) elif onnx: # ONNX Runtime LOGGER.info(f'Loading {w} for ONNX Runtime inference...') - cuda = torch.cuda.is_available() and device.type != 'cpu' check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) import onnxruntime providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] @@ -408,48 +409,60 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, LOGGER.info(f'Loading {w} for CoreML inference...') import coremltools as ct model = ct.models.MLModel(w) - else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) - if saved_model: # SavedModel - LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...') - import tensorflow as tf - keras = False # assume TF1 saved_model - model = 
tf.keras.models.load_model(w) if keras else tf.saved_model.load(w) - elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt - LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...') + elif saved_model: # TF SavedModel + LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...') + import tensorflow as tf + keras = False # assume TF1 saved_model + model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w) + elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt + LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...') + import tensorflow as tf + + def wrap_frozen_graph(gd, inputs, outputs): + x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped + ge = x.graph.as_graph_element + return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) + + gd = tf.Graph().as_graph_def() # TF GraphDef + with open(w, 'rb') as f: + gd.ParseFromString(f.read()) + frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs="Identity:0") + elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python + try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu + from tflite_runtime.interpreter import Interpreter, load_delegate + except ImportError: import tensorflow as tf - - def wrap_frozen_graph(gd, inputs, outputs): - x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped - ge = x.graph.as_graph_element - return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) - - gd = tf.Graph().as_graph_def() # graph_def - with open(w, 'rb') as f: - gd.ParseFromString(f.read()) - frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs="Identity:0") - elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python - try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu - from tflite_runtime.interpreter import Interpreter, load_delegate - except ImportError: - import tensorflow as tf - Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate, - if edgetpu: # Edge TPU https://coral.ai/software/#edgetpu-runtime - LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') - delegate = { - 'Linux': 'libedgetpu.so.1', - 'Darwin': 'libedgetpu.1.dylib', - 'Windows': 'edgetpu.dll'}[platform.system()] - interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)]) - else: # Lite - LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') - interpreter = Interpreter(model_path=w) # load TFLite model - interpreter.allocate_tensors() # allocate - input_details = interpreter.get_input_details() # inputs - output_details = interpreter.get_output_details() # outputs - elif tfjs: - raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported') - else: - raise NotImplementedError(f'ERROR: {w} is not a supported format') + Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate, + if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime + LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') + delegate = { + 'Linux': 'libedgetpu.so.1', + 'Darwin': 'libedgetpu.1.dylib', + 'Windows': 'edgetpu.dll'}[platform.system()] + interpreter = Interpreter(model_path=w, 
experimental_delegates=[load_delegate(delegate)]) + else: # TFLite + LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') + interpreter = Interpreter(model_path=w) # load TFLite model + interpreter.allocate_tensors() # allocate + input_details = interpreter.get_input_details() # inputs + output_details = interpreter.get_output_details() # outputs + elif tfjs: # TF.js + raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported') + elif paddle: # PaddlePaddle + LOGGER.info(f'Loading {w} for PaddlePaddle inference...') + check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle') + import paddle.inference as pdi + if not Path(w).is_file(): # if not *.pdmodel + w = next(Path(w).rglob('*.pdmodel')) # get *.xml file from *_openvino_model dir + weights = Path(w).with_suffix('.pdiparams') + config = pdi.Config(str(w), str(weights)) + if cuda: + config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0) + predictor = pdi.create_predictor(config) + input_names = predictor.get_input_names() + input_handle = predictor.get_input_handle(input_names[0]) + else: + raise NotImplementedError(f'ERROR: {w} is not a supported format') # class names if 'names' not in locals(): @@ -502,6 +515,13 @@ def forward(self, im, augment=False, visualize=False): else: k = 'var_' + str(sorted(int(k.replace('var_', '')) for k in y)[-1]) # output key y = y[k] # output + elif self.paddle: # PaddlePaddle + im = im.cpu().numpy().astype("float32") + self.input_handle.copy_from_cpu(im) + self.predictor.run() + output_names = self.predictor.get_output_names() + output_handle = self.predictor.get_output_handle(output_names[0]) + y = output_handle.copy_to_cpu() else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) if self.saved_model: # SavedModel @@ -542,13 +562,13 @@ def warmup(self, imgsz=(1, 3, 640, 640)): def _model_type(p='path/to/model.pt'): # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx from export import export_formats - suffixes = list(export_formats().Suffix) + ['.xml'] # export suffixes - check_suffix(p, suffixes) # checks + sf = list(export_formats().Suffix) + ['.xml'] # export suffixes + check_suffix(p, sf) # checks p = Path(p).name # eliminate trailing separators - pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, xml2 = (s in p for s in suffixes) + pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, xml2 = (s in p for s in sf) xml |= xml2 # *_openvino_model or *.xml tflite &= not edgetpu # *.tflite - return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs + return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle @staticmethod def _load_metadata(f=Path('path/to/meta.yaml')): diff --git a/utils/benchmarks.py b/utils/benchmarks.py index d5f4c1d61fbe..9d5c7f2965d5 100644 --- a/utils/benchmarks.py +++ b/utils/benchmarks.py @@ -61,7 +61,7 @@ def run( device = select_device(device) for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU) try: - assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported + assert i not in (9, 10, 11), 'inference not supported' # Edge TPU, TF.js and Paddle are unsupported assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML if 'cpu' in device.type: assert cpu, 'inference not supported on CPU' From 4e8504abd9c1a7287dfcf9f96dfa04f061086cca Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 10 Sep 2022 13:25:01 +0300 Subject: [PATCH 113/326] PaddlePaddle Usage examples (#9358) --- classify/predict.py | 1 + classify/val.py | 1 + detect.py | 1 + export.py | 1 + models/common.py | 2 +- val.py | 1 + 6 files changed, 6 insertions(+), 1 deletion(-) diff --git a/classify/predict.py b/classify/predict.py index 701b5b1ac92d..878cf48b6fef 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -22,6 +22,7 @@ yolov5s-cls.pb # TensorFlow GraphDef yolov5s-cls.tflite # TensorFlow Lite yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU + yolov5s-cls_paddle_model # PaddlePaddle """ import argparse diff --git a/classify/val.py b/classify/val.py index bf808bc21a84..3c16ec8092d8 100644 --- a/classify/val.py +++ b/classify/val.py @@ -17,6 +17,7 @@ yolov5s-cls.pb # TensorFlow GraphDef yolov5s-cls.tflite # TensorFlow Lite yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU + yolov5s-cls_paddle_model # PaddlePaddle """ import argparse diff --git a/detect.py b/detect.py index 69a1bf13aac6..a69606a3dff9 100644 --- a/detect.py +++ b/detect.py @@ -22,6 +22,7 @@ yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite yolov5s_edgetpu.tflite # TensorFlow Edge TPU + yolov5s_paddle_model # PaddlePaddle """ import argparse diff --git a/export.py b/export.py index 262b11a1a268..9d33024a9ca4 100644 --- a/export.py +++ b/export.py @@ -35,6 +35,7 @@ yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite yolov5s_edgetpu.tflite # TensorFlow Edge TPU + yolov5s_paddle_model # PaddlePaddle TensorFlow.js: $ cd .. 
&& git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example diff --git a/models/common.py b/models/common.py index 396b5de0b505..c601aacc885c 100644 --- a/models/common.py +++ b/models/common.py @@ -312,7 +312,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, # PyTorch: weights = *.pt # TorchScript: *.torchscript # ONNX Runtime: *.onnx - # ONNX OpenCV DNN: *.onnx with --dnn + # ONNX OpenCV DNN: *.onnx --dnn # OpenVINO: *.xml # CoreML: *.mlmodel # TensorRT: *.engine diff --git a/val.py b/val.py index fed5e21577e5..4b0bdddae3b1 100644 --- a/val.py +++ b/val.py @@ -16,6 +16,7 @@ yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite yolov5s_edgetpu.tflite # TensorFlow Edge TPU + yolov5s_paddle_model # PaddlePaddle """ import argparse From 2b5c9a83ec4953c68159a924b338a646554a4490 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 10 Sep 2022 22:24:46 +0300 Subject: [PATCH 114/326] labels.jpg names fix (#9361) Partially resolves https://github.com/ultralytics/yolov5/issues/9360 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index 0f322b6b5844..0530d0abdf48 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -364,7 +364,7 @@ def plot_labels(labels, names=(), save_dir=Path('')): ax[0].set_ylabel('instances') if 0 < len(names) < 30: ax[0].set_xticks(range(len(names))) - ax[0].set_xticklabels(names, rotation=90, fontsize=10) + ax[0].set_xticklabels(list(names.values()), rotation=90, fontsize=10) else: ax[0].set_xlabel('classes') sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) From cafdd189397992cf93ec0ad6b76929c60ff09a17 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 10 Sep 2022 22:58:24 +0300 Subject: [PATCH 115/326] Exclude `ipython` from hubconf.py `check_requirements()` (#9362) Exclude ipython from hubconf.py `check_requirements()` Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- hubconf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hubconf.py b/hubconf.py index bffe2d588b4f..2f05565629a5 100644 --- a/hubconf.py +++ b/hubconf.py @@ -37,7 +37,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo if not verbose: LOGGER.setLevel(logging.WARNING) - check_requirements(exclude=('tensorboard', 'thop', 'opencv-python')) + check_requirements(exclude=('ipython', 'opencv-python', 'tensorboard', 'thop')) name = Path(name) path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path try: From 23d0456b08cac22f783d63292cc7c2bf87a19a60 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 10 Sep 2022 23:55:18 +0300 Subject: [PATCH 116/326] `torch.jit.trace()` fix (#9363) * Update common.py Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 3 +++ models/common.py | 1 + 2 files changed, 4 insertions(+) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 6fb277676959..a83f997cbfc2 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -119,9 +119,12 @@ jobs: python export.py --weights $m.pt --img 64 --include torchscript # export python - < Date: Sun, 11 Sep 2022 13:56:51 +0300 Subject: [PATCH 117/326] AMP Check fix (#9367) Resolves https://github.com/ultralytics/yolov5/issues/9365 Signed-off-by: Glenn Jocher Signed-off-by: 
Glenn Jocher --- utils/general.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 33232efac9fd..f5fb2c93a3d5 100755 --- a/utils/general.py +++ b/utils/general.py @@ -17,6 +17,7 @@ import sys import time import urllib +from copy import deepcopy from datetime import datetime from itertools import repeat from multiprocessing.pool import ThreadPool @@ -535,7 +536,7 @@ def amp_allclose(model, im): f = ROOT / 'data' / 'images' / 'bus.jpg' # image to check im = f if f.exists() else 'https://ultralytics.com/images/bus.jpg' if check_online() else np.ones((640, 640, 3)) try: - assert amp_allclose(model, im) or amp_allclose(DetectMultiBackend('yolov5n.pt', device), im) + assert amp_allclose(deepcopy(model), im) or amp_allclose(DetectMultiBackend('yolov5n.pt', device), im) LOGGER.info(f'{prefix}checks passed ✅') return True except Exception: From a4ed9888938a090631ca4dba5be6363f8b66575c Mon Sep 17 00:00:00 2001 From: Jiacong Fang Date: Wed, 14 Sep 2022 05:50:23 +0800 Subject: [PATCH 118/326] Remove duplicate line in setup.cfg (#9380) --- setup.cfg | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 020a75740e97..f12995da3e8e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -34,7 +34,6 @@ ignore = F401 # module imported but unused W504 # line break after binary operator E127 # continuation line over-indented for visual indent - W504 # line break after binary operator E231 # missing whitespace after ‘,’, ‘;’, or ‘:’ E501 # line too long F403 # ‘from module import *’ used; unable to detect undefined names From 1323b4805319ca18e4ffd8f93f3e855b87093ad4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 15 Sep 2022 19:05:10 +0200 Subject: [PATCH 119/326] Remove `.train()` mode exports (#9429) * Remove `.train()` mode exports No common use cases. 
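With the train-mode branch removed, export always runs the model in eval mode and constant folding is always applied. A minimal sketch of the resulting ONNX call (illustrative only; argument values mirror the export.py diff below):

    import torch

    def export_onnx_eval(model, im, f='yolov5s.onnx', opset=12):
        model.eval()  # eval mode: Detect() builds its grids, no training graph
        torch.onnx.export(
            model.cpu(), im.cpu(), f,
            opset_version=opset,
            do_constant_folding=True,  # unconditional now that --train is gone
            input_names=['images'],
            output_names=['output'])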
Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/export.py b/export.py index 9d33024a9ca4..1b25f3f8221b 100644 --- a/export.py +++ b/export.py @@ -126,7 +126,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' @try_export -def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')): +def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')): # YOLOv5 ONNX export check_requirements('onnx') import onnx @@ -140,8 +140,7 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst f, verbose=False, opset_version=opset, - training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, - do_constant_folding=not train, + do_constant_folding=True, input_names=['images'], output_names=['output'], dynamic_axes={ @@ -459,7 +458,6 @@ def run( include=('torchscript', 'onnx'), # include formats half=False, # FP16 half-precision export inplace=False, # set YOLOv5 Detect() inplace=True - train=False, # model.train() mode keras=False, # use Keras optimize=False, # TorchScript: optimize for mobile int8=False, # CoreML/TF INT8 quantization @@ -501,7 +499,7 @@ def run( im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection # Update model - model.train() if train else model.eval() # training mode = no Detect() layer grid construction + model.eval() for k, m in model.named_modules(): if isinstance(m, Detect): m.inplace = inplace @@ -524,7 +522,7 @@ def run( if engine: # TensorRT required before ONNX f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose) if onnx or xml: # OpenVINO requires ONNX - f[2], _ = export_onnx(model, im, file, opset, train, dynamic, simplify) + f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify) if xml: # OpenVINO f[3], _ = export_openvino(file, metadata, half) if coreml: # CoreML @@ -578,7 +576,6 @@ def parse_opt(): parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--half', action='store_true', help='FP16 half-precision export') parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') - parser.add_argument('--train', action='store_true', help='model.train() mode') parser.add_argument('--keras', action='store_true', help='TF: use Keras') parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') From 36cb05b7b211d4c5d99586dd49d3195de16e4485 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 15 Sep 2022 23:28:33 +0200 Subject: [PATCH 120/326] Continue on Docker arm64 failure (#9430) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/workflows/docker.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index c89d0ada3219..67ef565474a4 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -30,6 +30,7 @@ jobs: - name: Build and push arm64 image uses: docker/build-push-action@v3 + continue-on-error: true with: context: . 
platforms: linux/arm64 From 65afaa78beaa3d68d457e9c49109dc6327003962 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 15 Sep 2022 23:53:36 +0200 Subject: [PATCH 121/326] Continue on Docker failure (all backends) (#9432) Continue on Docker failure (all) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 67ef565474a4..f9eec3bd839e 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -12,6 +12,7 @@ jobs: if: github.repository == 'ultralytics/yolov5' name: Push Docker image to Docker Hub runs-on: ubuntu-latest + continue-on-error: true steps: - name: Checkout repo uses: actions/checkout@v3 @@ -30,7 +31,6 @@ jobs: - name: Build and push arm64 image uses: docker/build-push-action@v3 - continue-on-error: true with: context: . platforms: linux/arm64 From abea53ea5b7d4eba6b58535d31e17336912d0d1f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Sep 2022 00:10:10 +0200 Subject: [PATCH 122/326] Continue on Docker fail (all backends) fix (#9433) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/workflows/docker.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index f9eec3bd839e..1d0bd30b22cb 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -12,7 +12,6 @@ jobs: if: github.repository == 'ultralytics/yolov5' name: Push Docker image to Docker Hub runs-on: ubuntu-latest - continue-on-error: true steps: - name: Checkout repo uses: actions/checkout@v3 @@ -31,6 +30,7 @@ jobs: - name: Build and push arm64 image uses: docker/build-push-action@v3 + continue-on-error: true with: context: . platforms: linux/arm64 @@ -40,6 +40,7 @@ jobs: - name: Build and push CPU image uses: docker/build-push-action@v3 + continue-on-error: true with: context: . file: utils/docker/Dockerfile-cpu @@ -48,6 +49,7 @@ jobs: - name: Build and push GPU image uses: docker/build-push-action@v3 + continue-on-error: true with: context: . 
file: utils/docker/Dockerfile From f9869f7ffdbce757f260d28a6b799c5fa50263ee Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Fri, 16 Sep 2022 03:42:46 +0530 Subject: [PATCH 123/326] YOLOv5 segmentation model support (#9052) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix duplicate plots.py * Fix check_font() * # torch.use_deterministic_algorithms(True) * update doc detect->predict * Resolve precommit for segment/train and segment/val * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Resolve precommit for utils/segment * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Resolve precommit min_wh * Resolve precommit utils/segment/plots * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Resolve precommit utils/segment/general * Align NMS-seg closer to NMS * restore deterministic init_seeds code * remove easydict dependency * update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * restore output_to_target mask * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * cleanup * Remove unused ImageFont import * Unified NMS * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * DetectMultiBackend compatibility * segment/predict.py update * update plot colors * fix bbox shifted * sort bbox by confidence * enable overlap by default * Merge detect/segment output_to_target() function * Start segmentation CI * fix plots * Update ci-testing.yml * fix training whitespace * optimize process mask functions (can we merge both?) 
* Update predict/detect * Update plot_images * Update plot_images_and_masks * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * Add train to CI * fix precommit * fix precommit CI * fix precommit pycocotools * fix val float issues * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix masks float float issues * suppress errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix no-predictions plotting bug * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add CSV Logger * fix val len(plot_masks) * speed up evaluation * fix process_mask * fix plots * update segment/utils build_targets * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * optimize utils/segment/general crop() * optimize utils/segment/general crop() 2 * minor updates * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * torch.where revert * downsample only if different shape * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * loss cleanup * loss cleanup 2 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * loss cleanup 3 * update project names * Rename -seg yamls from _underscore to -dash * prepare for yolov5n-seg.pt * precommit space fix * add coco128-seg.yaml * update coco128-seg comments * cleanup val.py * Major val.py cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * precommit fix * precommit fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * optional pycocotools * remove CI pip install pycocotools (auto-installed now) * seg yaml fix * optimize mask_iou() and masks_iou() * threaded fix * Major train.py update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Major segments/val/process_batch() update * yolov5/val updates from segment * process_batch numpy/tensor fix * opt-in to pycocotools with --save-json * threaded pycocotools ops for 2x speed increase * Avoid permute contiguous if possible * Add max_det=300 argument to both val.py and segment/val.py * fix onnx_dynamic * speed up pycocotools ops * faster process_mask(upsample=True) for predict * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * eliminate permutations for process_mask(upsample=True) * eliminate permute-contiguous in crop(), use native dimension order * cleanup comment * Add Proto() module * fix class count * fix anchor order * broadcast mask_gti in loss for speed * Cleanup seg loss * faster indexing * faster indexing fix * faster indexing fix2 * revert faster indexing * fix validation plotting * Loss cleanup and mxyxy simplification * Loss cleanup and mxyxy simplification 2 * revert validation plotting * replace missing tanh * Eliminate last permutation * delete unneeded .float() * Remove MaskIOULoss and crop(if HWC) * Final v6.3 SegmentationModel architecture updates * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add support for TF export * remove debugger trace * add call * update * update * Merge master * Merge master * [pre-commit.ci] auto fixes 
from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dataloaders.py * Restore CI * Update dataloaders.py * Fix TF/TFLite export for segmentation model * Merge master * Cleanup predict.py mask plotting * cleanup scale_masks() * rename scale_masks to scale_image * cleanup/optimize plot_masks * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add Annotator.masks() * Annotator.masks() fix * Update plots.py * Annotator mask optimization * Rename crop() to crop_mask() * Do not crop in predict.py * crop always * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Merge master * Add vid-stride from master PR * Update seg model outputs * Update seg model outputs * Add segmentation benchmarks * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add segmentation benchmarks * Add segmentation benchmarks * Add segmentation benchmarks * Fix DetectMultiBackend for OpenVINO * update Annotator.masks * fix val plot * revert val plot * clean up * revert pil * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix CI error * fix predict log * remove upsample * update interpolate * fix validation plot logging * Annotator.masks() cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Remove segmentation_model definition * Restore 0.99999 decimals Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher Co-authored-by: Laughing-q <1185102784@qq.com> Co-authored-by: Jiacong Fang --- .github/workflows/ci-testing.yml | 22 +- utils/benchmarks.py => benchmarks.py | 20 +- data/coco128-seg.yaml | 101 ++++ detect.py | 4 +- models/common.py | 18 +- models/segment/yolov5l-seg.yaml | 48 ++ models/segment/yolov5m-seg.yaml | 48 ++ models/segment/yolov5n-seg.yaml | 48 ++ models/segment/yolov5s-seg.yaml | 48 ++ models/segment/yolov5x-seg.yaml | 48 ++ models/tf.py | 36 +- models/yolo.py | 58 ++- segment/predict.py | 266 +++++++++++ segment/train.py | 676 +++++++++++++++++++++++++++ segment/val.py | 471 +++++++++++++++++++ utils/dataloaders.py | 1 + utils/general.py | 45 +- utils/metrics.py | 10 +- utils/plots.py | 71 ++- utils/segment/__init__.py | 0 utils/segment/augmentations.py | 104 +++++ utils/segment/dataloaders.py | 330 +++++++++++++ utils/segment/general.py | 120 +++++ utils/segment/loss.py | 186 ++++++++ utils/segment/metrics.py | 210 +++++++++ utils/segment/plots.py | 143 ++++++ val.py | 30 +- 27 files changed, 3091 insertions(+), 71 deletions(-) rename utils/benchmarks.py => benchmarks.py (87%) create mode 100644 data/coco128-seg.yaml create mode 100644 models/segment/yolov5l-seg.yaml create mode 100644 models/segment/yolov5m-seg.yaml create mode 100644 models/segment/yolov5n-seg.yaml create mode 100644 models/segment/yolov5s-seg.yaml create mode 100644 models/segment/yolov5x-seg.yaml create mode 100644 segment/predict.py create mode 100644 segment/train.py create mode 100644 segment/val.py mode change 100755 => 100644 utils/dataloaders.py mode change 100755 => 100644 utils/general.py create mode 100644 utils/segment/__init__.py create mode 100644 utils/segment/augmentations.py create mode 100644 utils/segment/dataloaders.py create mode 100644 utils/segment/general.py create mode 100644 utils/segment/loss.py create mode 100644 utils/segment/metrics.py create mode 
100644 utils/segment/plots.py diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index a83f997cbfc2..537ba96e7225 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -15,6 +15,7 @@ jobs: Benchmarks: runs-on: ${{ matrix.os }} strategy: + fail-fast: false matrix: os: [ ubuntu-latest ] python-version: [ '3.9' ] # requires python<=3.9 @@ -37,9 +38,12 @@ jobs: python --version pip --version pip list - - name: Run benchmarks + - name: Benchmark DetectionModel + run: | + python benchmarks.py --data coco128.yaml --weights ${{ matrix.model }}.pt --img 320 --hard-fail 0.29 + - name: Benchmark SegmentationModel run: | - python utils/benchmarks.py --weights ${{ matrix.model }}.pt --img 320 --hard-fail 0.29 + python benchmarks.py --data coco128-seg.yaml --weights ${{ matrix.model }}-seg.pt --img 320 Tests: timeout-minutes: 60 @@ -126,6 +130,20 @@ jobs: model(im) # warmup, build grids for trace torch.jit.trace(model, [im]) EOF + - name: Test segmentation + shell: bash # for Windows compatibility + run: | + m=${{ matrix.model }}-seg # official weights + b=runs/train-seg/exp/weights/best # best.pt checkpoint + python segment/train.py --imgsz 64 --batch 32 --weights $m.pt --cfg $m.yaml --epochs 1 --device cpu # train + python segment/train.py --imgsz 64 --batch 32 --weights '' --cfg $m.yaml --epochs 1 --device cpu # train + for d in cpu; do # devices + for w in $m $b; do # weights + python segment/val.py --imgsz 64 --batch 32 --weights $w.pt --device $d # val + python segment/predict.py --imgsz 64 --weights $w.pt --device $d # predict + python export.py --weights $w.pt --img 64 --include torchscript --device $d # export + done + done - name: Test classification shell: bash # for Windows compatibility run: | diff --git a/utils/benchmarks.py b/benchmarks.py similarity index 87% rename from utils/benchmarks.py rename to benchmarks.py index 9d5c7f2965d5..58e083c95d55 100644 --- a/utils/benchmarks.py +++ b/benchmarks.py @@ -34,16 +34,19 @@ import pandas as pd FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # YOLOv5 root directory +ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH # ROOT = ROOT.relative_to(Path.cwd()) # relative import export -import val +from models.experimental import attempt_load +from models.yolo import SegmentationModel +from segment.val import run as val_seg from utils import notebook_init from utils.general import LOGGER, check_yaml, file_size, print_args from utils.torch_utils import select_device +from val import run as val_det def run( @@ -59,6 +62,7 @@ def run( ): y, t = [], time.time() device = select_device(device) + model_type = type(attempt_load(weights, fuse=False)) # DetectionModel, SegmentationModel, etc. 
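# A quick standalone check of the model-type dispatch above (illustrative sketch;
# assumes a local '-seg' checkpoint and the repo root on sys.path):
#
#   from models.experimental import attempt_load
#   from models.yolo import SegmentationModel
#   m = attempt_load('yolov5s-seg.pt', fuse=False)  # plain nn.Module, not fused
#   assert type(m) == SegmentationModel             # selects the segmentation val path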
for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU) try: assert i not in (9, 10, 11), 'inference not supported' # Edge TPU, TF.js and Paddle are unsupported @@ -76,10 +80,14 @@ def run( assert suffix in str(w), 'export failed' # Validate - result = val.run(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) - metrics = result[0] # metrics (mp, mr, map50, map, *losses(box, obj, cls)) - speeds = result[2] # times (preprocess, inference, postprocess) - y.append([name, round(file_size(w), 1), round(metrics[3], 4), round(speeds[1], 2)]) # MB, mAP, t_inference + if model_type == SegmentationModel: + result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) + metric = result[0][7] # (box(p, r, map50, map), mask(p, r, map50, map), *loss(box, obj, cls)) + else: # DetectionModel: + result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) + metric = result[0][3] # (p, r, map50, map, *loss(box, obj, cls)) + speed = result[2][1] # times (preprocess, inference, postprocess) + y.append([name, round(file_size(w), 1), round(metric, 4), round(speed, 2)]) # MB, mAP, t_inference except Exception as e: if hard_fail: assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}' diff --git a/data/coco128-seg.yaml b/data/coco128-seg.yaml new file mode 100644 index 000000000000..5e81910cc456 --- /dev/null +++ b/data/coco128-seg.yaml @@ -0,0 +1,101 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics +# Example usage: python train.py --data coco128.yaml +# parent +# ├── yolov5 +# └── datasets +# └── coco128-seg ← downloads here (7 MB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
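# For example, all three forms are accepted here (illustrative paths; entries
# resolve relative to the 'path' key below):
#   train: images/train2017                    # 1) directory of images
#   train: train2017.txt                       # 2) txt file listing image paths
#   train: [images/train2017, images/extra]    # 3) list of sources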
+path: ../datasets/coco128-seg # dataset root dir +train: images/train2017 # train images (relative to 'path') 128 images +val: images/train2017 # val images (relative to 'path') 128 images +test: # test images (optional) + +# Classes +names: + 0: person + 1: bicycle + 2: car + 3: motorcycle + 4: airplane + 5: bus + 6: train + 7: truck + 8: boat + 9: traffic light + 10: fire hydrant + 11: stop sign + 12: parking meter + 13: bench + 14: bird + 15: cat + 16: dog + 17: horse + 18: sheep + 19: cow + 20: elephant + 21: bear + 22: zebra + 23: giraffe + 24: backpack + 25: umbrella + 26: handbag + 27: tie + 28: suitcase + 29: frisbee + 30: skis + 31: snowboard + 32: sports ball + 33: kite + 34: baseball bat + 35: baseball glove + 36: skateboard + 37: surfboard + 38: tennis racket + 39: bottle + 40: wine glass + 41: cup + 42: fork + 43: knife + 44: spoon + 45: bowl + 46: banana + 47: apple + 48: sandwich + 49: orange + 50: broccoli + 51: carrot + 52: hot dog + 53: pizza + 54: donut + 55: cake + 56: chair + 57: couch + 58: potted plant + 59: bed + 60: dining table + 61: toilet + 62: tv + 63: laptop + 64: mouse + 65: remote + 66: keyboard + 67: cell phone + 68: microwave + 69: oven + 70: toaster + 71: sink + 72: refrigerator + 73: book + 74: clock + 75: vase + 76: scissors + 77: teddy bear + 78: hair drier + 79: toothbrush + + +# Download script/URL (optional) +download: https://ultralytics.com/assets/coco128-seg.zip diff --git a/detect.py b/detect.py index a69606a3dff9..310d169281bf 100644 --- a/detect.py +++ b/detect.py @@ -149,8 +149,8 @@ def run( det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() # Print results - for c in det[:, -1].unique(): - n = (det[:, -1] == c).sum() # detections per class + for c in det[:, 5].unique(): + n = (det[:, 5] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results diff --git a/models/common.py b/models/common.py index 8b7dbbfa95fe..0d90ff4f8827 100644 --- a/models/common.py +++ b/models/common.py @@ -375,7 +375,6 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, if batch_dim.is_static: batch_size = batch_dim.get_length() executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2 - output_layer = next(iter(executable_network.outputs)) stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata elif engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') @@ -491,7 +490,7 @@ def forward(self, im, augment=False, visualize=False): y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im}) elif self.xml: # OpenVINO im = im.cpu().numpy() # FP32 - y = self.executable_network([im])[self.output_layer] + y = list(self.executable_network([im]).values()) elif self.engine: # TensorRT if self.dynamic and im.shape != self.bindings['images'].shape: i_in, i_out = (self.model.get_binding_index(x) for x in ('images', 'output')) @@ -786,8 +785,21 @@ def __str__(self): return '' +class Proto(nn.Module): + # YOLOv5 mask Proto module for segmentation models + def __init__(self, c1, c_=256, c2=32): # ch_in, number of protos, number of masks + super().__init__() + self.cv1 = Conv(c1, c_, k=3) + self.upsample = nn.Upsample(scale_factor=2, mode='nearest') + self.cv2 = Conv(c_, c_, k=3) + self.cv3 = Conv(c_, c2) + + def forward(self, x): + return self.cv3(self.cv2(self.upsample(self.cv1(x)))) + + class Classify(nn.Module): - # Classification head, i.e. 
x(b,c1,20,20) to x(b,c2) + # YOLOv5 classification head, i.e. x(b,c1,20,20) to x(b,c2) def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups super().__init__() c_ = 1280 # efficientnet_b0 size diff --git a/models/segment/yolov5l-seg.yaml b/models/segment/yolov5l-seg.yaml new file mode 100644 index 000000000000..4782de11dd2d --- /dev/null +++ b/models/segment/yolov5l-seg.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) + ] diff --git a/models/segment/yolov5m-seg.yaml b/models/segment/yolov5m-seg.yaml new file mode 100644 index 000000000000..f73d1992ac19 --- /dev/null +++ b/models/segment/yolov5m-seg.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.67 # model depth multiple +width_multiple: 0.75 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) + ] \ No newline at end of file diff --git a/models/segment/yolov5n-seg.yaml b/models/segment/yolov5n-seg.yaml new file mode 100644 index 
000000000000..c28225ab4a50 --- /dev/null +++ b/models/segment/yolov5n-seg.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.25 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) + ] diff --git a/models/segment/yolov5s-seg.yaml b/models/segment/yolov5s-seg.yaml new file mode 100644 index 000000000000..7cbdb36b425c --- /dev/null +++ b/models/segment/yolov5s-seg.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.5 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) + ] \ No newline at end of file diff --git a/models/segment/yolov5x-seg.yaml b/models/segment/yolov5x-seg.yaml new file mode 100644 index 000000000000..5d0c4524a99c --- /dev/null +++ b/models/segment/yolov5x-seg.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.33 # model depth multiple +width_multiple: 1.25 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - 
[116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) + ] diff --git a/models/tf.py b/models/tf.py index ecb0d4d79c78..8cce147059d3 100644 --- a/models/tf.py +++ b/models/tf.py @@ -30,7 +30,7 @@ from models.common import (C3, SPP, SPPF, Bottleneck, BottleneckCSP, C3x, Concat, Conv, CrossConv, DWConv, DWConvTranspose2d, Focus, autopad) from models.experimental import MixConv2d, attempt_load -from models.yolo import Detect +from models.yolo import Detect, Segment from utils.activations import SiLU from utils.general import LOGGER, make_divisible, print_args @@ -320,6 +320,36 @@ def _make_grid(nx=20, ny=20): return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32) +class TFSegment(TFDetect): + # YOLOv5 Segment head for segmentation models + def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(640, 640), w=None): + super().__init__(nc, anchors, ch, imgsz, w) + self.nm = nm # number of masks + self.npr = npr # number of protos + self.no = 5 + nc + self.nm # number of outputs per anchor + self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)] # output conv + self.proto = TFProto(ch[0], self.npr, self.nm, w=w.proto) # protos + self.detect = TFDetect.call + + def call(self, x): + p = self.proto(x[0]) + x = self.detect(self, x) + return (x, p) if self.training else ((x[0], p),) + + +class TFProto(keras.layers.Layer): + + def __init__(self, c1, c_=256, c2=32, w=None): + super().__init__() + self.cv1 = TFConv(c1, c_, k=3, w=w.cv1) + self.upsample = TFUpsample(None, scale_factor=2, mode='nearest') + self.cv2 = TFConv(c_, c_, k=3, w=w.cv2) + self.cv3 = TFConv(c_, c2, w=w.cv3) + + def call(self, inputs): + return self.cv3(self.cv2(self.upsample(self.cv1(inputs)))) + + class TFUpsample(keras.layers.Layer): # TF version of torch.nn.Upsample() def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w' @@ -377,10 +407,12 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) args = [ch[f]] elif m is Concat: c2 = sum(ch[-1 if x == -1 else x + 1] for x in f) - elif m is Detect: + elif m in [Detect, Segment]: args.append([ch[x + 1] for x in f]) if isinstance(args[1], int): # number of anchors args[1] = [list(range(args[1] * 2))] * len(f) + if m is Segment: + args[3] = make_divisible(args[3] * gw, 8) args.append(imgsz) else: c2 = ch[f] diff --git a/models/yolo.py b/models/yolo.py index fa05fcf9a8d9..a0702a7c0257 100644 --- a/models/yolo.py 
+++ b/models/yolo.py @@ -36,6 +36,7 @@ class Detect(nn.Module): + # YOLOv5 Detect head for detection models stride = None # strides computed during build dynamic = False # force grid reconstruction export = False # export mode @@ -63,15 +64,16 @@ def forward(self, x): if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) - y = x[i].sigmoid() + y = x[i].clone() + y[..., :5 + self.nc].sigmoid_() if self.inplace: y[..., 0:2] = (y[..., 0:2] * 2 + self.grid[i]) * self.stride[i] # xy y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 - xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 + xy, wh, etc = y.split((2, 2, self.no - 4), 4) # tensor_split((2, 4, 5), 4) if torch 1.8.0 xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh - y = torch.cat((xy, wh, conf), 4) + y = torch.cat((xy, wh, etc), 4) z.append(y.view(bs, -1, self.no)) return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x) @@ -87,6 +89,23 @@ def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version return grid, anchor_grid +class Segment(Detect): + # YOLOv5 Segment head for segmentation models + def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True): + super().__init__(nc, anchors, ch, inplace) + self.nm = nm # number of masks + self.npr = npr # number of protos + self.no = 5 + nc + self.nm # number of outputs per anchor + self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv + self.proto = Proto(ch[0], self.npr, self.nm) # protos + self.detect = Detect.forward + + def forward(self, x): + p = self.proto(x[0]) + x = self.detect(self, x) + return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1]) + + class BaseModel(nn.Module): # YOLOv5 base model def forward(self, x, profile=False, visualize=False): @@ -135,7 +154,7 @@ def _apply(self, fn): # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers self = super()._apply(fn) m = self.model[-1] # Detect() - if isinstance(m, Detect): + if isinstance(m, (Detect, Segment)): m.stride = fn(m.stride) m.grid = list(map(fn, m.grid)) if isinstance(m.anchor_grid, list): @@ -169,11 +188,12 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i # Build strides, anchors m = self.model[-1] # Detect() - if isinstance(m, Detect): + if isinstance(m, (Detect, Segment)): s = 256 # 2x min stride m.inplace = self.inplace - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.empty(1, ch, s, s))]) # forward - check_anchor_order(m) # must be in pixel-space (not grid-space) + forward = lambda x: self.forward(x)[0] if isinstance(m, Segment) else self.forward(x) + m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))]) # forward + check_anchor_order(m) m.anchors /= m.stride.view(-1, 1, 1) self.stride = m.stride self._initialize_biases() # only run once @@ -235,15 +255,21 @@ def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. 
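# Output-shape sketch for the Segment head above (assumes yolov5s-seg defaults:
# nc=80, nm=32, na=3, run from the repo root). Per anchor the layout is
# [x, y, w, h, obj, 80 cls, 32 mask coefficients], so no = 5 + 80 + 32 = 117:
#
#   import torch
#   from models.yolo import SegmentationModel
#   model = SegmentationModel('models/segment/yolov5s-seg.yaml')  # train mode by default
#   x, p = model(torch.zeros(1, 3, 640, 640))
#   # x: P3/P4/P5 maps of (1, 3, 80, 80, 117), (1, 3, 40, 40, 117), (1, 3, 20, 20, 117)
#   # p: Proto() prototype masks of (1, 32, 160, 160), i.e. stride 4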
m = self.model[-1] # Detect() module for mi, s in zip(m.m, m.stride): # from - b = mi.bias.view(m.na, -1).detach() # conv.bias(255) to (3,85) - b[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b[:, 5:] += math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum()) # cls + b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) + b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) + b.data[:, 5:5 + m.nc] += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum()) # cls mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) Model = DetectionModel # retain YOLOv5 'Model' class for backwards compatibility +class SegmentationModel(DetectionModel): + # YOLOv5 segmentation model + def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None): + super().__init__(cfg, ch, nc, anchors) + + class ClassificationModel(BaseModel): # YOLOv5 classification model def __init__(self, cfg=None, model=None, nc=1000, cutoff=10): # yaml, model, number of classes, cutoff index @@ -284,24 +310,28 @@ def parse_model(d, ch): # model_dict, input_channels(3) args[j] = eval(a) if isinstance(a, str) else a # eval strings n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain - if m in (Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, - BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x): + if m in { + Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, + BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x}: c1, c2 = ch[f], args[0] if c2 != no: # if not output c2 = make_divisible(c2 * gw, 8) args = [c1, c2, *args[1:]] - if m in [BottleneckCSP, C3, C3TR, C3Ghost, C3x]: + if m in {BottleneckCSP, C3, C3TR, C3Ghost, C3x}: args.insert(2, n) # number of repeats n = 1 elif m is nn.BatchNorm2d: args = [ch[f]] elif m is Concat: c2 = sum(ch[x] for x in f) - elif m is Detect: + # TODO: channel, gw, gd + elif m in {Detect, Segment}: args.append([ch[x] for x in f]) if isinstance(args[1], int): # number of anchors args[1] = [list(range(args[1] * 2))] * len(f) + if m is Segment: + args[3] = make_divisible(args[3] * gw, 8) elif m is Contract: c2 = ch[f] * args[0] ** 2 elif m is Expand: diff --git a/segment/predict.py b/segment/predict.py new file mode 100644 index 000000000000..ba4cf2905255 --- /dev/null +++ b/segment/predict.py @@ -0,0 +1,266 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Run YOLOv5 segmentation inference on images, videos, directories, streams, etc. 
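At its core, the predict loop below reduces to roughly the following (a sketch, assuming a loaded DetectMultiBackend `model` and a preprocessed, normalized image batch `im`):

    pred, proto = model(im)[:2]                  # (bs, n, 5 + nc + 32), (bs, 32, mh, mw)
    pred = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45, nm=32)
    for i, det in enumerate(pred):               # det: (n, 38) = xyxy, conf, cls, 32 coeffs
        if len(det):
            masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True)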
+ +Usage - sources: + $ python segment/predict.py --weights yolov5s-seg.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + path/ # directory + 'path/*.jpg' # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream + +Usage - formats: + $ python segment/predict.py --weights yolov5s-seg.pt # PyTorch + yolov5s-seg.torchscript # TorchScript + yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s-seg.xml # OpenVINO + yolov5s-seg.engine # TensorRT + yolov5s-seg.mlmodel # CoreML (macOS-only) + yolov5s-seg_saved_model # TensorFlow SavedModel + yolov5s-seg.pb # TensorFlow GraphDef + yolov5s-seg.tflite # TensorFlow Lite + yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU + yolov5s-seg_paddle_model # PaddlePaddle +""" + +import argparse +import os +import platform +import sys +from pathlib import Path + +import torch + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.common import DetectMultiBackend +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams +from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, + increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh) +from utils.plots import Annotator, colors, save_one_box +from utils.segment.general import process_mask +from utils.torch_utils import select_device, smart_inference_mode + + +@smart_inference_mode() +def run( + weights=ROOT / 'yolov5s-seg.pt', # model.pt path(s) + source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam + data=ROOT / 'data/coco128.yaml', # dataset.yaml path + imgsz=(640, 640), # inference size (height, width) + conf_thres=0.25, # confidence threshold + iou_thres=0.45, # NMS IOU threshold + max_det=1000, # maximum detections per image + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + view_img=False, # show results + save_txt=False, # save results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_crop=False, # save cropped prediction boxes + nosave=False, # do not save images/videos + classes=None, # filter by class: --class 0, or --class 0 2 3 + agnostic_nms=False, # class-agnostic NMS + augment=False, # augmented inference + visualize=False, # visualize features + update=False, # update all models + project=ROOT / 'runs/predict-seg', # save results to project/name + name='exp', # save results to project/name + exist_ok=False, # existing project/name ok, do not increment + line_thickness=3, # bounding box thickness (pixels) + hide_labels=False, # hide labels + hide_conf=False, # hide confidences + half=False, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + vid_stride=1, # video frame-rate stride + retina_masks=False, +): + source = str(source) + save_img = not nosave and not source.endswith('.txt') # save inference images + is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) + is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) + webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + if is_url and is_file: + source = check_file(source) # download + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + device = select_device(device) + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, names, pt = model.stride, model.names, model.pt + imgsz = check_img_size(imgsz, s=stride) # check image size + + # Dataloader + if webcam: + view_img = check_imshow() + dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + bs = len(dataset) # batch_size + else: + dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + bs = 1 # batch_size + vid_path, vid_writer = [None] * bs, [None] * bs + + # Run inference + model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup + seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) + for path, im, im0s, vid_cap, s in dataset: + with dt[0]: + im = torch.from_numpy(im).to(device) + im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + if len(im.shape) == 3: + im = im[None] # expand for batch dim + + # Inference + with dt[1]: + visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False + pred, proto = model(im, augment=augment, visualize=visualize)[:2] + + # NMS + with dt[2]: + pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32) + + # Second-stage classifier (optional) + # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) + + # Process predictions + for i, det in enumerate(pred): # per image + seen += 1 + if webcam: # batch_size >= 1 + p, im0, frame = path[i], im0s[i].copy(), dataset.count + s += f'{i}: ' + else: + p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) + + p = Path(p) # to Path + save_path = str(save_dir / p.name) # im.jpg + txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt + s += '%gx%g ' % im.shape[2:] # print string + gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain 
whwh + imc = im0.copy() if save_crop else im0 # for save_crop + annotator = Annotator(im0, line_width=line_thickness, example=str(names)) + if len(det): + masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC + + # Rescale boxes from img_size to im0 size + det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() + + # Print results + for c in det[:, 5].unique(): + n = (det[:, 5] == c).sum() # detections per class + s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string + + # Mask plotting + annotator.masks(masks, + colors=[colors(x, True) for x in det[:, 5]], + im_gpu=None if retina_masks else im[i]) + + # Write results + for *xyxy, conf, cls in reversed(det[:, :6]): + if save_txt: # Write to file + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + with open(f'{txt_path}.txt', 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + if save_img or save_crop or view_img: # Add bbox to image + c = int(cls) # integer class + label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') + annotator.box_label(xyxy, label, color=colors(c, True)) + if save_crop: + save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) + + # Stream results + im0 = annotator.result() + if view_img: + if platform.system() == 'Linux' and p not in windows: + windows.append(p) + cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) + cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) + cv2.imshow(str(p), im0) + if cv2.waitKey(1) == ord('q'): # 1 millisecond + exit() + + # Save results (image with detections) + if save_img: + if dataset.mode == 'image': + cv2.imwrite(save_path, im0) + else: # 'video' or 'stream' + if vid_path[i] != save_path: # new video + vid_path[i] = save_path + if isinstance(vid_writer[i], cv2.VideoWriter): + vid_writer[i].release() # release previous video writer + if vid_cap: # video + fps = vid_cap.get(cv2.CAP_PROP_FPS) + w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + else: # stream + fps, w, h = 30, im0.shape[1], im0.shape[0] + save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos + vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + vid_writer[i].write(im0) + + # Print time (inference-only) + LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") + + # Print results + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) + if save_txt or save_img: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + if update: + strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning) + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') + parser.add_argument('--imgsz', '--img', 
'--img-size', nargs='+', type=int, default=[640], help='inference size h,w') + parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--view-img', action='store_true', help='show results') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') + parser.add_argument('--nosave', action='store_true', help='do not save images/videos') + parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') + parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--visualize', action='store_true', help='visualize features') + parser.add_argument('--update', action='store_true', help='update all models') + parser.add_argument('--project', default=ROOT / 'runs/predict-seg', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save results to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') + parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') + parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') + parser.add_argument('--retina-masks', action='store_true', help='whether to plot masks in native resolution') + opt = parser.parse_args() + opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand + print_args(vars(opt)) + return opt + + +def main(opt): + check_requirements(exclude=('tensorboard', 'thop')) + run(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/segment/train.py b/segment/train.py new file mode 100644 index 000000000000..bda379176151 --- /dev/null +++ b/segment/train.py @@ -0,0 +1,676 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Train a YOLOv5 segment model on a segment dataset +Models and datasets download automatically from the latest YOLOv5 release. 
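Training can also be launched from Python; a sketch, assuming segment/train.py keeps the same run(**kwargs) convention as train.py and the repo root is on sys.path:

    import segment.train as seg_train
    seg_train.run(data='coco128-seg.yaml', weights='yolov5s-seg.pt', imgsz=640, epochs=3)  # kwargs mirror the CLI flags below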
+ +Usage - Single-GPU training: + $ python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 # from pretrained (recommended) + $ python segment/train.py --data coco128-seg.yaml --weights '' --cfg yolov5s-seg.yaml --img 640 # from scratch + +Usage - Multi-GPU DDP training: + $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 + +Models: https://github.com/ultralytics/yolov5/tree/master/models +Datasets: https://github.com/ultralytics/yolov5/tree/master/data +Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data +""" + +import argparse +import math +import os +import random +import sys +import time +from copy import deepcopy +from datetime import datetime +from pathlib import Path + +import numpy as np +import torch +import torch.distributed as dist +import torch.nn as nn +import yaml +from torch.optim import lr_scheduler +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +import torch.nn.functional as F + +import segment.val as validate # for end-of-epoch mAP +from models.experimental import attempt_load +from models.yolo import SegmentationModel +from utils.autoanchor import check_anchors +from utils.autobatch import check_train_batch_size +from utils.callbacks import Callbacks +from utils.downloads import attempt_download, is_url +from utils.general import (LOGGER, check_amp, check_dataset, check_file, check_git_status, check_img_size, + check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, + init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, one_cycle, + print_args, print_mutation, strip_optimizer, yaml_save) +from utils.loggers import GenericLogger +from utils.plots import plot_evolve, plot_labels +from utils.segment.dataloaders import create_dataloader +from utils.segment.loss import ComputeLoss +from utils.segment.metrics import KEYS, fitness +from utils.segment.plots import plot_images_and_masks, plot_results_with_masks +from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer, + smart_resume, torch_distributed_zero_first) + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + + +def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary + save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, mask_ratio = \ + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ + opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze, opt.mask_ratio + # callbacks.run('on_pretrain_routine_start') + + # Directories + w = save_dir / 'weights' # weights dir + (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir + last, best = w / 'last.pt', w / 'best.pt' + + # Hyperparameters + if isinstance(hyp, str): + with open(hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load hyps dict + LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) + opt.hyp = hyp.copy() # for saving hyps to checkpoints + + # Save run settings + if not 
evolve: + yaml_save(save_dir / 'hyp.yaml', hyp) + yaml_save(save_dir / 'opt.yaml', vars(opt)) + + # Loggers + data_dict = None + if RANK in {-1, 0}: + logger = GenericLogger(opt=opt, console_logger=LOGGER) + # loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance + # if loggers.clearml: + # data_dict = loggers.clearml.data_dict # None if no ClearML dataset or filled in by ClearML + # if loggers.wandb: + # data_dict = loggers.wandb.data_dict + # if resume: + # weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size + # + # # Register actions + # for k in methods(loggers): + # callbacks.register_action(k, callback=getattr(loggers, k)) + + # Config + plots = not evolve and not opt.noplots # create plots + overlap = not opt.no_overlap + cuda = device.type != 'cpu' + init_seeds(opt.seed + 1 + RANK, deterministic=True) + with torch_distributed_zero_first(LOCAL_RANK): + data_dict = data_dict or check_dataset(data) # check if None + train_path, val_path = data_dict['train'], data_dict['val'] + nc = 1 if single_cls else int(data_dict['nc']) # number of classes + names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names + is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset + + # Model + check_suffix(weights, '.pt') # check weights + pretrained = weights.endswith('.pt') + if pretrained: + with torch_distributed_zero_first(LOCAL_RANK): + weights = attempt_download(weights) # download if not found locally + ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak + model = SegmentationModel(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) + exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys + csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 + csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect + model.load_state_dict(csd, strict=False) # load + LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report + else: + model = SegmentationModel(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + amp = check_amp(model) # check AMP + + # Freeze + freeze = [f'model.{x}.' 
for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze + for k, v in model.named_parameters(): + v.requires_grad = True # train all layers + # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results) + if any(x in k for x in freeze): + LOGGER.info(f'freezing {k}') + v.requires_grad = False + + # Image size + gs = max(int(model.stride.max()), 32) # grid size (max stride) + imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple + + # Batch size + if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size + batch_size = check_train_batch_size(model, imgsz, amp) + logger.update_params({"batch_size": batch_size}) + # loggers.on_params_update({"batch_size": batch_size}) + + # Optimizer + nbs = 64 # nominal batch size + accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing + hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay + optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay']) + + # Scheduler + if opt.cos_lr: + lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] + else: + lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) + + # EMA + ema = ModelEMA(model) if RANK in {-1, 0} else None + + # Resume + best_fitness, start_epoch = 0.0, 0 + if pretrained: + if resume: + best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume) + del ckpt, csd + + # DP mode + if cuda and RANK == -1 and torch.cuda.device_count() > 1: + LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' + 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') + model = torch.nn.DataParallel(model) + + # SyncBatchNorm + if opt.sync_bn and cuda and RANK != -1: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) + LOGGER.info('Using SyncBatchNorm()') + + # Trainloader + train_loader, dataset = create_dataloader( + train_path, + imgsz, + batch_size // WORLD_SIZE, + gs, + single_cls, + hyp=hyp, + augment=True, + cache=None if opt.cache == 'val' else opt.cache, + rect=opt.rect, + rank=LOCAL_RANK, + workers=workers, + image_weights=opt.image_weights, + quad=opt.quad, + prefix=colorstr('train: '), + shuffle=True, + mask_downsample_ratio=mask_ratio, + overlap_mask=overlap, + ) + labels = np.concatenate(dataset.labels, 0) + mlc = int(labels[:, 0].max()) # max label class + assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. 
Possible class labels are 0-{nc - 1}'
+
+ # Process 0
+ if RANK in {-1, 0}:
+ val_loader = create_dataloader(val_path,
+ imgsz,
+ batch_size // WORLD_SIZE * 2,
+ gs,
+ single_cls,
+ hyp=hyp,
+ cache=None if noval else opt.cache,
+ rect=True,
+ rank=-1,
+ workers=workers * 2,
+ pad=0.5,
+ mask_downsample_ratio=mask_ratio,
+ overlap_mask=overlap,
+ prefix=colorstr('val: '))[0]
+
+ if not resume:
+ if not opt.noautoanchor:
+ check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor
+ model.half().float() # pre-reduce anchor precision
+
+ if plots:
+ plot_labels(labels, names, save_dir)
+ # callbacks.run('on_pretrain_routine_end', labels, names)
+
+ # DDP mode
+ if cuda and RANK != -1:
+ model = smart_DDP(model)
+
+ # Model attributes
+ nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps)
+ hyp['box'] *= 3 / nl # scale to layers
+ hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers
+ hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers
+ hyp['label_smoothing'] = opt.label_smoothing
+ model.nc = nc # attach number of classes to model
+ model.hyp = hyp # attach hyperparameters to model
+ model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
+ model.names = names
+
+ # Start training
+ t0 = time.time()
+ nb = len(train_loader) # number of batches
+ nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations)
+ # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
+ last_opt_step = -1
+ maps = np.zeros(nc) # mAP per class
+ results = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) # box P, R, mAP@.5, mAP@.5-.95, mask P, R, mAP@.5, mAP@.5-.95, val_loss(box, seg, obj, cls)
+ scheduler.last_epoch = start_epoch - 1 # do not move
+ scaler = torch.cuda.amp.GradScaler(enabled=amp)
+ stopper, stop = EarlyStopping(patience=opt.patience), False
+ compute_loss = ComputeLoss(model, overlap=overlap) # init loss class
+ # callbacks.run('on_train_start')
+ LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
+ f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
+ f"Logging results to {colorstr('bold', save_dir)}\n"
+ f'Starting training for {epochs} epochs...')
+ for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
+ # callbacks.run('on_train_epoch_start')
+ model.train()
+
+ # Update image weights (optional, single-GPU only)
+ if opt.image_weights:
+ cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
+ iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
+ dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
+
+ # Update mosaic border (optional)
+ # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
+ # dataset.mosaic_border = [b - imgsz, -b] # height, width borders
+
+ mloss = torch.zeros(4, device=device) # mean losses
+ if RANK != -1:
+ train_loader.sampler.set_epoch(epoch)
+ pbar = enumerate(train_loader)
+ LOGGER.info(('\n' + '%11s' * 8) %
+ ('Epoch', 'GPU_mem', 'box_loss', 'seg_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size'))
+ if RANK in {-1, 0}:
+ pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar
+ optimizer.zero_grad()
+ for i, (imgs, targets, paths, _, masks) in pbar: # batch ------------------------------------------------------
+ # callbacks.run('on_train_batch_start')
+ ni = i + nb * epoch # 
number integrated batches (since train start) + imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0 + + # Warmup + if ni <= nw: + xi = [0, nw] # x interp + # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) + accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) + for j, x in enumerate(optimizer.param_groups): + # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 + x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)]) + if 'momentum' in x: + x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) + + # Multi-scale + if opt.multi_scale: + sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size + sf = sz / max(imgs.shape[2:]) # scale factor + if sf != 1: + ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) + imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) + + # Forward + with torch.cuda.amp.autocast(amp): + pred = model(imgs) # forward + loss, loss_items = compute_loss(pred, targets.to(device), masks=masks.to(device).float()) + if RANK != -1: + loss *= WORLD_SIZE # gradient averaged between devices in DDP mode + if opt.quad: + loss *= 4. + + # Backward + scaler.scale(loss).backward() + + # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html + if ni - last_opt_step >= accumulate: + scaler.unscale_(optimizer) # unscale gradients + torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients + scaler.step(optimizer) # optimizer.step + scaler.update() + optimizer.zero_grad() + if ema: + ema.update(model) + last_opt_step = ni + + # Log + if RANK in {-1, 0}: + mloss = (mloss * i + loss_items) / (i + 1) # update mean losses + mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) + pbar.set_description(('%11s' * 2 + '%11.4g' * 6) % + (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) + # callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths) + # if callbacks.stop_training: + # return + + # Mosaic plots + if plots: + if ni < 3: + plot_images_and_masks(imgs, targets, masks, paths, save_dir / f"train_batch{ni}.jpg") + if ni == 10: + files = sorted(save_dir.glob('train*.jpg')) + logger.log_images(files, "Mosaics", epoch) + # end batch ------------------------------------------------------------------------------------------------ + + # Scheduler + lr = [x['lr'] for x in optimizer.param_groups] # for loggers + scheduler.step() + + if RANK in {-1, 0}: + # mAP + # callbacks.run('on_train_epoch_end', epoch=epoch) + ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) + final_epoch = (epoch + 1 == epochs) or stopper.possible_stop + if not noval or final_epoch: # Calculate mAP + results, maps, _ = validate.run(data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + half=amp, + model=ema.ema, + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + plots=False, + callbacks=callbacks, + compute_loss=compute_loss, + mask_downsample_ratio=mask_ratio, + overlap=overlap) + + # Update best mAP + fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] + stop = stopper(epoch=epoch, fitness=fi) # early stop check + if fi > best_fitness: + best_fitness = fi + log_vals = list(mloss) + list(results) + lr + # 
callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) + # Log val metrics and media + metrics_dict = dict(zip(KEYS, log_vals)) + logger.log_metrics(metrics_dict, epoch) + + # Save model + if (not nosave) or (final_epoch and not evolve): # if save + ckpt = { + 'epoch': epoch, + 'best_fitness': best_fitness, + 'model': deepcopy(de_parallel(model)).half(), + 'ema': deepcopy(ema.ema).half(), + 'updates': ema.updates, + 'optimizer': optimizer.state_dict(), + # 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None, + 'opt': vars(opt), + 'date': datetime.now().isoformat()} + + # Save last, best and delete + torch.save(ckpt, last) + if best_fitness == fi: + torch.save(ckpt, best) + if opt.save_period > 0 and epoch % opt.save_period == 0: + torch.save(ckpt, w / f'epoch{epoch}.pt') + logger.log_model(w / f'epoch{epoch}.pt') + del ckpt + # callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) + + # EarlyStopping + if RANK != -1: # if DDP training + broadcast_list = [stop if RANK == 0 else None] + dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks + if RANK != 0: + stop = broadcast_list[0] + if stop: + break # must break all DDP ranks + + # end epoch ---------------------------------------------------------------------------------------------------- + # end training ----------------------------------------------------------------------------------------------------- + if RANK in {-1, 0}: + LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') + for f in last, best: + if f.exists(): + strip_optimizer(f) # strip optimizers + if f is best: + LOGGER.info(f'\nValidating {f}...') + results, _, _ = validate.run( + data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + model=attempt_load(f, device).half(), + iou_thres=0.65 if is_coco else 0.60, # best pycocotools at iou 0.65 + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + save_json=is_coco, + verbose=True, + plots=plots, + callbacks=callbacks, + compute_loss=compute_loss, + mask_downsample_ratio=mask_ratio, + overlap=overlap) # val best model with plots + if is_coco: + # callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) + metrics_dict = dict(zip(KEYS, list(mloss) + list(results) + lr)) + logger.log_metrics(metrics_dict, epoch) + + # callbacks.run('on_train_end', last, best, epoch, results) + # on train end callback using genericLogger + logger.log_metrics(dict(zip(KEYS[4:16], results)), epochs) + if not opt.evolve: + logger.log_model(best, epoch) + if plots: + plot_results_with_masks(file=save_dir / 'results.csv') # save results.png + files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] + files = [(save_dir / f) for f in files if (save_dir / f).exists()] # filter + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") + logger.log_images(files, "Results", epoch + 1) + logger.log_images(sorted(save_dir.glob('val*.jpg')), "Validation", epoch + 1) + torch.cuda.empty_cache() + return results + + +def parse_opt(known=False): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s-seg.pt', help='initial weights path') + parser.add_argument('--cfg', type=str, default='', help='model.yaml path') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path') + parser.add_argument('--hyp', type=str, default=ROOT / 
'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path')
+ parser.add_argument('--epochs', type=int, default=300, help='total training epochs')
+ parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
+ parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
+ parser.add_argument('--rect', action='store_true', help='rectangular training')
+ parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
+ parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
+ parser.add_argument('--noval', action='store_true', help='only validate final epoch')
+ parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
+ parser.add_argument('--noplots', action='store_true', help='save no plot files')
+ parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
+ parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
+ parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"')
+ parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
+ parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+ parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
+ parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
+ parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer')
+ parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
+ parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
+ parser.add_argument('--project', default=ROOT / 'runs/train-seg', help='save to project/name')
+ parser.add_argument('--name', default='exp', help='save to project/name')
+ parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+ parser.add_argument('--quad', action='store_true', help='quad dataloader')
+ parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
+ parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
+ parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
+ parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
+ parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
+ parser.add_argument('--seed', type=int, default=0, help='Global training seed')
+ parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')
+
+ # Instance Segmentation Args
+ parser.add_argument('--mask-ratio', type=int, default=4, help='Downsample the truth masks to save memory')
+ parser.add_argument('--no-overlap', action='store_true', help='disable mask overlap (overlapping masks train faster at slightly less mAP)')
+
+ # Weights & Biases arguments
+ # parser.add_argument('--entity', default=None, help='W&B: Entity')
+ # parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option')
+ # 
parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval')
+ # parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use')
+
+ return parser.parse_known_args()[0] if known else parser.parse_args()
+
+
+def main(opt, callbacks=Callbacks()):
+ # Checks
+ if RANK in {-1, 0}:
+ print_args(vars(opt))
+ check_git_status()
+ check_requirements()
+
+ # Resume
+ if opt.resume and not opt.evolve: # resume from specified or most recent last.pt
+ last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run())
+ opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml
+ opt_data = opt.data # original dataset
+ if opt_yaml.is_file():
+ with open(opt_yaml, errors='ignore') as f:
+ d = yaml.safe_load(f)
+ else:
+ d = torch.load(last, map_location='cpu')['opt']
+ opt = argparse.Namespace(**d) # replace
+ opt.cfg, opt.weights, opt.resume = '', str(last), True # reinstate
+ if is_url(opt_data):
+ opt.data = check_file(opt_data) # avoid HUB resume auth timeout
+ else:
+ opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \
+ check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks
+ assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
+ if opt.evolve:
+ if opt.project == str(ROOT / 'runs/train-seg'): # if default project name, rename to runs/evolve-seg
+ opt.project = str(ROOT / 'runs/evolve-seg')
+ opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume
+ if opt.name == 'cfg':
+ opt.name = Path(opt.cfg).stem # use model.yaml as name
+ opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))
+
+ # DDP mode
+ device = select_device(opt.device, batch_size=opt.batch_size)
+ if LOCAL_RANK != -1:
+ msg = 'is not compatible with YOLOv5 Multi-GPU DDP training'
+ assert not opt.image_weights, f'--image-weights {msg}'
+ assert not opt.evolve, f'--evolve {msg}'
+ assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size'
+ assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
+ assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
+ torch.cuda.set_device(LOCAL_RANK)
+ device = torch.device('cuda', LOCAL_RANK)
+ dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo")
+
+ # Train
+ if not opt.evolve:
+ train(opt.hyp, opt, device, callbacks)
+
+ # Evolve hyperparameters (optional)
+ else:
+ # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
+ meta = {
+ 'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)
+ 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf)
+ 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1
+ 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay
+ 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok)
+ 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum
+ 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr
+ 'box': (1, 0.02, 0.2), # box loss gain
+ 'cls': (1, 0.2, 4.0), # cls loss gain
+ 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight
+ 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels)
+ 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight
+ 'iou_t': (0, 0.1, 0.7), # IoU training threshold
+ 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold
+ 'anchors': 
(2, 2.0, 10.0), # anchors per output grid (0 to ignore)
+ 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5)
+ 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction)
+ 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)
+ 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction)
+ 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg)
+ 'translate': (1, 0.0, 0.9), # image translation (+/- fraction)
+ 'scale': (1, 0.0, 0.9), # image scale (+/- gain)
+ 'shear': (1, 0.0, 10.0), # image shear (+/- deg)
+ 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
+ 'flipud': (1, 0.0, 1.0), # image flip up-down (probability)
+ 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability)
+ 'mosaic': (1, 0.0, 1.0), # image mosaic (probability)
+ 'mixup': (1, 0.0, 1.0), # image mixup (probability)
+ 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability)
+
+ with open(opt.hyp, errors='ignore') as f:
+ hyp = yaml.safe_load(f) # load hyps dict
+ if 'anchors' not in hyp: # anchors commented in hyp.yaml
+ hyp['anchors'] = 3
+ if opt.noautoanchor:
+ del hyp['anchors'], meta['anchors']
+ opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch
+ # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
+ evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'
+ if opt.bucket:
+ os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}') # download evolve.csv if exists
+
+ for _ in range(opt.evolve): # generations to evolve
+ if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate
+ # Select parent(s)
+ parent = 'single' # parent selection method: 'single' or 'weighted'
+ x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1)
+ n = min(5, len(x)) # number of previous results to consider
+ x = x[np.argsort(-fitness(x))][:n] # top n mutations
+ w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0)
+ if parent == 'single' or len(x) == 1:
+ # x = x[random.randint(0, n - 1)] # random selection
+ x = x[random.choices(range(n), weights=w)[0]] # weighted selection
+ elif parent == 'weighted':
+ x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
+
+ # Mutate
+ mp, s = 0.8, 0.2 # mutation probability, sigma
+ npr = np.random
+ npr.seed(int(time.time()))
+ g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1
+ ng = len(meta)
+ v = np.ones(ng)
+ while all(v == 1): # mutate until a change occurs (prevent duplicates)
+ v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
+ for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)
+ hyp[k] = float(x[i + 7] * v[i]) # mutate
+
+ # Constrain to limits
+ for k, v in meta.items():
+ hyp[k] = max(hyp[k], v[1]) # lower limit
+ hyp[k] = min(hyp[k], v[2]) # upper limit
+ hyp[k] = round(hyp[k], 5) # significant digits
+
+ # Train mutation
+ results = train(hyp.copy(), opt, device, callbacks)
+ callbacks = Callbacks()
+ # Write mutation results
+ print_mutation(results, hyp.copy(), save_dir, opt.bucket)
+
+ # Plot results
+ plot_evolve(evolve_csv)
+ LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n'
+ f"Results saved to {colorstr('bold', save_dir)}\n"
+ f'Usage example: $ python train.py --hyp {evolve_yaml}')
+
+
+def run(**kwargs):
+ # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')
+ opt = parse_opt(True)
+ for k, v in kwargs.items():
+ 
setattr(opt, k, v)
+ main(opt)
+ return opt
+
+
+if __name__ == "__main__":
+ opt = parse_opt()
+ main(opt)
diff --git a/segment/val.py b/segment/val.py
new file mode 100644
index 000000000000..138aa00aaed3
--- /dev/null
+++ b/segment/val.py
@@ -0,0 +1,471 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+"""
+Validate a trained YOLOv5 segment model on a segment dataset
+
+Usage:
+ $ bash data/scripts/get_coco.sh --val --segments # download COCO-segments val split (1G, 5000 images)
+ $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate COCO-segments
+
+Usage - formats:
+ $ python segment/val.py --weights yolov5s-seg.pt # PyTorch
+ yolov5s-seg.torchscript # TorchScript
+ yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn
+ yolov5s-seg.xml # OpenVINO
+ yolov5s-seg.engine # TensorRT
+ yolov5s-seg.mlmodel # CoreML (macOS-only)
+ yolov5s-seg_saved_model # TensorFlow SavedModel
+ yolov5s-seg.pb # TensorFlow GraphDef
+ yolov5s-seg.tflite # TensorFlow Lite
+ yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU
+ yolov5s-seg_paddle_model # PaddlePaddle
+"""
+
+import argparse
+import json
+import os
+import sys
+from multiprocessing.pool import ThreadPool
+from pathlib import Path
+
+import numpy as np
+import torch
+from tqdm import tqdm
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1] # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+ sys.path.append(str(ROOT)) # add ROOT to PATH
+ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
+
+import torch.nn.functional as F
+
+from models.common import DetectMultiBackend
+from models.yolo import SegmentationModel
+from utils.callbacks import Callbacks
+from utils.general import (LOGGER, NUM_THREADS, Profile, check_dataset, check_img_size, check_requirements, check_yaml,
+ coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args,
+ scale_coords, xywh2xyxy, xyxy2xywh)
+from utils.metrics import ConfusionMatrix, box_iou
+from utils.plots import output_to_target, plot_val_study
+from utils.segment.dataloaders import create_dataloader
+from utils.segment.general import mask_iou, process_mask, process_mask_upsample, scale_image
+from utils.segment.metrics import Metrics, ap_per_class_box_and_mask
+from utils.segment.plots import plot_images_and_masks
+from utils.torch_utils import de_parallel, select_device, smart_inference_mode
+
+
+def save_one_txt(predn, save_conf, shape, file):
+ # Save one txt result
+ gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh
+ for *xyxy, conf, cls in predn.tolist():
+ xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
+ line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
+ with open(file, 'a') as f:
+ f.write(('%g ' * len(line)).rstrip() % line + '\n')
+
+
+def save_one_json(predn, jdict, path, class_map, pred_masks):
+ # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
+ from pycocotools.mask import encode
+
+ def single_encode(x):
+ rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0]
+ rle["counts"] = rle["counts"].decode("utf-8")
+ return rle
+
+ image_id = int(path.stem) if path.stem.isnumeric() else path.stem
+ box = xyxy2xywh(predn[:, :4]) # xywh
+ box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
+ pred_masks = np.transpose(pred_masks, (2, 0, 1))
+ with ThreadPool(NUM_THREADS) as pool:
+ rles = pool.map(single_encode, pred_masks)
+ for i, (p, b) in enumerate(zip(predn.tolist(), 
box.tolist())): + jdict.append({ + 'image_id': image_id, + 'category_id': class_map[int(p[5])], + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5), + 'segmentation': rles[i]}) + + +def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False): + """ + Return correct prediction matrix + Arguments: + detections (array[N, 6]), x1, y1, x2, y2, conf, class + labels (array[M, 5]), class, x1, y1, x2, y2 + Returns: + correct (array[N, 10]), for 10 IoU levels + """ + if masks: + if overlap: + nl = len(labels) + index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 + gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) + gt_masks = torch.where(gt_masks == index, 1.0, 0.0) + if gt_masks.shape[1:] != pred_masks.shape[1:]: + gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0] + gt_masks = gt_masks.gt_(0.5) + iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) + else: # boxes + iou = box_iou(labels[:, 1:], detections[:, :4]) + + correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) + correct_class = labels[:, 0:1] == detections[:, 5] + for i in range(len(iouv)): + x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + # matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + correct[matches[:, 1].astype(int), i] = True + return torch.tensor(correct, dtype=torch.bool, device=iouv.device) + + +@smart_inference_mode() +def run( + data, + weights=None, # model.pt path(s) + batch_size=32, # batch size + imgsz=640, # inference size (pixels) + conf_thres=0.001, # confidence threshold + iou_thres=0.6, # NMS IoU threshold + max_det=300, # maximum detections per image + task='val', # train, val, test, speed or study + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + workers=8, # max dataloader workers (per RANK in DDP mode) + single_cls=False, # treat as single-class dataset + augment=False, # augmented inference + verbose=False, # verbose output + save_txt=False, # save results to *.txt + save_hybrid=False, # save label+prediction hybrid results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_json=False, # save a COCO-JSON results file + project=ROOT / 'runs/val-seg', # save to project/name + name='exp', # save to project/name + exist_ok=False, # existing project/name ok, do not increment + half=True, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + model=None, + dataloader=None, + save_dir=Path(''), + plots=True, + overlap=False, + mask_downsample_ratio=1, + compute_loss=None, + callbacks=Callbacks(), +): + if save_json: + check_requirements(['pycocotools']) + process = process_mask_upsample # more accurate + else: + process = process_mask # faster + + # Initialize/load model and set device + training = model is not None + if training: # called by train.py + device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model + half &= device.type != 'cpu' # half precision only supported on CUDA + model.half() if half else model.float() + nm = de_parallel(model).model[-1].nm # number of masks + else: # called directly + device = select_device(device, batch_size=batch_size) + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine + imgsz = check_img_size(imgsz, s=stride) # check image size + half = model.fp16 # FP16 supported on limited backends with CUDA + nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks + if engine: + batch_size = model.batch_size + else: + device = model.device + if not (pt or jit): + batch_size = 1 # export.py models default to batch-size 1 + LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') + + # Data + data = check_dataset(data) # check + + # Configure + model.eval() + cuda = device.type != 'cpu' + is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset + nc = 1 if single_cls else int(data['nc']) # number of classes + iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 + niou = iouv.numel() + + # Dataloader + if not training: + if pt and not single_cls: # check --weights are trained on --data + ncm = model.model.nc + assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ + f'classes). Pass correct combination of --weights and --data that are trained together.' 
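+ # nm above is the number of per-detection mask coefficients. As a rough sketch of the idea
+ # (illustrative only, not part of this file): with prototypes protos of shape (nm, mh, mw)
+ # and coefficients coef of shape (n, nm), instance masks come from a linear combination
+ # followed by a sigmoid,
+ # masks = (coef @ protos.view(nm, -1)).sigmoid().view(-1, mh, mw)
+ # which is what process_mask() / process_mask_upsample() implement, plus bbox cropping.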
+ model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup + pad = 0.0 if task in ('speed', 'benchmark') else 0.5 + rect = False if task == 'benchmark' else pt # square inference for benchmarks + task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images + dataloader = create_dataloader(data[task], + imgsz, + batch_size, + stride, + single_cls, + pad=pad, + rect=rect, + workers=workers, + prefix=colorstr(f'{task}: '), + overlap_mask=overlap, + mask_downsample_ratio=mask_downsample_ratio)[0] + + seen = 0 + confusion_matrix = ConfusionMatrix(nc=nc) + names = model.names if hasattr(model, 'names') else model.module.names # get class names + if isinstance(names, (list, tuple)): # old format + names = dict(enumerate(names)) + class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) + s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', "R", "mAP50", "mAP50-95)", "Mask(P", "R", + "mAP50", "mAP50-95)") + dt = Profile(), Profile(), Profile() + metrics = Metrics() + loss = torch.zeros(4, device=device) + jdict, stats = [], [] + # callbacks.run('on_val_start') + pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar): + # callbacks.run('on_val_batch_start') + with dt[0]: + if cuda: + im = im.to(device, non_blocking=True) + targets = targets.to(device) + masks = masks.to(device) + masks = masks.float() + im = im.half() if half else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + nb, _, height, width = im.shape # batch size, channels, height, width + + # Inference + with dt[1]: + preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None) + + # Loss + if compute_loss: + loss += compute_loss((train_out, protos), targets, masks)[1] # box, obj, cls + + # NMS + targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels + lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling + with dt[2]: + preds = non_max_suppression(preds, + conf_thres, + iou_thres, + labels=lb, + multi_label=True, + agnostic=single_cls, + max_det=max_det, + nm=nm) + + # Metrics + plot_masks = [] # masks for plotting + for si, (pred, proto) in enumerate(zip(preds, protos)): + labels = targets[targets[:, 0] == si, 1:] + nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions + path, shape = Path(paths[si]), shapes[si][0] + correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init + correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init + seen += 1 + + if npr == 0: + if nl: + stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0])) + if plots: + confusion_matrix.process_batch(detections=None, labels=labels[:, 0]) + continue + + # Masks + midx = [si] if overlap else targets[:, 0] == si + gt_masks = masks[midx] + pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:]) + + # Predictions + if single_cls: + pred[:, 5] = 0 + predn = pred.clone() + scale_coords(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred + + # Evaluate + if nl: + tbox = xywh2xyxy(labels[:, 1:5]) # target boxes + scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels + labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels + correct_bboxes = process_batch(predn, labelsn, 
iouv)
+ correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True)
+ if plots:
+ confusion_matrix.process_batch(predn, labelsn)
+ stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0])) # (correct_masks, correct_bboxes, conf, pcls, tcls)
+
+ pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8)
+ if plots and batch_i < 3:
+ plot_masks.append(pred_masks[:15].cpu()) # filter top 15 to plot
+
+ # Save/log
+ if save_txt:
+ save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
+ if save_json:
+ pred_masks = scale_image(im[si].shape[1:],
+ pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1])
+ save_one_json(predn, jdict, path, class_map, pred_masks) # append to COCO-JSON dictionary
+ # callbacks.run('on_val_image_end', pred, predn, path, names, im[si])
+
+ # Plot images
+ if plots and batch_i < 3:
+ if len(plot_masks):
+ plot_masks = torch.cat(plot_masks, dim=0)
+ plot_images_and_masks(im, targets, masks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names)
+ plot_images_and_masks(im, output_to_target(preds, max_det=15), plot_masks, paths,
+ save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred
+
+ # callbacks.run('on_val_batch_end')
+
+ # Compute metrics
+ stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy
+ if len(stats) and stats[0].any():
+ results = ap_per_class_box_and_mask(*stats, plot=plots, save_dir=save_dir, names=names)
+ metrics.update(results)
+ nt = np.bincount(stats[4].astype(int), minlength=nc) # number of targets per class
+
+ # Print results
+ pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format
+ LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results()))
+ if nt.sum() == 0:
+ LOGGER.warning(f'WARNING: no labels found in {task} set, cannot compute metrics without labels ⚠️')
+
+ # Print results per class
+ if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
+ for i, c in enumerate(metrics.ap_class_index):
+ LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i)))
+
+ # Print speeds
+ t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image
+ if not training:
+ shape = (batch_size, 3, imgsz, imgsz)
+ LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)
+
+ # Plots
+ if plots:
+ confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
+ # callbacks.run('on_val_end')
+
+ mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results()
+
+ # Save JSON
+ if save_json and len(jdict):
+ w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
+ anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json
+ pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
+ LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') + with open(pred_json, 'w') as f: + json.dump(jdict, f) + + try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb + from pycocotools.coco import COCO + from pycocotools.cocoeval import COCOeval + + anno = COCO(anno_json) # init annotations api + pred = anno.loadRes(pred_json) # init predictions api + results = [] + for eval in COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm'): + if is_coco: + eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # img ID to evaluate + eval.evaluate() + eval.accumulate() + eval.summarize() + results.extend(eval.stats[:2]) # update results (mAP@0.5:0.95, mAP@0.5) + map_bbox, map50_bbox, map_mask, map50_mask = results + except Exception as e: + LOGGER.info(f'pycocotools unable to run: {e}') + + # Return results + model.float() # for training + if not training: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask + return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)') + parser.add_argument('--batch-size', type=int, default=32, help='batch size') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image') + parser.add_argument('--task', default='val', help='train, val, test, speed or study') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--verbose', action='store_true', help='report mAP by class') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') + parser.add_argument('--project', default=ROOT / 'runs/val-seg', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + opt = parser.parse_args() + opt.data = check_yaml(opt.data) # check YAML + # opt.save_json |= opt.data.endswith('coco.yaml') + opt.save_txt |= opt.save_hybrid + print_args(vars(opt)) + return opt + + +def main(opt): + check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) + + if opt.task in ('train', 'val', 'test'): # run normally + if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 + LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} > 0.001 produces invalid results ⚠️') + if opt.save_hybrid: + LOGGER.info('WARNING: --save-hybrid will return high mAP from hybrid labels, not from predictions alone ⚠️') + run(**vars(opt)) + + else: + weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] + opt.half = True # FP16 for fastest results + if opt.task == 'speed': # speed benchmarks + # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... + opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False + for opt.weights in weights: + run(**vars(opt), plots=False) + + elif opt.task == 'study': # speed vs mAP benchmarks + # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt... 
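+ # Each row saved to study_*.txt below holds the metrics/losses returned by run() followed
+ # by the three per-image speeds (ms), one row per image size in x; plot_val_study() then
+ # reads these files to chart speed vs mAP.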
+ for opt.weights in weights: + f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to + x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis + for opt.imgsz in x: # img-size + LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...') + r, _, t = run(**vars(opt), plots=False) + y.append(r + t) # results and times + np.savetxt(f, y, fmt='%10.4g') # save + os.system('zip -r study.zip study_*.txt') + plot_val_study(x=x) # plot + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/utils/dataloaders.py b/utils/dataloaders.py old mode 100755 new mode 100644 index d8ef11fd94b4..c04be853c580 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -484,6 +484,7 @@ def __init__(self, self.im_files = [self.im_files[i] for i in irect] self.label_files = [self.label_files[i] for i in irect] self.labels = [self.labels[i] for i in irect] + self.segments = [self.segments[i] for i in irect] self.shapes = s[irect] # wh ar = ar[irect] diff --git a/utils/general.py b/utils/general.py old mode 100755 new mode 100644 index f5fb2c93a3d5..8633511f89f5 --- a/utils/general.py +++ b/utils/general.py @@ -798,15 +798,18 @@ def clip_coords(boxes, shape): boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 -def non_max_suppression(prediction, - conf_thres=0.25, - iou_thres=0.45, - classes=None, - agnostic=False, - multi_label=False, - labels=(), - max_det=300): - """Non-Maximum Suppression (NMS) on inference results to reject overlapping bounding boxes +def non_max_suppression( + prediction, + conf_thres=0.25, + iou_thres=0.45, + classes=None, + agnostic=False, + multi_label=False, + labels=(), + max_det=300, + nm=0, # number of masks +): + """Non-Maximum Suppression (NMS) on inference results to reject overlapping detections Returns: list of detections, on (n,6) tensor per image [xyxy, conf, cls] @@ -816,7 +819,7 @@ def non_max_suppression(prediction, prediction = prediction[0] # select only inference output bs = prediction.shape[0] # batch size - nc = prediction.shape[2] - 5 # number of classes + nc = prediction.shape[2] - nm - 5 # number of classes xc = prediction[..., 4] > conf_thres # candidates # Checks @@ -827,13 +830,14 @@ def non_max_suppression(prediction, # min_wh = 2 # (pixels) minimum box width and height max_wh = 7680 # (pixels) maximum box width and height max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() - time_limit = 0.3 + 0.03 * bs # seconds to quit after + time_limit = 0.5 + 0.05 * bs # seconds to quit after redundant = True # require redundant detections multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) merge = False # use merge-NMS t = time.time() - output = [torch.zeros((0, 6), device=prediction.device)] * bs + mi = 5 + nc # mask start index + output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs for xi, x in enumerate(prediction): # image index, image inference # Apply constraints # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height @@ -842,7 +846,7 @@ def non_max_suppression(prediction, # Cat apriori labels if autolabelling if labels and len(labels[xi]): lb = labels[xi] - v = torch.zeros((len(lb), nc + 5), device=x.device) + v = torch.zeros((len(lb), nc + nm + 5), device=x.device) v[:, :4] = lb[:, 1:5] # box v[:, 4] = 1.0 # conf v[range(len(lb)), lb[:, 0].long() + 5] = 1.0 # cls @@ -855,16 +859,17 @@ def non_max_suppression(prediction, # Compute conf x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf - # Box (center x, center y, width, 
height) to (x1, y1, x2, y2)
- box = xywh2xyxy(x[:, :4])
+
+ # Box/Mask
+ box = xywh2xyxy(x[:, :4]) # (center_x, center_y, width, height) to (x1, y1, x2, y2)
+ mask = x[:, mi:] # zero columns if no masks

 # Detections matrix nx6 (xyxy, conf, cls)
 if multi_label:
- i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
- x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
+ i, j = (x[:, 5:mi] > conf_thres).nonzero(as_tuple=False).T
+ x = torch.cat((box[i], x[i, 5 + j, None], j[:, None].float(), mask[i]), 1)
 else: # best class only
- conf, j = x[:, 5:].max(1, keepdim=True)
- x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
+ conf, j = x[:, 5:mi].max(1, keepdim=True)
+ x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres]

 # Filter by class
 if classes is not None:
@@ -880,6 +885,8 @@ def non_max_suppression(prediction,
 continue
 elif n > max_nms: # excess boxes
 x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence
+ else:
+ x = x[x[:, 4].argsort(descending=True)] # sort by confidence

 # Batched NMS
 c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
diff --git a/utils/metrics.py b/utils/metrics.py
index ee7d33982cfc..001813cbcd65 100644
--- a/utils/metrics.py
+++ b/utils/metrics.py
@@ -28,7 +28,7 @@ def smooth(y, f=0.05):
 return np.convolve(yp, np.ones(nf) / nf, mode='valid') # y-smoothed
-def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16):
+def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=""):
 """ Compute the average precision, given the recall and precision curves.
 Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
 # Arguments
@@ -83,10 +83,10 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names
 names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data
 names = dict(enumerate(names)) # to dict
 if plot:
- plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names)
- plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1')
- plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision')
- plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall')
+ plot_pr_curve(px, py, ap, Path(save_dir) / f'{prefix}PR_curve.png', names)
+ plot_mc_curve(px, f1, Path(save_dir) / f'{prefix}F1_curve.png', names, ylabel='F1')
+ plot_mc_curve(px, p, Path(save_dir) / f'{prefix}P_curve.png', names, ylabel='Precision')
+ plot_mc_curve(px, r, Path(save_dir) / f'{prefix}R_curve.png', names, ylabel='Recall')
 i = smooth(f1.mean(0), 0.1).argmax() # max F1 index
 p, r, f1 = p[:, i], r[:, i], f1[:, i]
diff --git a/utils/plots.py b/utils/plots.py
index 0530d0abdf48..d8d5b225a774 100644
--- a/utils/plots.py
+++ b/utils/plots.py
@@ -23,6 +23,7 @@ from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_coords, increment_path, is_ascii,
 xywh2xyxy, xyxy2xywh)
 from utils.metrics import fitness
+from utils.segment.general import scale_image

 # Settings
 RANK = int(os.getenv('RANK', -1))
@@ -113,6 +114,52 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2
 thickness=tf, lineType=cv2.LINE_AA)
+
+ def masks(self, masks, colors, im_gpu=None, alpha=0.5):
+ """Plot masks at once. 
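+ Handles two cases: numpy masks are composited directly onto self.im, while CUDA masks
+ are alpha-composited in a single batched pass on the GPU before being copied back.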
+ Args:
+ masks (tensor): predicted masks on cuda, shape: [n, h, w]
+ colors (List[List[Int]]): colors for predicted masks, [[r, g, b] * n]
+ im_gpu (tensor): image on CUDA, shape: [3, h, w], range: [0, 1]
+ alpha (float): mask transparency: 0.0 fully transparent, 1.0 opaque
+ """
+ if self.pil:
+ # convert to numpy first
+ self.im = np.asarray(self.im).copy()
+ if im_gpu is None:
+ # Add multiple masks of shape(h,w,n) with colors list([r,g,b], [r,g,b], ...)
+ if len(masks) == 0:
+ return
+ if isinstance(masks, torch.Tensor):
+ masks = torch.as_tensor(masks, dtype=torch.uint8)
+ masks = masks.permute(1, 2, 0).contiguous()
+ masks = masks.cpu().numpy()
+ # masks = np.ascontiguousarray(masks.transpose(1, 2, 0))
+ masks = scale_image(masks.shape[:2], masks, self.im.shape)
+ masks = np.asarray(masks, dtype=np.float32)
+ colors = np.asarray(colors, dtype=np.float32) # shape(n,3)
+ s = masks.sum(2, keepdims=True).clip(0, 1) # add all masks together
+ masks = (masks @ colors).clip(0, 255) # (h,w,n) @ (n,3) = (h,w,3)
+ self.im[:] = masks * alpha + self.im * (1 - s * alpha)
+ else:
+ if len(masks) == 0:
+ self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255
+ colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0
+ colors = colors[:, None, None] # shape(n,1,1,3)
+ masks = masks.unsqueeze(3) # shape(n,h,w,1)
+ masks_color = masks * (colors * alpha) # shape(n,h,w,3)
+
+ inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1)
+ mcs = (masks_color * inv_alph_masks).sum(0) * 2 # mask color summand, shape(h,w,3) after sum
+
+ im_gpu = im_gpu.flip(dims=[0]) # flip channel
+ im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3)
+ im_gpu = im_gpu * inv_alph_masks[-1] + mcs
+ im_mask = (im_gpu * 255).byte().cpu().numpy()
+ self.im[:] = scale_image(im_gpu.shape, im_mask, self.im.shape)
+ if self.pil:
+ # convert im back to PIL and update draw
+ self.fromarray(self.im)
+
 def rectangle(self, xy, fill=None, outline=None, width=1):
 # Add rectangle to image (PIL-only)
 self.draw.rectangle(xy, fill, outline, width)
@@ -124,6 +171,11 @@ def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'):
 xy[1] += 1 - h
 self.draw.text(xy, text, fill=txt_color, font=self.font)
+ def fromarray(self, im):
+ # Update self.im from a numpy array
+ self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
+ self.draw = ImageDraw.Draw(self.im)
+
 def result(self):
 # Return annotated image as array
 return np.asarray(self.im)
@@ -180,26 +232,31 @@ def butter_lowpass(cutoff, fs, order):
 return filtfilt(b, a, data) # forward-backward filter
-def output_to_target(output):
- # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
+def output_to_target(output, max_det=300):
+ # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting
 targets = []
 for i, o in enumerate(output):
- targets.extend([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf] for *box, conf, cls in o.cpu().numpy())
- return np.array(targets)
+ box, conf, cls = o[:max_det, :6].cpu().split((4, 1, 1), 1)
+ j = torch.full((conf.shape[0], 1), i)
+ targets.append(torch.cat((j, cls, xyxy2xywh(box), conf), 1))
+ return torch.cat(targets, 0).numpy()
@threaded
-def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16):
+def plot_images(images, targets, paths=None, fname='images.jpg', names=None):
 # Plot image grid with labels
 if isinstance(images, torch.Tensor):
 images = images.cpu().float().numpy()
 if 
isinstance(targets, torch.Tensor): targets = targets.cpu().numpy() - if np.max(images[0]) <= 1: - images *= 255 # de-normalise (optional) + + max_size = 1920 # max image size + max_subplots = 16 # max image subplots, i.e. 4x4 bs, _, h, w = images.shape # batch size, _, height, width bs = min(bs, max_subplots) # limit plot images ns = np.ceil(bs ** 0.5) # number of subplots (square) + if np.max(images[0]) <= 1: + images *= 255 # de-normalise (optional) # Build Image mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init diff --git a/utils/segment/__init__.py b/utils/segment/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/utils/segment/augmentations.py b/utils/segment/augmentations.py new file mode 100644 index 000000000000..169addedf0f5 --- /dev/null +++ b/utils/segment/augmentations.py @@ -0,0 +1,104 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Image augmentation functions +""" + +import math +import random + +import cv2 +import numpy as np + +from ..augmentations import box_candidates +from ..general import resample_segments, segment2box + + +def mixup(im, labels, segments, im2, labels2, segments2): + # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 + im = (im * r + im2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + segments = np.concatenate((segments, segments2), 0) + return im, labels, segments + + +def random_perspective(im, + targets=(), + segments=(), + degrees=10, + translate=.1, + scale=.1, + shear=10, + perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = im.shape[0] + border[0] * 2 # shape(h,w,c) + width = im.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -im.shape[1] / 2 # x translation (pixels) + C[1, 2] = -im.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * width) # x translation (pixels) + T[1, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * height) # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(im[:, :, ::-1]) # base + # ax[1].imshow(im2[:, :, ::-1]) # warped + + # Transform 
label coordinates + n = len(targets) + new_segments = [] + if n: + new = np.zeros((n, 4)) + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]) # perspective rescale or affine + + # clip + new[i] = segment2box(xy, width, height) + new_segments.append(xy) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01) + targets = targets[i] + targets[:, 1:5] = new[i] + new_segments = np.array(new_segments)[i] + + return im, targets, new_segments diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py new file mode 100644 index 000000000000..f6fe642d077f --- /dev/null +++ b/utils/segment/dataloaders.py @@ -0,0 +1,330 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Dataloaders +""" + +import os +import random + +import cv2 +import numpy as np +import torch +from torch.utils.data import DataLoader, distributed + +from ..augmentations import augment_hsv, copy_paste, letterbox +from ..dataloaders import InfiniteDataLoader, LoadImagesAndLabels, seed_worker +from ..general import LOGGER, xyn2xy, xywhn2xyxy, xyxy2xywhn +from ..torch_utils import torch_distributed_zero_first +from .augmentations import mixup, random_perspective + + +def create_dataloader(path, + imgsz, + batch_size, + stride, + single_cls=False, + hyp=None, + augment=False, + cache=False, + pad=0.0, + rect=False, + rank=-1, + workers=8, + image_weights=False, + quad=False, + prefix='', + shuffle=False, + mask_downsample_ratio=1, + overlap_mask=False): + if rect and shuffle: + LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False') + shuffle = False + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = LoadImagesAndLabelsAndMasks( + path, + imgsz, + batch_size, + augment=augment, # augmentation + hyp=hyp, # hyperparameters + rect=rect, # rectangular batches + cache_images=cache, + single_cls=single_cls, + stride=int(stride), + pad=pad, + image_weights=image_weights, + prefix=prefix, + downsample_ratio=mask_downsample_ratio, + overlap=overlap_mask) + + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() # number of CUDA devices + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates + # generator = torch.Generator() + # generator.manual_seed(0) + return loader( + dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=True, + collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn, + worker_init_fn=seed_worker, + # generator=generator, + ), dataset + + +class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels): # for training/testing + + def __init__( + self, + path, + img_size=640, + batch_size=16, + augment=False, + hyp=None, + rect=False, + image_weights=False, + cache_images=False, + single_cls=False, + stride=32, + pad=0, + prefix="", + downsample_ratio=1, + overlap=False, + ): + super().__init__(path, img_size, batch_size, augment, hyp, rect, image_weights, cache_images, single_cls, + stride, pad, prefix) + 
self.downsample_ratio = downsample_ratio + self.overlap = overlap + + def __getitem__(self, index): + index = self.indices[index] # linear, shuffled, or image_weights + + hyp = self.hyp + mosaic = self.mosaic and random.random() < hyp['mosaic'] + masks = [] + if mosaic: + # Load mosaic + img, labels, segments = self.load_mosaic(index) + shapes = None + + # MixUp augmentation + if random.random() < hyp["mixup"]: + img, labels, segments = mixup(img, labels, segments, *self.load_mosaic(random.randint(0, self.n - 1))) + + else: + # Load image + img, (h0, w0), (h, w) = self.load_image(index) + + # Letterbox + shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape + img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) + shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling + + labels = self.labels[index].copy() + # [array, array, ....], array.shape=(num_points, 2), xyxyxyxy + segments = self.segments[index].copy() + if len(segments): + for i_s in range(len(segments)): + segments[i_s] = xyn2xy( + segments[i_s], + ratio[0] * w, + ratio[1] * h, + padw=pad[0], + padh=pad[1], + ) + if labels.size: # normalized xywh to pixel xyxy format + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) + + if self.augment: + img, labels, segments = random_perspective( + img, + labels, + segments=segments, + degrees=hyp["degrees"], + translate=hyp["translate"], + scale=hyp["scale"], + shear=hyp["shear"], + perspective=hyp["perspective"], + return_seg=True, + ) + + nl = len(labels) # number of labels + if nl: + labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1e-3) + if self.overlap: + masks, sorted_idx = polygons2masks_overlap(img.shape[:2], + segments, + downsample_ratio=self.downsample_ratio) + masks = masks[None] # (640, 640) -> (1, 640, 640) + labels = labels[sorted_idx] + else: + masks = polygons2masks(img.shape[:2], segments, color=1, downsample_ratio=self.downsample_ratio) + + masks = (torch.from_numpy(masks) if len(masks) else torch.zeros(1 if self.overlap else nl, img.shape[0] // + self.downsample_ratio, img.shape[1] // + self.downsample_ratio)) + # TODO: albumentations support + if self.augment: + # Albumentations + # there are some augmentation that won't change boxes and masks, + # so just be it for now. + img, labels = self.albumentations(img, labels) + nl = len(labels) # update after albumentations + + # HSV color-space + augment_hsv(img, hgain=hyp["hsv_h"], sgain=hyp["hsv_s"], vgain=hyp["hsv_v"]) + + # Flip up-down + if random.random() < hyp["flipud"]: + img = np.flipud(img) + if nl: + labels[:, 2] = 1 - labels[:, 2] + masks = torch.flip(masks, dims=[1]) + + # Flip left-right + if random.random() < hyp["fliplr"]: + img = np.fliplr(img) + if nl: + labels[:, 1] = 1 - labels[:, 1] + masks = torch.flip(masks, dims=[2]) + + # Cutouts # labels = cutout(img, labels, p=0.5) + + labels_out = torch.zeros((nl, 6)) + if nl: + labels_out[:, 1:] = torch.from_numpy(labels) + + # Convert + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img = np.ascontiguousarray(img) + + return (torch.from_numpy(img), labels_out, self.im_files[index], shapes, masks) + + def load_mosaic(self, index): + # YOLOv5 4-mosaic loader. 
Loads 1 image + 3 random images into a 4-image mosaic
+        labels4, segments4 = [], []
+        s = self.img_size
+        yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border)  # mosaic center x, y
+
+        indices = [index] + random.choices(self.indices, k=3)  # 3 additional image indices
+        for i, index in enumerate(indices):
+            # Load image
+            img, _, (h, w) = self.load_image(index)
+
+            # place img in img4
+            if i == 0:  # top left
+                img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
+                x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
+                x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
+            elif i == 1:  # top right
+                x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
+                x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
+            elif i == 2:  # bottom left
+                x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
+                x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
+            elif i == 3:  # bottom right
+                x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
+                x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
+
+            img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
+            padw = x1a - x1b
+            padh = y1a - y1b
+
+            labels, segments = self.labels[index].copy(), self.segments[index].copy()
+
+            if labels.size:
+                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy format
+                segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
+            labels4.append(labels)
+            segments4.extend(segments)
+
+        # Concat/clip labels
+        labels4 = np.concatenate(labels4, 0)
+        for x in (labels4[:, 1:], *segments4):
+            np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
+        # img4, labels4 = replicate(img4, labels4)  # replicate
+
+        # Augment
+        img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp["copy_paste"])
+        img4, labels4, segments4 = random_perspective(img4,
+                                                      labels4,
+                                                      segments4,
+                                                      degrees=self.hyp["degrees"],
+                                                      translate=self.hyp["translate"],
+                                                      scale=self.hyp["scale"],
+                                                      shear=self.hyp["shear"],
+                                                      perspective=self.hyp["perspective"],
+                                                      border=self.mosaic_border)  # border to remove
+        return img4, labels4, segments4
+
+    @staticmethod
+    def collate_fn(batch):
+        img, label, path, shapes, masks = zip(*batch)  # transposed
+        batched_masks = torch.cat(masks, 0)
+        for i, l in enumerate(label):
+            l[:, 0] = i  # add target image index for build_targets()
+        return torch.stack(img, 0), torch.cat(label, 0), path, shapes, batched_masks
+
+
+def polygon2mask(img_size, polygons, color=1, downsample_ratio=1):
+    """
+    Args:
+        img_size (tuple): The image size.
+        polygons (np.ndarray): [N, M], N is the number of polygons,
+            M is the number of coordinate values per polygon (divisible by 2, stored as x, y pairs).
+    """
+    mask = np.zeros(img_size, dtype=np.uint8)
+    polygons = np.asarray(polygons)
+    polygons = polygons.astype(np.int32)
+    shape = polygons.shape
+    polygons = polygons.reshape(shape[0], -1, 2)
+    cv2.fillPoly(mask, polygons, color=color)
+    nh, nw = (img_size[0] // downsample_ratio, img_size[1] // downsample_ratio)
+    # NOTE: fillPoly first, then resize, to keep the loss calculation consistent
+    # with the mask-ratio=1 case
+    mask = cv2.resize(mask, (nw, nh))
+    return mask
+
+
+def polygons2masks(img_size, polygons, color, downsample_ratio=1):
+    """
+    Args:
+        img_size (tuple): The image size.
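+            Expected as (h, w), the same convention polygon2mask() uses above.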
+        polygons (list[np.ndarray]): each polygon is [N, M],
+            N is the number of polygons,
+            M is the number of coordinate values per polygon (divisible by 2, stored as x, y pairs).
+    """
+    masks = []
+    for si in range(len(polygons)):
+        mask = polygon2mask(img_size, [polygons[si].reshape(-1)], color, downsample_ratio)
+        masks.append(mask)
+    return np.array(masks)
+
+
+def polygons2masks_overlap(img_size, segments, downsample_ratio=1):
+    """Return an (img_size[0] // downsample_ratio, img_size[1] // downsample_ratio) overlap mask
+    and the area-sorted segment index."""
+    masks = np.zeros((img_size[0] // downsample_ratio, img_size[1] // downsample_ratio), dtype=np.uint8)
+    areas = []
+    ms = []
+    for si in range(len(segments)):
+        mask = polygon2mask(
+            img_size,
+            [segments[si].reshape(-1)],
+            downsample_ratio=downsample_ratio,
+            color=1,
+        )
+        ms.append(mask)
+        areas.append(mask.sum())
+    areas = np.asarray(areas)
+    index = np.argsort(-areas)
+    ms = np.array(ms)[index]
+    for i in range(len(segments)):
+        mask = ms[i] * (i + 1)
+        masks = masks + mask
+        masks = np.clip(masks, a_min=0, a_max=i + 1)
+    return masks, index
diff --git a/utils/segment/general.py b/utils/segment/general.py
new file mode 100644
index 000000000000..36547ed0889c
--- /dev/null
+++ b/utils/segment/general.py
@@ -0,0 +1,120 @@
+import cv2
+import torch
+import torch.nn.functional as F
+
+
+def crop_mask(masks, boxes):
+    """
+    "Crop" predicted masks by zeroing out everything not in the predicted bbox.
+    Vectorized by Chong (thanks Chong).
+
+    Args:
+        - masks should be a size [n, h, w] tensor of masks
+        - boxes should be a size [n, 4] tensor of bbox coords in relative point form
+    """
+
+    n, h, w = masks.shape
+    x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1)  # x1 shape(n,1,1)
+    r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :]  # x/col indices shape(1,1,w)
+    c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None]  # y/row indices shape(1,h,1)
+
+    return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))
+
+
+def process_mask_upsample(protos, masks_in, bboxes, shape):
+    """
+    Crop after upsample.
+    protos: [mask_dim, mask_h, mask_w]
+    masks_in: [n, mask_dim], n is number of masks after nms
+    bboxes: [n, 4], n is number of masks after nms
+    shape: input image size, (h, w)
+
+    return: masks, (n, h, w)
+    """
+
+    c, mh, mw = protos.shape  # CHW
+    masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)
+    masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0]  # CHW
+    masks = crop_mask(masks, bboxes)  # CHW
+    return masks.gt_(0.5)
+
+
+def process_mask(protos, masks_in, bboxes, shape, upsample=False):
+    """
+    Crop before upsample.
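+    Cropping at prototype resolution before the (optional) upsample is faster than
+    process_mask_upsample() above, at the cost of slightly coarser mask edges.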
+    protos: [mask_dim, mask_h, mask_w]
+    masks_in: [n, mask_dim], n is number of masks after nms
+    bboxes: [n, 4], n is number of masks after nms
+    shape: input image size, (h, w)
+
+    return: masks, (n, h, w)
+    """
+
+    c, mh, mw = protos.shape  # CHW
+    ih, iw = shape
+    masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)  # CHW
+
+    downsampled_bboxes = bboxes.clone()
+    downsampled_bboxes[:, 0] *= mw / iw
+    downsampled_bboxes[:, 2] *= mw / iw
+    downsampled_bboxes[:, 3] *= mh / ih
+    downsampled_bboxes[:, 1] *= mh / ih
+
+    masks = crop_mask(masks, downsampled_bboxes)  # CHW
+    if upsample:
+        masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0]  # CHW
+    return masks.gt_(0.5)
+
+
+def scale_image(im1_shape, masks, im0_shape, ratio_pad=None):
+    """
+    im1_shape: model input shape, [h, w]
+    im0_shape: original image shape, [h, w, 3]
+    masks: [h, w, num]
+    """
+    # Rescale coordinates (xyxy) from im1_shape to im0_shape
+    if ratio_pad is None:  # calculate from im0_shape
+        gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1])  # gain = old / new
+        pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2  # wh padding
+    else:
+        pad = ratio_pad[1]
+    top, left = int(pad[1]), int(pad[0])  # y, x
+    bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0])
+
+    if len(masks.shape) < 2:
+        raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}')
+    masks = masks[top:bottom, left:right]
+    # masks = masks.permute(2, 0, 1).contiguous()
+    # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0]
+    # masks = masks.permute(1, 2, 0).contiguous()
+    masks = cv2.resize(masks, (im0_shape[1], im0_shape[0]))
+
+    if len(masks.shape) == 2:
+        masks = masks[:, :, None]
+    return masks
+
+
+def mask_iou(mask1, mask2, eps=1e-7):
+    """
+    mask1: [N, n], N is the number of predicted objects
+    mask2: [M, n], M is the number of gt objects
+    Note: n means image_w x image_h
+
+    return: masks iou, [N, M]
+    """
+    intersection = torch.matmul(mask1, mask2.t()).clamp(0)
+    union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection  # (area1 + area2) - intersection
+    return intersection / (union + eps)
+
+
+def masks_iou(mask1, mask2, eps=1e-7):
+    """
+    mask1: [N, n], N is the number of predicted objects
+    mask2: [N, n], N is the number of gt objects (paired with mask1)
+    Note: n means image_w x image_h
+
+    return: masks iou, (N, )
+    """
+    intersection = (mask1 * mask2).sum(1).clamp(0)  # (N, )
+    union = (mask1.sum(1) + mask2.sum(1))[None] - intersection  # (area1 + area2) - intersection
+    return intersection / (union + eps)
diff --git a/utils/segment/loss.py b/utils/segment/loss.py
new file mode 100644
index 000000000000..b45b2c27e0a0
--- /dev/null
+++ b/utils/segment/loss.py
@@ -0,0 +1,186 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ..general import xywh2xyxy
+from ..loss import FocalLoss, smooth_BCE
+from ..metrics import bbox_iou
+from ..torch_utils import de_parallel
+from .general import crop_mask
+
+
+class ComputeLoss:
+    # Compute losses
+    def __init__(self, model, autobalance=False, overlap=False):
+        self.sort_obj_iou = False
+        self.overlap = overlap
+        device = next(model.parameters()).device  # get model device
+        h = model.hyp  # hyperparameters
+        self.device = device
+
+        # Define criteria
+        BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
+        BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
+
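+        # NOTE: pos_weight multiplies the positive term of BCEWithLogitsLoss, i.e. per element
+        # loss = -[pos_weight * y * log(sigmoid(x)) + (1 - y) * log(1 - sigmoid(x))],
+        # so 'cls_pw' / 'obj_pw' values > 1 up-weight positive cls/obj targets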
+ # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + m = de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance + self.na = m.na # number of anchors + self.nc = m.nc # number of classes + self.nl = m.nl # number of layers + self.nm = m.nm # number of masks + self.anchors = m.anchors + self.device = device + + def __call__(self, preds, targets, masks): # predictions, targets, model + p, proto = preds + bs, nm, mask_h, mask_w = proto.shape # batch size, number of masks, mask height, mask width + lcls = torch.zeros(1, device=self.device) + lbox = torch.zeros(1, device=self.device) + lobj = torch.zeros(1, device=self.device) + lseg = torch.zeros(1, device=self.device) + tcls, tbox, indices, anchors, tidxs, xywhn = self.build_targets(p, targets) # targets + + # Losses + for i, pi in enumerate(p): # layer index, layer predictions + b, a, gj, gi = indices[i] # image, anchor, gridy, gridx + tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj + + n = b.shape[0] # number of targets + if n: + pxy, pwh, _, pcls, pmask = pi[b, a, gj, gi].split((2, 2, 1, self.nc, nm), 1) # subset of predictions + + # Box regression + pxy = pxy.sigmoid() * 2 - 0.5 + pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i] + pbox = torch.cat((pxy, pwh), 1) # predicted box + iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze() # iou(prediction, target) + lbox += (1.0 - iou).mean() # iou loss + + # Objectness + iou = iou.detach().clamp(0).type(tobj.dtype) + if self.sort_obj_iou: + j = iou.argsort() + b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j] + if self.gr < 1: + iou = (1.0 - self.gr) + self.gr * iou + tobj[b, a, gj, gi] = iou # iou ratio + + # Classification + if self.nc > 1: # cls loss (only if multiple classes) + t = torch.full_like(pcls, self.cn, device=self.device) # targets + t[range(n), tcls[i]] = self.cp + lcls += self.BCEcls(pcls, t) # BCE + + # Mask regression + if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample + masks = F.interpolate(masks[None], (mask_h, mask_w), mode="nearest")[0] + marea = xywhn[i][:, 2:].prod(1) # mask width, height normalized + mxyxy = xywh2xyxy(xywhn[i] * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device)) + for bi in b.unique(): + j = b == bi # matching index + if self.overlap: + mask_gti = torch.where(masks[bi][None] == tidxs[i][j].view(-1, 1, 1), 1.0, 0.0) + else: + mask_gti = masks[tidxs[i]][j] + lseg += self.single_mask_loss(mask_gti, pmask[j], proto[bi], mxyxy[j], marea[j]) + + obji = self.BCEobj(pi[..., 4], tobj) + lobj += obji * self.balance[i] # obj loss + if self.autobalance: + self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() + + if self.autobalance: + self.balance = [x / self.balance[self.ssi] for x in self.balance] + lbox *= self.hyp["box"] + lobj *= self.hyp["obj"] + lcls *= self.hyp["cls"] + lseg *= self.hyp["box"] / bs + + loss = lbox + lobj + lcls + lseg + return loss * bs, torch.cat((lbox, lseg, lobj, lcls)).detach() + + def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): + # Mask loss for 
one image + pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n,32) @ (32,80,80) -> (n,80,80) + loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction="none") + return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() + + def build_targets(self, p, targets): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + tcls, tbox, indices, anch, tidxs, xywhn = [], [], [], [], [], [] + gain = torch.ones(8, device=self.device) # normalized to gridspace gain + ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + if self.overlap: + batch = p[0].shape[0] + ti = [] + for i in range(batch): + num = (targets[:, 0] == i).sum() # find number of targets of each image + ti.append(torch.arange(num, device=self.device).float().view(1, num).repeat(na, 1) + 1) # (na, num) + ti = torch.cat(ti, 1) # (na, nt) + else: + ti = torch.arange(nt, device=self.device).float().view(1, nt).repeat(na, 1) + targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None], ti[..., None]), 2) # append anchor indices + + g = 0.5 # bias + off = torch.tensor( + [ + [0, 0], + [1, 0], + [0, 1], + [-1, 0], + [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], + device=self.device).float() * g # offsets + + for i in range(self.nl): + anchors, shape = self.anchors[i], p[i].shape + gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain # shape(3,n,7) + if nt: + # Matches + r = t[..., 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1 < g) & (gxy > 1)).T + l, m = ((gxi % 1 < g) & (gxi > 1)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + bc, gxy, gwh, at = t.chunk(4, 1) # (image, class), grid xy, grid wh, anchors + (a, tidx), (b, c) = at.long().T, bc.long().T # anchors, image, class + gij = (gxy - offsets).long() + gi, gj = gij.T # grid indices + + # Append + indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, anchor, grid + tbox.append(torch.cat((gxy - gij, gwh), 1)) # box + anch.append(anchors[a]) # anchors + tcls.append(c) # class + tidxs.append(tidx) + xywhn.append(torch.cat((gxy, gwh), 1) / gain[2:6]) # xywh normalized + + return tcls, tbox, indices, anch, tidxs, xywhn diff --git a/utils/segment/metrics.py b/utils/segment/metrics.py new file mode 100644 index 000000000000..b09ce23fb9e3 --- /dev/null +++ b/utils/segment/metrics.py @@ -0,0 +1,210 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Model validation metrics +""" + +import numpy as np + +from ..metrics import ap_per_class + + +def fitness(x): + # Model fitness as a weighted combination of metrics + w = [0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9] + return (x[:, :8] * w).sum(1) + + +def ap_per_class_box_and_mask( + tp_m, + tp_b, + conf, + pred_cls, + target_cls, + plot=False, + save_dir=".", + names=(), +): + """ + Args: + tp_b: tp of boxes. + tp_m: tp of masks. + other arguments see `func: ap_per_class`. 
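+
+    Returns:
+        Dict with keys "boxes" and "masks", each holding the p, r, ap, f1 and ap_class
+        arrays computed by `ap_per_class` for that task.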
+ """ + results_boxes = ap_per_class(tp_b, + conf, + pred_cls, + target_cls, + plot=plot, + save_dir=save_dir, + names=names, + prefix="Box")[2:] + results_masks = ap_per_class(tp_m, + conf, + pred_cls, + target_cls, + plot=plot, + save_dir=save_dir, + names=names, + prefix="Mask")[2:] + + results = { + "boxes": { + "p": results_boxes[0], + "r": results_boxes[1], + "ap": results_boxes[3], + "f1": results_boxes[2], + "ap_class": results_boxes[4]}, + "masks": { + "p": results_masks[0], + "r": results_masks[1], + "ap": results_masks[3], + "f1": results_masks[2], + "ap_class": results_masks[4]}} + return results + + +class Metric: + + def __init__(self) -> None: + self.p = [] # (nc, ) + self.r = [] # (nc, ) + self.f1 = [] # (nc, ) + self.all_ap = [] # (nc, 10) + self.ap_class_index = [] # (nc, ) + + @property + def ap50(self): + """AP@0.5 of all classes. + Return: + (nc, ) or []. + """ + return self.all_ap[:, 0] if len(self.all_ap) else [] + + @property + def ap(self): + """AP@0.5:0.95 + Return: + (nc, ) or []. + """ + return self.all_ap.mean(1) if len(self.all_ap) else [] + + @property + def mp(self): + """mean precision of all classes. + Return: + float. + """ + return self.p.mean() if len(self.p) else 0.0 + + @property + def mr(self): + """mean recall of all classes. + Return: + float. + """ + return self.r.mean() if len(self.r) else 0.0 + + @property + def map50(self): + """Mean AP@0.5 of all classes. + Return: + float. + """ + return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0 + + @property + def map(self): + """Mean AP@0.5:0.95 of all classes. + Return: + float. + """ + return self.all_ap.mean() if len(self.all_ap) else 0.0 + + def mean_results(self): + """Mean of results, return mp, mr, map50, map""" + return (self.mp, self.mr, self.map50, self.map) + + def class_result(self, i): + """class-aware result, return p[i], r[i], ap50[i], ap[i]""" + return (self.p[i], self.r[i], self.ap50[i], self.ap[i]) + + def get_maps(self, nc): + maps = np.zeros(nc) + self.map + for i, c in enumerate(self.ap_class_index): + maps[c] = self.ap[i] + return maps + + def update(self, results): + """ + Args: + results: tuple(p, r, ap, f1, ap_class) + """ + p, r, all_ap, f1, ap_class_index = results + self.p = p + self.r = r + self.all_ap = all_ap + self.f1 = f1 + self.ap_class_index = ap_class_index + + +class Metrics: + """Metric for boxes and masks.""" + + def __init__(self) -> None: + self.metric_box = Metric() + self.metric_mask = Metric() + + def update(self, results): + """ + Args: + results: Dict{'boxes': Dict{}, 'masks': Dict{}} + """ + self.metric_box.update(list(results["boxes"].values())) + self.metric_mask.update(list(results["masks"].values())) + + def mean_results(self): + return self.metric_box.mean_results() + self.metric_mask.mean_results() + + def class_result(self, i): + return self.metric_box.class_result(i) + self.metric_mask.class_result(i) + + def get_maps(self, nc): + return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc) + + @property + def ap_class_index(self): + # boxes and masks have the same ap_class_index + return self.metric_box.ap_class_index + + +KEYS = [ + "train/box_loss", + "train/seg_loss", # train loss + "train/obj_loss", + "train/cls_loss", + "metrics/precision(B)", + "metrics/recall(B)", + "metrics/mAP_0.5(B)", + "metrics/mAP_0.5:0.95(B)", # metrics + "metrics/precision(M)", + "metrics/recall(M)", + "metrics/mAP_0.5(M)", + "metrics/mAP_0.5:0.95(M)", # metrics + "val/box_loss", + "val/seg_loss", # val loss + "val/obj_loss", + "val/cls_loss", + 
"x/lr0", + "x/lr1", + "x/lr2",] + +BEST_KEYS = [ + "best/epoch", + "best/precision(B)", + "best/recall(B)", + "best/mAP_0.5(B)", + "best/mAP_0.5:0.95(B)", + "best/precision(M)", + "best/recall(M)", + "best/mAP_0.5(M)", + "best/mAP_0.5:0.95(M)",] diff --git a/utils/segment/plots.py b/utils/segment/plots.py new file mode 100644 index 000000000000..e882c14390f0 --- /dev/null +++ b/utils/segment/plots.py @@ -0,0 +1,143 @@ +import contextlib +import math +from pathlib import Path + +import cv2 +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import torch + +from .. import threaded +from ..general import xywh2xyxy +from ..plots import Annotator, colors + + +@threaded +def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg', names=None): + # Plot image grid with labels + if isinstance(images, torch.Tensor): + images = images.cpu().float().numpy() + if isinstance(targets, torch.Tensor): + targets = targets.cpu().numpy() + if isinstance(masks, torch.Tensor): + masks = masks.cpu().numpy().astype(int) + + max_size = 1920 # max image size + max_subplots = 16 # max image subplots, i.e. 4x4 + bs, _, h, w = images.shape # batch size, _, height, width + bs = min(bs, max_subplots) # limit plot images + ns = np.ceil(bs ** 0.5) # number of subplots (square) + if np.max(images[0]) <= 1: + images *= 255 # de-normalise (optional) + + # Build Image + mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init + for i, im in enumerate(images): + if i == max_subplots: # if last batch has fewer images than we expect + break + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + im = im.transpose(1, 2, 0) + mosaic[y:y + h, x:x + w, :] = im + + # Resize (optional) + scale = max_size / ns / max(h, w) + if scale < 1: + h = math.ceil(scale * h) + w = math.ceil(scale * w) + mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) + + # Annotate + fs = int((h + w) * ns * 0.01) # font size + annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) + for i in range(i + 1): + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders + if paths: + annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames + if len(targets) > 0: + idx = targets[:, 0] == i + ti = targets[idx] # image targets + + boxes = xywh2xyxy(ti[:, 2:6]).T + classes = ti[:, 1].astype('int') + labels = ti.shape[1] == 6 # labels if no conf column + conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) + + if boxes.shape[1]: + if boxes.max() <= 1.01: # if normalized with tolerance 0.01 + boxes[[0, 2]] *= w # scale to pixels + boxes[[1, 3]] *= h + elif scale < 1: # absolute coords need scale if image scales + boxes *= scale + boxes[[0, 2]] += x + boxes[[1, 3]] += y + for j, box in enumerate(boxes.T.tolist()): + cls = classes[j] + color = colors(cls) + cls = names[cls] if names else cls + if labels or conf[j] > 0.25: # 0.25 conf thresh + label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' + annotator.box_label(box, label, color=color) + + # Plot masks + if len(masks): + if masks.max() > 1.0: # mean that masks are overlap + image_masks = masks[[i]] # (1, 640, 640) + nl = len(ti) + index = np.arange(nl).reshape(nl, 1, 1) + 1 + image_masks = np.repeat(image_masks, nl, axis=0) + image_masks = np.where(image_masks == index, 1.0, 0.0) + else: + image_masks = masks[idx] + + im = 
np.asarray(annotator.im).copy()
+                for j, box in enumerate(boxes.T.tolist()):
+                    if labels or conf[j] > 0.25:  # 0.25 conf thresh
+                        color = colors(classes[j])
+                        mh, mw = image_masks[j].shape
+                        if mh != h or mw != w:
+                            mask = image_masks[j].astype(np.uint8)
+                            mask = cv2.resize(mask, (w, h))
+                            mask = mask.astype(bool)
+                        else:
+                            mask = image_masks[j].astype(bool)
+                        with contextlib.suppress(Exception):
+                            im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6
+                annotator.fromarray(im)
+    annotator.im.save(fname)  # save
+
+
+def plot_results_with_masks(file="path/to/results.csv", dir="", best=True):
+    # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv')
+    save_dir = Path(file).parent if file else Path(dir)
+    fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True)
+    ax = ax.ravel()
+    files = list(save_dir.glob("results*.csv"))
+    assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot."
+    for f in files:
+        try:
+            data = pd.read_csv(f)
+            index = np.argmax(0.9 * data.values[:, 8] + 0.1 * data.values[:, 7] + 0.9 * data.values[:, 12] +
+                              0.1 * data.values[:, 11])
+            s = [x.strip() for x in data.columns]
+            x = data.values[:, 0]
+            for i, j in enumerate([1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]):
+                y = data.values[:, j]
+                # y[y == 0] = np.nan  # don't show zero values
+                ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=2)
+                if best:
+                    # best
+                    ax[i].scatter(index, y[index], color="r", label=f"best:{index}", marker="*", linewidth=3)
+                    ax[i].set_title(s[j] + f"\n{round(y[index], 5)}")
+                else:
+                    # last
+                    ax[i].scatter(x[-1], y[-1], color="r", label="last", marker="*", linewidth=3)
+                    ax[i].set_title(s[j] + f"\n{round(y[-1], 5)}")
+                # if j in [8, 9, 10]:  # share train and val loss y axes
+                #     ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
+        except Exception as e:
+            print(f"Warning: Plotting error for {f}: {e}")
+    ax[1].legend()
+    fig.savefig(save_dir / "results.png", dpi=200)
+    plt.close()
diff --git a/val.py b/val.py
index 4b0bdddae3b1..6a0f18e28392 100644
--- a/val.py
+++ b/val.py
@@ -71,12 +71,12 @@ def save_one_json(predn, jdict, path, class_map):

 def process_batch(detections, labels, iouv):
     """
-    Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format.
+    Return correct prediction matrix
     Arguments:
-        detections (Array[N, 6]), x1, y1, x2, y2, conf, class
-        labels (Array[M, 5]), class, x1, y1, x2, y2
+        detections (array[N, 6]), x1, y1, x2, y2, conf, class
+        labels (array[M, 5]), class, x1, y1, x2, y2
     Returns:
-        correct (Array[N, 10]), for 10 IoU levels
+        correct (array[N, 10]), for 10 IoU levels
     """
     correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
     iou = box_iou(labels[:, 1:], detections[:, :4])
@@ -102,6 +102,7 @@ def run(
         imgsz=640,  # inference size (pixels)
         conf_thres=0.001,  # confidence threshold
         iou_thres=0.6,  # NMS IoU threshold
+        max_det=300,  # maximum detections per image
         task='val',  # train, val, test, speed or study
         device='',  # cuda device, i.e. 
0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) @@ -187,7 +188,7 @@ def run( if isinstance(names, (list, tuple)): # old format names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) - s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') + s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP50', 'mAP50-95') dt, p, r, f1, mp, mr, map50, map = (Profile(), Profile(), Profile()), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] @@ -205,7 +206,7 @@ def run( # Inference with dt[1]: - out, train_out = model(im) if compute_loss else (model(im, augment=augment), None) + preds, train_out = model(im) if compute_loss else (model(im, augment=augment), None) # Loss if compute_loss: @@ -215,10 +216,16 @@ def run( targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling with dt[2]: - out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) + preds = non_max_suppression(preds, + conf_thres, + iou_thres, + labels=lb, + multi_label=True, + agnostic=single_cls, + max_det=max_det) # Metrics - for si, pred in enumerate(out): + for si, pred in enumerate(preds): labels = targets[targets[:, 0] == si, 1:] nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions path, shape = Path(paths[si]), shapes[si][0] @@ -258,9 +265,9 @@ def run( # Plot images if plots and batch_i < 3: plot_images(im, targets, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) # labels - plot_images(im, output_to_target(out), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred + plot_images(im, output_to_target(preds), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred - callbacks.run('on_val_batch_end', batch_i, im, targets, paths, shapes, out) + callbacks.run('on_val_batch_end', batch_i, im, targets, paths, shapes, preds) # Compute metrics stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy @@ -332,11 +339,12 @@ def run( def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)') parser.add_argument('--batch-size', type=int, default=32, help='batch size') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image') parser.add_argument('--task', default='val', help='train, val, test, speed or study') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') From 58ad5ca5ce6b4fb3da6420bcc7b11a09e20674fd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Sep 2022 00:21:13 +0200 Subject: [PATCH 124/326] Fix val.py zero-TP bug (#9431) Resolves https://github.com/ultralytics/yolov5/issues/9400 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- val.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/val.py b/val.py index 6a0f18e28392..e003d2144b7f 100644 --- a/val.py +++ b/val.py @@ -189,7 +189,8 @@ def run( names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP50', 'mAP50-95') - dt, p, r, f1, mp, mr, map50, map = (Profile(), Profile(), Profile()), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 + tp, fp, p, r, f1, mp, mr, map50, ap50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 + dt = Profile(), Profile(), Profile() # profiling times loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] callbacks.run('on_val_start') From a1e5f9a97de2a3ace012315208c686744ced2782 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Sep 2022 00:55:21 +0200 Subject: [PATCH 125/326] New model.yaml `activation:` field (#9371) * New model.yaml `activation:` field Add optional model yaml activation field to define model-wide activations, i.e.: ```yaml activation: nn.LeakyReLU(0.1) # activation with arguments activation: nn.SiLU() # activation with no arguments ``` Signed-off-by: Glenn Jocher * Update yolo.py Signed-off-by: Glenn Jocher * Add example models * l to m models * update * Add yolov5s-LeakyReLU.yaml * Update yolov5s-LeakyReLU.yaml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- models/common.py | 8 +++-- models/hub/yolov5s-LeakyReLU.yaml | 49 +++++++++++++++++++++++++++++++ models/yolo.py | 6 +++- 3 files changed, 59 insertions(+), 4 deletions(-) create mode 100644 models/hub/yolov5s-LeakyReLU.yaml diff --git a/models/common.py b/models/common.py index 0d90ff4f8827..debbc2d03f60 100644 --- a/models/common.py +++ b/models/common.py @@ -39,11 +39,13 @@ def autopad(k, p=None, d=1): # kernel, padding, dilation class Conv(nn.Module): # Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation) + act = nn.SiLU() # default activation + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True): super().__init__() self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False) self.bn = nn.BatchNorm2d(c2) - self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) + self.act = self.act if act is True else act if isinstance(act, nn.Module) else nn.Identity() def forward(self, x): return self.act(self.bn(self.conv(x))) @@ -54,8 +56,8 @@ def forward_fuse(self, x): class DWConv(Conv): # Depth-wise convolution - def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act) + def __init__(self, c1, c2, k=1, s=1, d=1, act=True): # ch_in, ch_out, kernel, stride, dilation, activation + super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act) class DWConvTranspose2d(nn.ConvTranspose2d): diff --git a/models/hub/yolov5s-LeakyReLU.yaml b/models/hub/yolov5s-LeakyReLU.yaml new file mode 100644 index 000000000000..3a179bf3311c --- /dev/null +++ 
b/models/hub/yolov5s-LeakyReLU.yaml @@ -0,0 +1,49 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +activation: nn.LeakyReLU(0.1) # <----- Conv() activation used throughout entire YOLOv5 model +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/yolo.py b/models/yolo.py index a0702a7c0257..46039c36d7e1 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -297,8 +297,12 @@ def _from_yaml(self, cfg): def parse_model(d, ch): # model_dict, input_channels(3) + # Parse a YOLOv5 model.yaml dictionary LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") - anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] + anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation') + if act: + Conv.act = eval(act) # redefine default activation, i.e. Conv.act = nn.SiLU() + LOGGER.info(f"{colorstr('activation:')} {act}") # print na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) From c7a2d6bcf4f7e88db53f3d09a8484391dac7bc89 Mon Sep 17 00:00:00 2001 From: Hoyeong-GenGenAI <5404902+hotohoto@users.noreply.github.com> Date: Fri, 16 Sep 2022 18:53:18 +0900 Subject: [PATCH 126/326] Fix tick labels for background FN/FP (#9414) * Fix tick labels for background FN/FP In the confusion matrix. 
* Remove FP/FN from the background labels of the confusion matrix

* Update metrics.py

Signed-off-by: Glenn Jocher

Signed-off-by: Glenn Jocher
Co-authored-by: Glenn Jocher
---
 utils/metrics.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/utils/metrics.py b/utils/metrics.py
index 001813cbcd65..021a46ce5d37 100644
--- a/utils/metrics.py
+++ b/utils/metrics.py
@@ -170,12 +170,12 @@ def process_batch(self, detections, labels):
                 if n and sum(j) == 1:
                     self.matrix[detection_classes[m1[j]], gc] += 1  # correct
                 else:
-                    self.matrix[self.nc, gc] += 1  # background FP
+                    self.matrix[self.nc, gc] += 1  # true background

         if n:
             for i, dc in enumerate(detection_classes):
                 if not any(m1 == i):
-                    self.matrix[dc, self.nc] += 1  # background FN
+                    self.matrix[dc, self.nc] += 1  # predicted background

     def matrix(self):
         return self.matrix
@@ -197,6 +197,7 @@ def plot(self, normalize=True, save_dir='', names=()):
         nc, nn = self.nc, len(names)  # number of classes, names
         sn.set(font_scale=1.0 if nc < 50 else 0.8)  # for label size
         labels = (0 < nn < 99) and (nn == nc)  # apply names to ticklabels
+        ticklabels = (names + ['background']) if labels else "auto"
         with warnings.catch_warnings():
             warnings.simplefilter('ignore')  # suppress empty matrix RuntimeWarning: All-NaN slice encountered
             sn.heatmap(array,
@@ -208,8 +209,8 @@ def plot(self, normalize=True, save_dir='', names=()):
                        fmt='.2f',
                        square=True,
                        vmin=0.0,
-                       xticklabels=names + ['background FP'] if labels else "auto",
-                       yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1))
+                       xticklabels=ticklabels,
+                       yticklabels=ticklabels).set_facecolor((1, 1, 1))
         ax.set_xlabel('True')
         ax.set_ylabel('Predicted')
         ax.set_title('Confusion Matrix')

From 03f2ca8eff8918b98169256d055353a1f15b8e32 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Fri, 16 Sep 2022 12:31:43 +0200
Subject: [PATCH 127/326] Fix TensorRT exports to ONNX opset 12 (#9441)

* Fix TensorRT exports to ONNX opset 12

Signed-off-by: Glenn Jocher

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

Signed-off-by: Glenn Jocher
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 export.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/export.py b/export.py
index 1b25f3f8221b..cc4386ae4916 100644
--- a/export.py
+++ b/export.py
@@ -251,7 +251,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose
         model.model[-1].anchor_grid = grid
     else:  # TensorRT >= 8
         check_version(trt.__version__, '8.0.0', hard=True)  # require tensorrt>=8.0.0
-        export_onnx(model, im, file, 13, False, dynamic, simplify)  # opset 13
+        export_onnx(model, im, file, 12, False, dynamic, simplify)  # opset 12
     onnx = file.with_suffix('.onnx')

     LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
@@ -274,11 +274,10 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose

     inputs = [network.get_input(i) for i in range(network.num_inputs)]
     outputs = [network.get_output(i) for i in range(network.num_outputs)]
-    LOGGER.info(f'{prefix} Network Description:')
     for inp in inputs:
-        LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}')
+        LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}')
     for out in outputs:
-        LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}')
+        LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} 
{out.dtype}') if dynamic: if im.shape[0] <= 1: @@ -288,7 +287,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) config.add_optimization_profile(profile) - LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine in {f}') + LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}') if builder.platform_has_fast_fp16 and half: config.set_flag(trt.BuilderFlag.FP16) with builder.build_engine(network, config) as engine, open(f, 'wb') as t: From 2ac4b634c745cc46c4728e682c6da66f79f6416a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Sep 2022 17:25:44 +0200 Subject: [PATCH 128/326] AutoShape explicit arguments fix (#9443) * AutoShape explicit arguments fix Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- models/common.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/models/common.py b/models/common.py index debbc2d03f60..85b82e10a4e1 100644 --- a/models/common.py +++ b/models/common.py @@ -633,7 +633,7 @@ def forward(self, ims, size=640, augment=False, profile=False): autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference if isinstance(ims, torch.Tensor): # torch with amp.autocast(autocast): - return self.model(ims.to(p.device).type_as(p), augment, profile) # inference + return self.model(ims.to(p.device).type_as(p), augment=augment) # inference # Pre-process n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims]) # number, list of images @@ -662,7 +662,7 @@ def forward(self, ims, size=640, augment=False, profile=False): with amp.autocast(autocast): # Inference with dt[1]: - y = self.model(x, augment, profile) # forward + y = self.model(x, augment=augment) # forward # Post-process with dt[2]: @@ -696,7 +696,7 @@ def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None): self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized self.n = len(self.pred) # number of images (batch size) self.t = tuple(x.t / self.n * 1E3 for x in times) # timestamps (ms) - self.s = shape # inference BCHW shape + self.s = tuple(shape) # inference BCHW shape def display(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): crops = [] @@ -726,7 +726,7 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np if pprint: - print(s.rstrip(', ')) + LOGGER.info(s.rstrip(', ')) if show: im.show(self.files[i]) # show if save: @@ -743,7 +743,7 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False def print(self): self.display(pprint=True) # print results - print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t) def show(self, labels=True): self.display(show=True, labels=labels) # show results From fe10b4abc054cba1b5fab1d3598b3caf77b53859 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Sep 2022 18:36:55 +0200 Subject: [PATCH 129/326] Update Detections() instance printing (#9445) * Update Detections() instance printing Signed-off-by: Glenn Jocher * Update 
common.py Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- models/common.py | 39 +++++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/models/common.py b/models/common.py index 85b82e10a4e1..9c08120fe7f6 100644 --- a/models/common.py +++ b/models/common.py @@ -698,14 +698,15 @@ def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None): self.t = tuple(x.t / self.n * 1E3 for x in times) # timestamps (ms) self.s = tuple(shape) # inference BCHW shape - def display(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): - crops = [] + def _run(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): + s, crops = '', [] for i, (im, pred) in enumerate(zip(self.ims, self.pred)): - s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string + s += f'\nimage {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string if pred.shape[0]: for c in pred[:, -1].unique(): n = (pred[:, -1] == c).sum() # detections per class s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string + s = s.rstrip(', ') if show or save or render or crop: annotator = Annotator(im, example=str(self.names)) for *box, conf, cls in reversed(pred): # xyxy, confidence, class @@ -725,8 +726,6 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False s += '(no detections)' im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np - if pprint: - LOGGER.info(s.rstrip(', ')) if show: im.show(self.files[i]) # show if save: @@ -736,28 +735,27 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}") if render: self.ims[i] = np.asarray(im) + if pprint: + s = s.lstrip('\n') + return f'{s}\nSpeed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t if crop: if save: LOGGER.info(f'Saved results to {save_dir}\n') return crops - def print(self): - self.display(pprint=True) # print results - LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t) - def show(self, labels=True): - self.display(show=True, labels=labels) # show results + self._run(show=True, labels=labels) # show results def save(self, labels=True, save_dir='runs/detect/exp'): save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir - self.display(save=True, labels=labels, save_dir=save_dir) # save results + self._run(save=True, labels=labels, save_dir=save_dir) # save results def crop(self, save=True, save_dir='runs/detect/exp'): save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None - return self.display(crop=True, save=save, save_dir=save_dir) # crop results + return self._run(crop=True, save=save, save_dir=save_dir) # crop results def render(self, labels=True): - self.display(render=True, labels=labels) # render results + self._run(render=True, labels=labels) # render results return self.ims def pandas(self): @@ -779,12 +777,17 @@ def tolist(self): # setattr(d, k, getattr(d, k)[0]) # pop out of list return x - def __len__(self): - return self.n # override len(results) + def print(self): + LOGGER.info(self.__str__()) + + def __len__(self): # override len(results) + return self.n 
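+        # NOTE: with these overrides, print(results) and str(results) both route through
+        # _run(pprint=True); repr(results) additionally prepends class info via __repr__ below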
+ + def __str__(self): # override print(results) + return self._run(pprint=True) # print results - def __str__(self): - self.print() # override print(results) - return '' + def __repr__(self): + return f'YOLOv5 {self.__class__} instance\n' + self.__str__() class Proto(nn.Module): From db06f495db02501ef94efe46171d952642dec880 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Sep 2022 20:44:56 +0200 Subject: [PATCH 130/326] AutoUpdate TensorFlow in export.py (#9447) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 1 + 1 file changed, 1 insertion(+) diff --git a/export.py b/export.py index cc4386ae4916..a575c73e375f 100644 --- a/export.py +++ b/export.py @@ -309,6 +309,7 @@ def export_saved_model(model, keras=False, prefix=colorstr('TensorFlow SavedModel:')): # YOLOv5 TensorFlow SavedModel export + check_requirements('tensorflow' if torch.cuda.is_available() else 'tensorflow-cpu') import tensorflow as tf from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 From 5e1a9553fbed73995c9b81e63ba41cc70fdf89de Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Sep 2022 21:46:07 +0200 Subject: [PATCH 131/326] AutoBatch `cudnn.benchmark=True` fix (#9448) * AutoBatch `cudnn.benchmark=True` fix May resolve https://github.com/ultralytics/yolov5/issues/9287 Signed-off-by: Glenn Jocher * Update autobatch.py Signed-off-by: Glenn Jocher * Update autobatch.py Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/autobatch.py | 3 +++ utils/general.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/utils/autobatch.py b/utils/autobatch.py index 641b055b9fe3..3204fd26fc41 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -33,6 +33,9 @@ def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): if device.type == 'cpu': LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') return batch_size + if torch.backends.cudnn.benchmark: + LOGGER.info(f'{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}') + return batch_size # Inspect CUDA memory gb = 1 << 30 # bytes to GiB (1024 ** 3) diff --git a/utils/general.py b/utils/general.py index 8633511f89f5..af95b3dc2b8b 100644 --- a/utils/general.py +++ b/utils/general.py @@ -223,7 +223,7 @@ def init_seeds(seed=0, deterministic=False): torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) # for Multi-GPU, exception safe - torch.backends.cudnn.benchmark = True # for faster training + # torch.backends.cudnn.benchmark = True # AutoBatch problem https://github.com/ultralytics/yolov5/issues/9287 if deterministic and check_version(torch.__version__, '1.12.0'): # https://github.com/ultralytics/yolov5/pull/8213 torch.use_deterministic_algorithms(True) torch.backends.cudnn.deterministic = True From 4a4308001ce1699fca2d9566b652e2388a088973 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 17 Sep 2022 15:19:43 +0200 Subject: [PATCH 132/326] Do not move downloaded zips (#9455) * Do not move downloaded zips Prevent multiple downloads on HUB of same dataset @kalenmike Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/general.py b/utils/general.py index af95b3dc2b8b..4d080f282ed0 100644 --- a/utils/general.py +++ b/utils/general.py @@ -568,10 +568,10 @@ def 
download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry def download_one(url, dir): # Download 1 file success = True - f = dir / Path(url).name # filename - if Path(url).is_file(): # exists in current path - Path(url).rename(f) # move to dir - elif not f.exists(): + if Path(url).is_file(): + f = Path(url) # filename + else: # does not exist + f = dir / Path(url).name LOGGER.info(f'Downloading {url} to {f}...') for i in range(retry + 1): if curl: From 6a9fffd19a96799c683c94d2d4da8c453e819116 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 17 Sep 2022 15:42:24 +0200 Subject: [PATCH 133/326] Update general.py (#9454) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/utils/general.py b/utils/general.py index 4d080f282ed0..38856b6bfa1c 100644 --- a/utils/general.py +++ b/utils/general.py @@ -469,8 +469,7 @@ def check_dataset(data, autodownload=True): # Read yaml (optional) if isinstance(data, (str, Path)): - with open(data, errors='ignore') as f: - data = yaml.safe_load(f) # dictionary + data = yaml_load(data) # dictionary # Checks for k in 'train', 'val', 'names': @@ -485,7 +484,13 @@ def check_dataset(data, autodownload=True): path = (ROOT / path).resolve() for k in 'train', 'val', 'test': if data.get(k): # prepend path - data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]] + if isinstance(data[k], str): + x = (path / data[k]).resolve() + if not x.exists() and data[k].startswith('../'): + x = (path / data[k][3:]).resolve() + data[k] = str(x) + else: + data[k] = [str((path / x).resolve()) for x in data[k]] # Parse yaml train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download')) @@ -496,13 +501,12 @@ def check_dataset(data, autodownload=True): if not s or not autodownload: raise Exception('Dataset not found ❌') t = time.time() - root = path.parent if 'path' in data else '..' # unzip directory i.e. 
'../' if s.startswith('http') and s.endswith('.zip'): # URL f = Path(s).name # filename LOGGER.info(f'Downloading {s} to {f}...') torch.hub.download_url_to_file(s, f) - Path(root).mkdir(parents=True, exist_ok=True) # create root - ZipFile(f).extractall(path=root) # unzip + Path(DATASETS_DIR).mkdir(parents=True, exist_ok=True) # create root + ZipFile(f).extractall(path=DATASETS_DIR) # unzip Path(f).unlink() # remove zip r = None # success elif s.startswith('bash '): # bash script @@ -511,7 +515,7 @@ def check_dataset(data, autodownload=True): else: # python script r = exec(s, {'yaml': data}) # return None dt = f'({round(time.time() - t, 1)}s)' - s = f"success ✅ {dt}, saved to {colorstr('bold', root)}" if r in (0, None) else f"failure {dt} ❌" + s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f"failure {dt} ❌" LOGGER.info(f"Dataset download {s}") check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts return data # dictionary From 060837406542c5c65301b8fde641f4d92a1f395e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 17 Sep 2022 23:17:59 +0200 Subject: [PATCH 134/326] `Detect()` and `Segment()` fixes for CoreML and Paddle (#9458) * Detect() and Segment() fixes for CoreML and Paddle Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/yolo.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 46039c36d7e1..0dca6353a356 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -64,17 +64,17 @@ def forward(self, x): if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) - y = x[i].clone() - y[..., :5 + self.nc].sigmoid_() - if self.inplace: - y[..., 0:2] = (y[..., 0:2] * 2 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 - xy, wh, etc = y.split((2, 2, self.no - 4), 4) # tensor_split((2, 4, 5), 4) if torch 1.8.0 + if isinstance(self, Segment): # (boxes + masks) + xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4) + xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i] # xy + wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i] # wh + y = torch.cat((xy, wh, conf.sigmoid(), mask), 4) + else: # Detect (boxes only) + xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4) xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh - y = torch.cat((xy, wh, etc), 4) - z.append(y.view(bs, -1, self.no)) + y = torch.cat((xy, wh, conf), 4) + z.append(y.view(bs, self.na * nx * ny, self.no)) return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x) From afb9860522e5023d64f4fd36fb78b6f26011f760 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 00:17:31 +0200 Subject: [PATCH 135/326] Add Paddle exports to benchmarks (#9459) * Add Paddle exports to benchmarks Signed-off-by: Glenn Jocher * Update plots.py Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- benchmarks.py | 2 +- models/common.py | 10 ++++------ utils/segment/plots.py | 4 ++-- 3 files changed, 7 
insertions(+), 9 deletions(-) diff --git a/benchmarks.py b/benchmarks.py index 58e083c95d55..161af73c1eda 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -65,7 +65,7 @@ def run( model_type = type(attempt_load(weights, fuse=False)) # DetectionModel, SegmentationModel, etc. for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU) try: - assert i not in (9, 10, 11), 'inference not supported' # Edge TPU, TF.js and Paddle are unsupported + assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML if 'cpu' in device.type: assert cpu, 'inference not supported on CPU' diff --git a/models/common.py b/models/common.py index 9c08120fe7f6..2b61307ad46b 100644 --- a/models/common.py +++ b/models/common.py @@ -460,8 +460,8 @@ def wrap_frozen_graph(gd, inputs, outputs): if cuda: config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0) predictor = pdi.create_predictor(config) - input_names = predictor.get_input_names() - input_handle = predictor.get_input_handle(input_names[0]) + input_handle = predictor.get_input_handle(predictor.get_input_names()[0]) + output_names = predictor.get_output_names() else: raise NotImplementedError(f'ERROR: {w} is not a supported format') @@ -517,12 +517,10 @@ def forward(self, im, augment=False, visualize=False): k = 'var_' + str(sorted(int(k.replace('var_', '')) for k in y)[-1]) # output key y = y[k] # output elif self.paddle: # PaddlePaddle - im = im.cpu().numpy().astype("float32") + im = im.cpu().numpy().astype(np.float32) self.input_handle.copy_from_cpu(im) self.predictor.run() - output_names = self.predictor.get_output_names() - output_handle = self.predictor.get_output_handle(output_names[0]) - y = output_handle.copy_to_cpu() + y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names] else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) if self.saved_model: # SavedModel diff --git a/utils/segment/plots.py b/utils/segment/plots.py index e882c14390f0..9b90900b3772 100644 --- a/utils/segment/plots.py +++ b/utils/segment/plots.py @@ -99,9 +99,9 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg' if mh != h or mw != w: mask = image_masks[j].astype(np.uint8) mask = cv2.resize(mask, (w, h)) - mask = mask.astype(np.bool) + mask = mask.astype(bool) else: - mask = image_masks[j].astype(np.bool) + mask = image_masks[j].astype(bool) with contextlib.suppress(Exception): im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6 annotator.fromarray(im) From e8a9c5ae41b53f756e46de1190831b14b53c3b24 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 00:57:48 +0200 Subject: [PATCH 136/326] Add `macos-latest` runner for CoreML benchmarks (#9453) * Add `macos-latest` runner for CoreML benchmarks Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- models/common.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index 2b61307ad46b..825a4c4e2633 100644 --- a/models/common.py +++ b/models/common.py @@ -514,8 +514,7 @@ def forward(self, im, augment=False, visualize=False): conf, cls = y['confidence'].max(1), 
y['confidence'].argmax(1).astype(np.float) y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) else: - k = 'var_' + str(sorted(int(k.replace('var_', '')) for k in y)[-1]) # output key - y = y[k] # output + y = list(reversed(y.values())) # reversed for segmentation models (pred, proto) elif self.paddle: # PaddlePaddle im = im.cpu().numpy().astype(np.float32) self.input_handle.copy_from_cpu(im) From 8ae81a6c87ebbf6a25c4dc2c77ef443b1d84098a Mon Sep 17 00:00:00 2001 From: Junjie Zhang <46258221+Oswells@users.noreply.github.com> Date: Sun, 18 Sep 2022 18:27:43 +0800 Subject: [PATCH 137/326] Fix cutout bug (#9452) * fix cutout bug Signed-off-by: Junjie Zhang <46258221+Oswells@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Junjie Zhang <46258221+Oswells@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/augmentations.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index a5587351f75b..f49110f43c6a 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -12,7 +12,7 @@ import torchvision.transforms as T import torchvision.transforms.functional as TF -from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box +from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy from utils.metrics import bbox_ioa IMAGENET_MEAN = 0.485, 0.456, 0.406 # RGB mean @@ -281,7 +281,7 @@ def cutout(im, labels, p=0.5): # return unobscured labels if len(labels) and s > 0.03: box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) - ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + ioa = bbox_ioa(box, xywhn2xyxy(labels[:, 1:5], w, h)) # intersection over area labels = labels[ioa < 0.60] # remove >60% obscured labels return labels From 95cef1ae6b3bdf4ced616a2b6f3c9655803e9ea7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 12:42:23 +0200 Subject: [PATCH 138/326] Optimize imports (#9464) * Optimize imports * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Reformat * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- segment/train.py | 2 -- utils/loggers/clearml/clearml_utils.py | 1 + utils/loggers/comet/hpo.py | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/segment/train.py b/segment/train.py index bda379176151..8abd0944551d 100644 --- a/segment/train.py +++ b/segment/train.py @@ -39,8 +39,6 @@ sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative -import torch.nn.functional as F - import segment.val as validate # for end-of-epoch mAP from models.experimental import attempt_load from models.yolo import SegmentationModel diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index 1e136907367d..eb1c12ce6cac 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -11,6 +11,7 @@ try: import clearml from clearml import Dataset, Task + assert hasattr(clearml, '__version__') # verify package import not local dir except (ImportError, AssertionError): clearml = None diff --git 
a/utils/loggers/comet/hpo.py b/utils/loggers/comet/hpo.py index eab4df9978cf..7dd5c92e8de1 100644 --- a/utils/loggers/comet/hpo.py +++ b/utils/loggers/comet/hpo.py @@ -14,7 +14,7 @@ if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH -from train import parse_opt, train +from train import train from utils.callbacks import Callbacks from utils.general import increment_path from utils.torch_utils import select_device From dc42e6ef2232979e6f0f606da670f42c6d59108c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 14:45:08 +0200 Subject: [PATCH 139/326] TensorRT SegmentationModel fix (#9465) * TensorRT SegmentationModel fix * TensorRT SegmentationModel fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * TensorRT SegmentationModel fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * TensorRT SegmentationModel fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * TensorRT SegmentationModel fix * TensorRT SegmentationModel fix * fix * sort output names * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 23 ++++++++++++----------- models/common.py | 27 ++++++++++++++++----------- 2 files changed, 28 insertions(+), 22 deletions(-) diff --git a/export.py b/export.py index a575c73e375f..9955870e9e43 100644 --- a/export.py +++ b/export.py @@ -66,7 +66,7 @@ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative from models.experimental import attempt_load -from models.yolo import ClassificationModel, Detect +from models.yolo import ClassificationModel, Detect, DetectionModel, SegmentationModel from utils.dataloaders import LoadImages from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version, check_yaml, colorstr, file_size, get_default_args, print_args, url2file, yaml_save) @@ -134,6 +134,15 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') f = file.with_suffix('.onnx') + output_names = ['output0', 'output1'] if isinstance(model, SegmentationModel) else ['output0'] + if dynamic: + dynamic = {'images': {0: 'batch', 2: 'height', 3: 'width'}} # shape(1,3,640,640) + if isinstance(model, SegmentationModel): + dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + dynamic['output1'] = {0: 'batch', 2: 'mask_height', 3: 'mask_width'} # shape(1,32,160,160) + elif isinstance(model, DetectionModel): + dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + torch.onnx.export( model.cpu() if dynamic else model, # --dynamic only compatible with cpu im.cpu() if dynamic else im, @@ -142,16 +151,8 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX opset_version=opset, do_constant_folding=True, input_names=['images'], - output_names=['output'], - dynamic_axes={ - 'images': { - 0: 'batch', - 2: 'height', - 3: 'width'}, # shape(1,3,640,640) - 'output': { - 0: 'batch', - 1: 'anchors'} # shape(1,25200,85) - } if dynamic else None) + output_names=output_names, + dynamic_axes=dynamic or None) # Checks model_onnx = onnx.load(f) # load onnx model diff --git a/models/common.py b/models/common.py index 
825a4c4e2633..d0bc65e02f91 100644 --- a/models/common.py +++ b/models/common.py @@ -390,18 +390,21 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, model = runtime.deserialize_cuda_engine(f.read()) context = model.create_execution_context() bindings = OrderedDict() + output_names = [] fp16 = False # default updated below dynamic = False - for index in range(model.num_bindings): - name = model.get_binding_name(index) - dtype = trt.nptype(model.get_binding_dtype(index)) - if model.binding_is_input(index): - if -1 in tuple(model.get_binding_shape(index)): # dynamic + for i in range(model.num_bindings): + name = model.get_binding_name(i) + dtype = trt.nptype(model.get_binding_dtype(i)) + if model.binding_is_input(i): + if -1 in tuple(model.get_binding_shape(i)): # dynamic dynamic = True - context.set_binding_shape(index, tuple(model.get_profile_shape(0, index)[2])) + context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2])) if dtype == np.float16: fp16 = True - shape = tuple(context.get_binding_shape(index)) + else: # output + output_names.append(name) + shape = tuple(context.get_binding_shape(i)) im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) @@ -495,15 +498,17 @@ def forward(self, im, augment=False, visualize=False): y = list(self.executable_network([im]).values()) elif self.engine: # TensorRT if self.dynamic and im.shape != self.bindings['images'].shape: - i_in, i_out = (self.model.get_binding_index(x) for x in ('images', 'output')) - self.context.set_binding_shape(i_in, im.shape) # reshape if dynamic + i = self.model.get_binding_index('images') + self.context.set_binding_shape(i, im.shape) # reshape if dynamic self.bindings['images'] = self.bindings['images']._replace(shape=im.shape) - self.bindings['output'].data.resize_(tuple(self.context.get_binding_shape(i_out))) + for name in self.output_names: + i = self.model.get_binding_index(name) + self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i))) s = self.bindings['images'].shape assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}" self.binding_addrs['images'] = int(im.data_ptr()) self.context.execute_v2(list(self.binding_addrs.values())) - y = self.bindings['output'].data + y = [self.bindings[x].data for x in sorted(self.output_names)] elif self.coreml: # CoreML im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) im = Image.fromarray((im[0] * 255).astype('uint8')) From 4d50cd3469d75b18e99ce1e831ca024e3d25a2d7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 15:02:04 +0200 Subject: [PATCH 140/326] `Conv()` dilation argument fix (#9466) Resolves https://github.com/ultralytics/yolov5/issues/9384 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- models/common.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/models/common.py b/models/common.py index d0bc65e02f91..33db74dcd9ae 100644 --- a/models/common.py +++ b/models/common.py @@ -232,7 +232,7 @@ class Focus(nn.Module): # Focus wh information into c-space def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups super().__init__() - self.conv = Conv(c1 * 4, c2, k, s, p, g, act) + self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act) # self.contract = Contract(gain=2) def forward(self, x): 
# x(b,c,w,h) -> y(b,4c,w/2,h/2) @@ -245,8 +245,8 @@ class GhostConv(nn.Module): def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups super().__init__() c_ = c2 // 2 # hidden channels - self.cv1 = Conv(c1, c_, k, s, None, g, act) - self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) + self.cv1 = Conv(c1, c_, k, s, None, g, act=act) + self.cv2 = Conv(c_, c_, 5, 1, None, c_, act=act) def forward(self, x): y = self.cv1(x) From 295c5e9d3ce70f5dbdb897c2da6a58e58f7c1125 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 16:13:22 +0200 Subject: [PATCH 141/326] Update ClassificationModel default training `imgsz=224` (#9469) Update ClassificationModel default training imgsz=224 To match classify/val.py and classify/predict.py Helps https://github.com/ultralytics/yolov5/issues/9462 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/classify/train.py b/classify/train.py index 223367260bad..23c90e0a5274 100644 --- a/classify/train.py +++ b/classify/train.py @@ -3,7 +3,7 @@ Train a YOLOv5 classifier model on a classification dataset Usage - Single-GPU training: - $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 128 + $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224 Usage - Multi-GPU DDP training: $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 @@ -272,7 +272,7 @@ def parse_opt(known=False): parser.add_argument('--data', type=str, default='imagenette160', help='cifar10, cifar100, mnist, imagenet, ...') parser.add_argument('--epochs', type=int, default=10, help='total training epochs') parser.add_argument('--batch-size', type=int, default=64, help='total batch size for all GPUs') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=128, help='train, val image size (pixels)') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='train, val image size (pixels)') parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') From ca9c993d6c3c9f59c44d28b22d8968709cd11693 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 16:15:25 +0200 Subject: [PATCH 142/326] Standardize warnings with `WARNING ⚠️ ...` (#9467) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Standardize warnings * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- benchmarks.py | 2 +- classify/train.py | 2 +- export.py | 2 +- hubconf.py | 2 +- segment/train.py | 2 +- segment/val.py | 6 +++--- train.py | 2 +- utils/__init__.py | 10 ++++++++-- utils/autoanchor.py | 4 ++-- utils/autobatch.py | 2 +- utils/dataloaders.py | 18 +++++++++--------- utils/general.py | 21 ++++++++------------- utils/loggers/__init__.py | 4 ++-- utils/metrics.py | 2 +- utils/segment/dataloaders.py | 2 +- utils/torch_utils.py | 2 +- val.py | 6 +++--- 17 files changed, 45 insertions(+), 44 deletions(-) diff --git a/benchmarks.py b/benchmarks.py index 161af73c1eda..b3b58eb3257c 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -91,7 +91,7 @@ def run( except Exception as e: if hard_fail: assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}' - LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}') + LOGGER.warning(f'WARNING ⚠️ Benchmark failure for {name}: {e}') y.append([name, None, None, None]) # mAP, t_inference if pt_only and i == 0: break # break after PyTorch diff --git a/classify/train.py b/classify/train.py index 23c90e0a5274..178ebcdfff53 100644 --- a/classify/train.py +++ b/classify/train.py @@ -114,7 +114,7 @@ def train(opt, device): m = hub.list('ultralytics/yolov5') # + hub.list('pytorch/vision') # models raise ModuleNotFoundError(f'--model {opt.model} not found. Available models are: \n' + '\n'.join(m)) if isinstance(model, DetectionModel): - LOGGER.warning("WARNING: pass YOLOv5 classifier model with '-cls' suffix, i.e. '--model yolov5s-cls.pt'") + LOGGER.warning("WARNING ⚠️ pass YOLOv5 classifier model with '-cls' suffix, i.e. '--model yolov5s-cls.pt'") model = ClassificationModel(model=model, nc=nc, cutoff=opt.cutoff or 10) # convert to classification model reshape_classifier_output(model, nc) # update class count for m in model.modules(): diff --git a/export.py b/export.py index 9955870e9e43..ac9b13db8ec0 100644 --- a/export.py +++ b/export.py @@ -282,7 +282,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose if dynamic: if im.shape[0] <= 1: - LOGGER.warning(f"{prefix}WARNING: --dynamic model requires maximum --batch-size argument") + LOGGER.warning(f"{prefix}WARNING ⚠️ --dynamic model requires maximum --batch-size argument") profile = builder.create_optimization_profile() for inp in inputs: profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) diff --git a/hubconf.py b/hubconf.py index 2f05565629a5..4224760a4732 100644 --- a/hubconf.py +++ b/hubconf.py @@ -47,7 +47,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model if autoshape: if model.pt and isinstance(model.model, ClassificationModel): - LOGGER.warning('WARNING: ⚠️ YOLOv5 v6.2 ClassificationModel is not yet AutoShape compatible.
' + LOGGER.warning('WARNING ⚠️ YOLOv5 v6.2 ClassificationModel is not yet AutoShape compatible. ' 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') else: model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS diff --git a/segment/train.py b/segment/train.py index 8abd0944551d..5121c5fa784a 100644 --- a/segment/train.py +++ b/segment/train.py @@ -176,7 +176,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: - LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' + LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') model = torch.nn.DataParallel(model) diff --git a/segment/val.py b/segment/val.py index 138aa00aaed3..59ab76672a30 100644 --- a/segment/val.py +++ b/segment/val.py @@ -345,7 +345,7 @@ def run( pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results())) if nt.sum() == 0: - LOGGER.warning(f'WARNING: no labels found in {task} set, can not compute metrics without labels ⚠️') + LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') # Print results per class if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): @@ -438,9 +438,9 @@ def main(opt): if opt.task in ('train', 'val', 'test'): # run normally if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 - LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} > 0.001 produces invalid results ⚠️') + LOGGER.warning(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results') if opt.save_hybrid: - LOGGER.info('WARNING: --save-hybrid will return high mAP from hybrid labels, not from predictions alone ⚠️') + LOGGER.warning('WARNING ⚠️ --save-hybrid returns high mAP from hybrid labels, not from predictions alone') run(**vars(opt)) else: diff --git a/train.py b/train.py index 4eff6e5d645a..9efece250581 100644 --- a/train.py +++ b/train.py @@ -173,7 +173,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: - LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' + LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') model = torch.nn.DataParallel(model) diff --git a/utils/__init__.py b/utils/__init__.py index 46225c2208ce..8403a6149827 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -4,9 +4,15 @@ """ import contextlib +import platform import threading +def emojis(str=''): + # Return platform-dependent emoji-safe version of string + return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str + + class TryExcept(contextlib.ContextDecorator): # YOLOv5 TryExcept class. 
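(A usage sketch for the class defined in this hunk, illustrative only; it assumes the repo root is importable so that utils.TryExcept resolves, and relies on the __exit__ above printing the exception through emojis() and suppressing it rather than re-raising:)

from utils import TryExcept  # assumes the YOLOv5 repo root is on sys.path

@TryExcept('demo: ')
def flaky():
    raise ValueError('boom')

flaky()  # prints 'demo: boom' and execution continues

with TryExcept('demo: '):
    1 / 0  # ZeroDivisionError is printed, not raised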
Usage: @TryExcept() decorator or 'with TryExcept():' context manager def __init__(self, msg=''): @@ -17,7 +23,7 @@ def __enter__(self): def __exit__(self, exc_type, value, traceback): if value: - print(f'{self.msg}{value}') + print(emojis(f'{self.msg}{value}')) return True @@ -38,7 +44,7 @@ def notebook_init(verbose=True): import os import shutil - from utils.general import check_font, check_requirements, emojis, is_colab + from utils.general import check_font, check_requirements, is_colab from utils.torch_utils import select_device # imports check_requirements(('psutil', 'IPython')) diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 0b49ab3319c0..7e7e9985d68a 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -122,7 +122,7 @@ def print_results(k, verbose=True): # Filter i = (wh0 < 3.0).any(1).sum() if i: - LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found: {i} of {len(wh0)} labels are < 3 pixels in size') + LOGGER.info(f'{PREFIX}WARNING ⚠️ Extremely small objects found: {i} of {len(wh0)} labels are <3 pixels in size') wh = wh0[(wh0 >= 2.0).any(1)].astype(np.float32) # filter > 2 pixels # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 @@ -134,7 +134,7 @@ def print_results(k, verbose=True): k = kmeans(wh / s, n, iter=30)[0] * s # points assert n == len(k) # kmeans may return fewer points than requested if wh is insufficient or too similar except Exception: - LOGGER.warning(f'{PREFIX}WARNING: switching strategies from kmeans to random init') + LOGGER.warning(f'{PREFIX}WARNING ⚠️ switching strategies from kmeans to random init') k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0)) k = print_results(k, verbose=False) diff --git a/utils/autobatch.py b/utils/autobatch.py index 3204fd26fc41..49435f51a244 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -65,7 +65,7 @@ def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): b = batch_sizes[max(i - 1, 0)] # select prior safe point if b < 1 or b > 1024: # b outside of safe range b = batch_size - LOGGER.warning(f'{prefix}WARNING: ⚠️ CUDA anomaly detected, recommend restart environment and retry command.') + LOGGER.warning(f'{prefix}WARNING ⚠️ CUDA anomaly detected, recommend restart environment and retry command.') fraction = np.polyval(p, b) / t # actual fraction predicted LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅') diff --git a/utils/dataloaders.py b/utils/dataloaders.py index c04be853c580..5c3460eb0d6e 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -116,7 +116,7 @@ def create_dataloader(path, prefix='', shuffle=False): if rect and shuffle: - LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False') + LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') shuffle = False with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP dataset = LoadImagesAndLabels( @@ -328,7 +328,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, tr self.auto = auto and self.rect self.transforms = transforms # optional if not self.rect: - LOGGER.warning('WARNING: Stream shapes differ. For optimal performance supply similarly-shaped streams.') + LOGGER.warning('WARNING ⚠️ Stream shapes differ. 
For optimal performance supply similarly-shaped streams.') def update(self, i, cap, stream): # Read stream `i` frames in daemon thread @@ -341,7 +341,7 @@ def update(self, i, cap, stream): if success: self.imgs[i] = im else: - LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.') + LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.') self.imgs[i] = np.zeros_like(self.imgs[i]) cap.open(stream) # re-open stream if signal was lost time.sleep(0.0) # wait time @@ -543,7 +543,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): if msgs: LOGGER.info('\n'.join(msgs)) if nf == 0: - LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. {HELP_URL}') + LOGGER.warning(f'{prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}') x['hash'] = get_hash(self.label_files + self.im_files) x['results'] = nf, nm, ne, nc, len(self.im_files) x['msgs'] = msgs # warnings @@ -553,7 +553,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): path.with_suffix('.cache.npy').rename(path) # remove .npy suffix LOGGER.info(f'{prefix}New cache created: {path}') except Exception as e: - LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # not writeable + LOGGER.warning(f'{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable: {e}') # not writeable return x def __len__(self): @@ -917,7 +917,7 @@ def verify_image_label(args): f.seek(-2, 2) if f.read() != b'\xff\xd9': # corrupt JPEG ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100) - msg = f'{prefix}WARNING: {im_file}: corrupt JPEG restored and saved' + msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved' # verify labels if os.path.isfile(lb_file): @@ -939,7 +939,7 @@ def verify_image_label(args): lb = lb[i] # remove duplicates if segments: segments = [segments[x] for x in i] - msg = f'{prefix}WARNING: {im_file}: {nl - len(i)} duplicate labels removed' + msg = f'{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed' else: ne = 1 # label empty lb = np.zeros((0, 5), dtype=np.float32) @@ -949,7 +949,7 @@ def verify_image_label(args): return im_file, lb, shape, segments, nm, nf, ne, nc, msg except Exception as e: nc = 1 - msg = f'{prefix}WARNING: {im_file}: ignoring corrupt image/label: {e}' + msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}' return [None, None, None, None, nm, nf, ne, nc, msg] @@ -1012,7 +1012,7 @@ def _hub_ops(self, f, max_dim=1920): im = im.resize((int(im.width * r), int(im.height * r))) im.save(f_new, 'JPEG', quality=50, optimize=True) # save except Exception as e: # use OpenCV - print(f'WARNING: HUB ops PIL failure {f}: {e}') + LOGGER.info(f'WARNING ⚠️ HUB ops PIL failure {f}: {e}') im = cv2.imread(f) im_height, im_width = im.shape[:2] r = max_dim / max(im_height, im_width) # ratio diff --git a/utils/general.py b/utils/general.py index 38856b6bfa1c..fd0b4090a0fa 100644 --- a/utils/general.py +++ b/utils/general.py @@ -34,7 +34,7 @@ import torchvision import yaml -from utils import TryExcept +from utils import TryExcept, emojis from utils.downloads import gsutil_getsize from utils.metrics import box_iou, fitness @@ -248,11 +248,6 @@ def get_latest_run(search_dir='.'): return max(last_list, key=os.path.getctime) if last_list else '' -def emojis(str=''): - # Return platform-dependent emoji-safe version of string - return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else 
str - - def file_age(path=__file__): # Return days since last file update dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)) # delta @@ -333,7 +328,7 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals # Check version vs. required version current, minimum = (pkg.parse_version(x) for x in (current, minimum)) result = (current == minimum) if pinned else (current >= minimum) # bool - s = f'WARNING: ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed' # string + s = f'WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed' # string if hard: assert result, emojis(s) # assert min requirements met if verbose and not result: @@ -373,7 +368,7 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" LOGGER.info(s) except Exception as e: - LOGGER.warning(f'{prefix} {e}') + LOGGER.warning(f'{prefix} ❌ {e}') def check_img_size(imgsz, s=32, floor=0): @@ -384,7 +379,7 @@ def check_img_size(imgsz, s=32, floor=0): imgsz = list(imgsz) # convert to list if tuple new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] if new_size != imgsz: - LOGGER.warning(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') + LOGGER.warning(f'WARNING ⚠️ --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') return new_size @@ -399,7 +394,7 @@ def check_imshow(): cv2.waitKey(1) return True except Exception as e: - LOGGER.warning(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') + LOGGER.warning(f'WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') return False @@ -589,9 +584,9 @@ def download_one(url, dir): if success: break elif i < retry: - LOGGER.warning(f'Download failure, retrying {i + 1}/{retry} {url}...') + LOGGER.warning(f'⚠️ Download failure, retrying {i + 1}/{retry} {url}...') else: - LOGGER.warning(f'Failed to download {url}...') + LOGGER.warning(f'❌ Failed to download {url}...') if unzip and success and f.suffix in ('.zip', '.tar', '.gz'): LOGGER.info(f'Unzipping {f}...') @@ -908,7 +903,7 @@ def non_max_suppression( output[xi] = x[i] if (time.time() - t) > time_limit: - LOGGER.warning(f'WARNING: NMS time limit {time_limit:.3f}s exceeded') + LOGGER.warning(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded') break # time limit exceeded return output diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index f29debb76907..941d09e19e2d 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -11,7 +11,7 @@ import torch from torch.utils.tensorboard import SummaryWriter -from utils.general import colorstr, cv2 +from utils.general import LOGGER, colorstr, cv2 from utils.loggers.clearml.clearml_utils import ClearmlLogger from utils.loggers.wandb.wandb_utils import WandbLogger from utils.plots import plot_images, plot_labels, plot_results @@ -393,7 +393,7 @@ def log_tensorboard_graph(tb, model, imgsz=(640, 640)): warnings.simplefilter('ignore') # suppress jit trace warning tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), []) except Exception as e: - print(f'WARNING: TensorBoard graph visualization failure {e}') + LOGGER.warning(f'WARNING ⚠️ TensorBoard graph visualization failure {e}') def web_project_name(project): diff --git a/utils/metrics.py 
b/utils/metrics.py index 021a46ce5d37..ed611d7d38fa 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -186,7 +186,7 @@ def tp_fp(self): # fn = self.matrix.sum(0) - tp # false negatives (missed detections) return tp[:-1], fp[:-1] # remove background class - @TryExcept('WARNING: ConfusionMatrix plot failure: ') + @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure: ') def plot(self, normalize=True, save_dir='', names=()): import seaborn as sn diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index f6fe642d077f..d137caa5ab27 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -37,7 +37,7 @@ def create_dataloader(path, mask_downsample_ratio=1, overlap_mask=False): if rect and shuffle: - LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False') + LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') shuffle = False with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP dataset = LoadImagesAndLabelsAndMasks( diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 8a3366ca3e27..9f257d06ac60 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -47,7 +47,7 @@ def smartCrossEntropyLoss(label_smoothing=0.0): if check_version(torch.__version__, '1.10.0'): return nn.CrossEntropyLoss(label_smoothing=label_smoothing) if label_smoothing > 0: - LOGGER.warning(f'WARNING: label smoothing {label_smoothing} requires torch>=1.10.0') + LOGGER.warning(f'WARNING ⚠️ label smoothing {label_smoothing} requires torch>=1.10.0') return nn.CrossEntropyLoss() diff --git a/val.py b/val.py index e003d2144b7f..3ab4bc3fdb58 100644 --- a/val.py +++ b/val.py @@ -282,7 +282,7 @@ def run( pf = '%22s' + '%11i' * 2 + '%11.3g' * 4 # print format LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) if nt.sum() == 0: - LOGGER.warning(f'WARNING: no labels found in {task} set, can not compute metrics without labels ⚠️') + LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') # Print results per class if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): @@ -374,9 +374,9 @@ def main(opt): if opt.task in ('train', 'val', 'test'): # run normally if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 - LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} > 0.001 produces invalid results ⚠️') + LOGGER.info(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results') if opt.save_hybrid: - LOGGER.info('WARNING: --save-hybrid will return high mAP from hybrid labels, not from predictions alone ⚠️') + LOGGER.info('WARNING ⚠️ --save-hybrid will return high mAP from hybrid labels, not from predictions alone') run(**vars(opt)) else: From 92b52424d468feb48c51c3dde173d5d2c606a44b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 17:34:34 +0200 Subject: [PATCH 143/326] TensorFlow macOS AutoUpdate (#9471) * TensorFlow macOS AutoUpdate * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 11 ++++++++--- requirements.txt | 2 +- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/export.py b/export.py index ac9b13db8ec0..ae292afe06f6 100644 --- a/export.py +++ b/export.py @@ -72,6 +72,8 @@ check_yaml, colorstr, file_size, get_default_args, print_args, url2file, 
yaml_save) from utils.torch_utils import select_device, smart_inference_mode +MACOS = platform.system() == 'Darwin' # macOS environment + def export_formats(): # YOLOv5 export formats @@ -224,7 +226,7 @@ def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None) if bits < 32: - if platform.system() == 'Darwin': # quantization only supported on macOS + if MACOS: # quantization only supported on macOS with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) @@ -310,8 +312,11 @@ def export_saved_model(model, keras=False, prefix=colorstr('TensorFlow SavedModel:')): # YOLOv5 TensorFlow SavedModel export - check_requirements('tensorflow' if torch.cuda.is_available() else 'tensorflow-cpu') - import tensorflow as tf + try: + import tensorflow as tf + except Exception: + check_requirements(f"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}") + import tensorflow as tf from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 from models.tf import TFModel diff --git a/requirements.txt b/requirements.txt index 44fe1ce697b7..835346f218a4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,7 +30,7 @@ seaborn>=0.11.0 # nvidia-pyindex # TensorRT export # nvidia-tensorrt # TensorRT export # scikit-learn==0.19.2 # CoreML quantization -# tensorflow>=2.4.1 # TFLite export (or tensorflow-cpu, tensorflow-aarch64) +# tensorflow>=2.4.1 # TF exports (-cpu, -aarch64, -macos) # tensorflowjs>=3.9.0 # TF.js export # openvino-dev # OpenVINO export From 120e27e38efd4351b5e5bb5d735635f4cbf1bc86 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 19:34:10 +0200 Subject: [PATCH 144/326] `classify/predict --save-txt` fix (#9478) Classify --save-txt Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/predict.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 878cf48b6fef..4857c69766e7 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -119,13 +119,15 @@ def run( for i, prob in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 - p, im0 = path[i], im0s[i].copy() + p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: - p, im0 = path, im0s.copy() + p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg + txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt + s += '%gx%g ' % im.shape[2:] # print string annotator = Annotator(im0, example=str(names), pil=True) @@ -134,9 +136,12 @@ def run( s += f"{', '.join(f'{names[j]} {prob[j]:.2f}' for j in top5i)}, " # Write results + text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i) if save_img or view_img: # Add bbox to image - text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i) annotator.text((32, 32), text, txt_color=(255, 255, 255)) + if save_txt: # Write to file + with open(f'{txt_path}.txt', 'a') as f: + f.write(text + '\n') # Stream results im0 = annotator.result() @@ -188,7 +193,7 @@ def parse_opt(): parser.add_argument('--imgsz', '--img', 
'--img-size', nargs='+', type=int, default=[224], help='inference size h,w') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='show results') - parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-txt', action='store_false', help='save results to *.txt') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--visualize', action='store_true', help='visualize features') From fda8aa551d0b732153c2e0848dd6abd887a41cd1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 19:52:46 +0200 Subject: [PATCH 145/326] TensorFlow SegmentationModel support (#9472) * TensorFlow SegmentationModel support * TensorFlow SegmentationModel support * TensorFlow SegmentationModel support * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * TFLite fixes * GraphDef fixes * Update ci-testing.yml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/ci-testing.yml | 2 +- export.py | 2 +- models/common.py | 29 ++++++++++++++++++++--------- models/tf.py | 15 ++++++++------- 4 files changed, 30 insertions(+), 18 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 537ba96e7225..fffc92d1b72f 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -43,7 +43,7 @@ jobs: python benchmarks.py --data coco128.yaml --weights ${{ matrix.model }}.pt --img 320 --hard-fail 0.29 - name: Benchmark SegmentationModel run: | - python benchmarks.py --data coco128-seg.yaml --weights ${{ matrix.model }}-seg.pt --img 320 + python benchmarks.py --data coco128-seg.yaml --weights ${{ matrix.model }}-seg.pt --img 320 --hard-fail 0.22 Tests: timeout-minutes: 60 diff --git a/export.py b/export.py index ae292afe06f6..fe4e53d06cc3 100644 --- a/export.py +++ b/export.py @@ -341,7 +341,7 @@ def export_saved_model(model, m = m.get_concrete_function(spec) frozen_func = convert_variables_to_constants_v2(m) tfm = tf.Module() - tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x)[0], [spec]) + tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x), [spec]) tfm.__call__(im) tf.saved_model.save(tfm, f, diff --git a/models/common.py b/models/common.py index 33db74dcd9ae..fac95a82fdb9 100644 --- a/models/common.py +++ b/models/common.py @@ -427,10 +427,17 @@ def wrap_frozen_graph(gd, inputs, outputs): ge = x.graph.as_graph_element return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) + def gd_outputs(gd): + name_list, input_list = [], [] + for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef + name_list.append(node.name) + input_list.extend(node.input) + return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp')) + gd = tf.Graph().as_graph_def() # TF GraphDef with open(w, 'rb') as f: gd.ParseFromString(f.read()) - frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs="Identity:0") + frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs=gd_outputs(gd)) elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python 
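# Context note on the import below (annotation, not part of the original hunk): the try/except
# prefers the lightweight tflite_runtime wheel, the route the Coral docs linked above recommend
# for Edge TPU delegate support; the surrounding common.py code (outside this hunk) falls back
# to the equivalent tf.lite.Interpreter / tf.lite.experimental.load_delegate when it is absent.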
try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu from tflite_runtime.interpreter import Interpreter, load_delegate @@ -528,22 +535,26 @@ def forward(self, im, augment=False, visualize=False): else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) if self.saved_model: # SavedModel - y = (self.model(im, training=False) if self.keras else self.model(im)).numpy() + y = self.model(im, training=False) if self.keras else self.model(im) elif self.pb: # GraphDef - y = self.frozen_func(x=self.tf.constant(im)).numpy() + y = self.frozen_func(x=self.tf.constant(im)) else: # Lite or Edge TPU - input, output = self.input_details[0], self.output_details[0] + input = self.input_details[0] int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model if int8: scale, zero_point = input['quantization'] im = (im / scale + zero_point).astype(np.uint8) # de-scale self.interpreter.set_tensor(input['index'], im) self.interpreter.invoke() - y = self.interpreter.get_tensor(output['index']) - if int8: - scale, zero_point = output['quantization'] - y = (y.astype(np.float32) - zero_point) * scale # re-scale - y[..., :4] *= [w, h, w, h] # xywh normalized to pixels + y = [] + for output in self.output_details: + x = self.interpreter.get_tensor(output['index']) + if int8: + scale, zero_point = output['quantization'] + x = (x.astype(np.float32) - zero_point) * scale # re-scale + y.append(x) + y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y] + y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels if isinstance(y, (list, tuple)): return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y] diff --git a/models/tf.py b/models/tf.py index 8cce147059d3..ae58ca738e2e 100644 --- a/models/tf.py +++ b/models/tf.py @@ -299,15 +299,15 @@ def call(self, inputs): x[i] = tf.reshape(x[i], [-1, ny * nx, self.na, self.no]) if not self.training: # inference - y = tf.sigmoid(x[i]) + y = x[i] grid = tf.transpose(self.grid[i], [0, 2, 1, 3]) - 0.5 anchor_grid = tf.transpose(self.anchor_grid[i], [0, 2, 1, 3]) * 4 - xy = (y[..., 0:2] * 2 + grid) * self.stride[i] # xy - wh = y[..., 2:4] ** 2 * anchor_grid + xy = (tf.sigmoid(y[..., 0:2]) * 2 + grid) * self.stride[i] # xy + wh = tf.sigmoid(y[..., 2:4]) ** 2 * anchor_grid # Normalize xywh to 0-1 to reduce calibration error xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) - y = tf.concat([xy, wh, y[..., 4:]], -1) + y = tf.concat([xy, wh, tf.sigmoid(y[..., 4:5 + self.nc]), y[..., 5 + self.nc:]], -1) z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no])) return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1), x) @@ -333,8 +333,9 @@ def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(640, 640), w def call(self, x): p = self.proto(x[0]) + p = tf.transpose(p, [0, 3, 1, 2]) # from shape(1,160,160,32) to shape(1,32,160,160) x = self.detect(self, x) - return (x, p) if self.training else ((x[0], p),) + return (x, p) if self.training else (x[0], p) class TFProto(keras.layers.Layer): @@ -485,8 +486,8 @@ def predict(self, conf_thres, clip_boxes=False) return nms, x[1] - return x[0] # output only first tensor [1,6300,85] = [xywh, conf, class0, class1, ...] - # x = x[0][0] # [x(1,6300,85), ...] to x(6300,85) + return x # output [1,6300,85] = [xywh, conf, class0, class1, ...] 
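(A reading aid for the output layout noted above: each row is [x, y, w, h, obj, nc class scores], and segmentation models append nm raw mask coefficients, e.g. 117 columns for nc=80 and nm=32; per the concat earlier in this diff, only the obj and class columns pass through sigmoid, while the mask coefficients stay raw for the later matmul with the mask prototypes. A toy numpy split under those assumptions:)

import numpy as np

pred = np.zeros((1, 6300, 85), dtype=np.float32)  # detection layout: [xywh, obj, 80 classes]
xywh, obj, cls = np.split(pred, [4, 5], axis=-1)  # split columns at indices 4 and 5
print(xywh.shape, obj.shape, cls.shape)  # (1, 6300, 4) (1, 6300, 1) (1, 6300, 80)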
+ # x = x[0] # [x(1,6300,85), ...] to x(6300,85) # xywh = x[..., :4] # x(6300,4) boxes # conf = x[..., 4:5] # x(6300,1) confidences # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1)) # x(6300,1) classes From f038ad71729960facad54407e1b353b0e81242e2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 19 Sep 2022 12:18:55 +0200 Subject: [PATCH 146/326] AutoBatch report include reserved+allocated (#9491) May resolve https://github.com/ultralytics/yolov5/issues/9287#issuecomment-1250767031 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/autobatch.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/autobatch.py b/utils/autobatch.py index 49435f51a244..bdeb91c3d2bd 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -19,7 +19,7 @@ def check_train_batch_size(model, imgsz=640, amp=True): def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): - # Automatically estimate best batch size to use `fraction` of available CUDA memory + # Automatically estimate best YOLOv5 batch size to use `fraction` of available CUDA memory # Usage: # import torch # from utils.autobatch import autobatch @@ -67,6 +67,6 @@ def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): b = batch_size LOGGER.warning(f'{prefix}WARNING ⚠️ CUDA anomaly detected, recommend restart environment and retry command.') - fraction = np.polyval(p, b) / t # actual fraction predicted + fraction = (np.polyval(p, b) + r + a) / t # actual fraction predicted LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅') return b From 868c0e9bbb45b031e7bfd73c6d3983bcce07b9c1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 19 Sep 2022 13:31:24 +0200 Subject: [PATCH 147/326] Update Detect() grid init `for` loop (#9494) May resolve threaded inference issue in https://github.com/ultralytics/yolov5/pull/9425#issuecomment-1250802928 by avoiding memory sharing on init. Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- models/yolo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 0dca6353a356..1d0da2a6e010 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -47,8 +47,8 @@ def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer self.no = nc + 5 # number of outputs per anchor self.nl = len(anchors) # number of detection layers self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.empty(1)] * self.nl # init grid - self.anchor_grid = [torch.empty(1)] * self.nl # init anchor grid + self.grid = [torch.empty(0) for _ in range(self.nl)] # init grid + self.anchor_grid = [torch.empty(0) for _ in range(self.nl)] # init anchor grid self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv self.inplace = inplace # use inplace ops (e.g. slice assignment) From 11640698977724daf7982c9da398c2ee2f2b6e91 Mon Sep 17 00:00:00 2001 From: mucunwuxian Date: Mon, 19 Sep 2022 21:01:46 +0900 Subject: [PATCH 148/326] Accelerate video inference (#9487) * The following code is slow, "self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.vid_stride * (self.frame + 1)) # read at vid_stride". * adjust... 
* Update dataloaders.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- utils/dataloaders.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 5c3460eb0d6e..5b03b4eb9759 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -232,8 +232,9 @@ def __next__(self): if self.video_flag[self.count]: # Read video self.mode = 'video' - ret_val, im0 = self.cap.read() - self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.vid_stride * (self.frame + 1)) # read at vid_stride + for _ in range(self.vid_stride): + self.cap.grab() + ret_val, im0 = self.cap.retrieve() while not ret_val: self.count += 1 self.cap.release() From 0b724c5b851b32bb3a8fbfab3cc2d68f93b4661e Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Mon, 19 Sep 2022 11:26:19 -0400 Subject: [PATCH 149/326] Comet Image Logging Fix (#9498) fix issues with image logging --- utils/loggers/comet/__init__.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index 4ee86dd70d6e..3b3142b002c5 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -22,6 +22,7 @@ comet_ml = None COMET_PROJECT_NAME = None +import PIL import torch import torchvision.transforms as T import yaml @@ -131,6 +132,8 @@ def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwar else: self.iou_thres = IOU_THRES + self.log_parameters({"val_iou_threshold": self.iou_thres, "val_conf_threshold": self.conf_thres}) + self.comet_log_predictions = COMET_LOG_PREDICTIONS if self.opt.bbox_interval == -1: self.comet_log_prediction_interval = 1 if self.opt.epochs < 10 else self.opt.epochs // 10 @@ -139,6 +142,7 @@ def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwar if self.comet_log_predictions: self.metadata_dict = {} + self.logged_image_names = [] self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS @@ -249,11 +253,12 @@ def log_predictions(self, image, labelsn, path, shape, predn): filtered_detections = detections[mask] filtered_labels = labelsn[mask] - processed_image = (image * 255).to(torch.uint8) - image_id = path.split("/")[-1].split(".")[0] image_name = f"{image_id}_curr_epoch_{self.experiment.curr_epoch}" - self.log_image(to_pil(processed_image), name=image_name) + if image_name not in self.logged_image_names: + native_scale_image = PIL.Image.open(path) + self.log_image(native_scale_image, name=image_name) + self.logged_image_names.append(image_name) metadata = [] for cls, *xyxy in filtered_labels.tolist(): From 0171198f38f36c55090c91c49a7b5abacd571324 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 19 Sep 2022 20:38:11 +0200 Subject: [PATCH 150/326] Fix visualization title bug (#9500) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/plots.py | 1 - 1 file changed, 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index d8d5b225a774..51bb7d6c20af 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -204,7 +204,6 @@ def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detec ax[i].axis('off') LOGGER.info(f'Saving {f}... 
({n}/{channels})') - plt.title('Features') plt.savefig(f, dpi=300, bbox_inches='tight') plt.close() np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save From 63368e71d23e453ded1d94094a2b43b75c1a54fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9B=BE=E9=80=B8=E5=A4=AB=EF=BC=88Zeng=20Yifu=EF=BC=89?= <41098760+Zengyf-CVer@users.noreply.github.com> Date: Tue, 20 Sep 2022 07:11:29 +0800 Subject: [PATCH 151/326] Add paddle tips (#9502) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update export.py Signed-off-by: 曾逸夫(Zeng Yifu) <41098760+Zengyf-CVer@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: 曾逸夫(Zeng Yifu) <41098760+Zengyf-CVer@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/export.py b/export.py index fe4e53d06cc3..04c2ed9c802d 100644 --- a/export.py +++ b/export.py @@ -596,10 +596,11 @@ def parse_opt(): parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep') parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold') parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold') - parser.add_argument('--include', - nargs='+', - default=['torchscript'], - help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs') + parser.add_argument( + '--include', + nargs='+', + default=['torchscript'], + help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle') opt = parser.parse_args() print_args(vars(opt)) return opt From 095f601d9d32ea0f0afd47554c068659939ecf4e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 20 Sep 2022 12:22:02 +0200 Subject: [PATCH 152/326] Segmentation `polygons2masks_overlap()` in `np.int32` (#9493) * Segmentation `polygons2masks_overlap()` in `np.int32` May resolve https://github.com/ultralytics/yolov5/issues/9461 WARNING: Masks should be uint8 for fastest speed, change needs profiling results to determine impact. 
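As a minimal sketch of the failure mode this change guards against (the instance count is illustrative): the overlap mask stores one integer id per instance, so a uint8 mask silently wraps ids above 255 and distinct instances collide:

import numpy as np

ids = np.arange(1, 301, dtype=np.int32)  # 300 hypothetical instance ids
ids8 = ids.astype(np.uint8)  # uint8 wraps modulo 256
print(ids8[254:257])  # -> [255 0 1]: ids 256 and 257 collide with ids 0 and 1
print(ids[254:257])  # -> [255 256 257]: int32 keeps every id distinct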
@AyushExel @Laughing-q Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/segment/dataloaders.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index d137caa5ab27..49575f065752 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -308,7 +308,8 @@ def polygons2masks(img_size, polygons, color, downsample_ratio=1): def polygons2masks_overlap(img_size, segments, downsample_ratio=1): """Return a (640, 640) overlap mask.""" - masks = np.zeros((img_size[0] // downsample_ratio, img_size[1] // downsample_ratio), dtype=np.uint8) + masks = np.zeros((img_size[0] // downsample_ratio, img_size[1] // downsample_ratio), + dtype=np.int32 if len(segments) > 255 else np.uint8) areas = [] ms = [] for si in range(len(segments)): From f8b74631e50bcac1bef8a52283102a5feb7217a6 Mon Sep 17 00:00:00 2001 From: FeiGeChuanShu <774074168@qq.com> Date: Tue, 20 Sep 2022 19:04:45 +0800 Subject: [PATCH 153/326] Fix `random_perspective` param bug in segment (#9512) * fix random_perspective param bug when mosaic=False Signed-off-by: FeiGeChuanShu <774074168@qq.com> * Update dataloaders.py * Update dataloaders.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: FeiGeChuanShu <774074168@qq.com> Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/segment/dataloaders.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index 49575f065752..97ef8556068e 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -140,17 +140,14 @@ def __getitem__(self, index): labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) if self.augment: - img, labels, segments = random_perspective( - img, - labels, - segments=segments, - degrees=hyp["degrees"], - translate=hyp["translate"], - scale=hyp["scale"], - shear=hyp["shear"], - perspective=hyp["perspective"], - return_seg=True, - ) + img, labels, segments = random_perspective(img, + labels, + segments=segments, + degrees=hyp["degrees"], + translate=hyp["translate"], + scale=hyp["scale"], + shear=hyp["shear"], + perspective=hyp["perspective"]) nl = len(labels) # number of labels if nl: From e233c038ed63780843446dd7bf00d5cc6a2711fe Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 20 Sep 2022 16:38:04 +0200 Subject: [PATCH 154/326] Remove `check_requirements('flatbuffers==1.12')` (#9514) * Remove `check_requirements('flatbuffers==1.12')` Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/export.py b/export.py index 04c2ed9c802d..a2aa5e830c33 100644 --- a/export.py +++ b/export.py @@ -534,8 +534,6 @@ def run( if coreml: # CoreML f[4], _ = export_coreml(model, im, file, int8, half) if any((saved_model, pb, tflite, edgetpu, tfjs)): # TensorFlow formats - if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 - 
check_requirements('flatbuffers==1.12') # required before `import tensorflow` assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.' assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.' f[5], s_model = export_saved_model(model.cpu(), From bd35191033d52a9e48e6c8faaeaaa009243b988f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 20 Sep 2022 18:47:14 +0200 Subject: [PATCH 155/326] Fix TF Lite exports (#9517) * Update tf.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> From c0d97138456f2257f608c4120c8fd65abcf69326 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 20 Sep 2022 19:01:03 +0200 Subject: [PATCH 156/326] TFLite fix 2 (#9518) * TFLite fix 2 Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/tf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/tf.py b/models/tf.py index ae58ca738e2e..0520c30a96df 100644 --- a/models/tf.py +++ b/models/tf.py @@ -310,7 +310,7 @@ def call(self, inputs): y = tf.concat([xy, wh, tf.sigmoid(y[..., 4:5 + self.nc]), y[..., 5 + self.nc:]], -1) z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no])) - return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1), x) + return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1),) @staticmethod def _make_grid(nx=20, ny=20): From 77dcf55168d59131f75b8187c6be27172eec00ec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 20 Sep 2022 22:57:42 +0200 Subject: [PATCH 157/326] FROM nvcr.io/nvidia/pytorch:22.08-py3 (#9520) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 4b9367cc27db..764ee278c22e 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -3,7 +3,7 @@ # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference # Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.07-py3 +FROM nvcr.io/nvidia/pytorch:22.08-py3 RUN rm -rf /opt/pytorch # remove 1.2GB dir # Downloads to user config dir From 6ebef288944ea3a8152f8e0c98a2aee0bd922144 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Sep 2022 15:12:12 +0200 Subject: [PATCH 158/326] Remove scikit-learn constraint on coremltools 6.0 (#9530) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 835346f218a4..75e7cc9e94d3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -29,7 +29,7 @@ seaborn>=0.11.0 # onnx-simplifier>=0.4.1 # ONNX simplifier # nvidia-pyindex # TensorRT export # nvidia-tensorrt # TensorRT export -# scikit-learn==0.19.2 # CoreML quantization +# scikit-learn # CoreML quantization # tensorflow>=2.4.1 # TF exports (-cpu, -aarch64, -macos) # tensorflowjs>=3.9.0 # TF.js export # openvino-dev # OpenVINO export From 499a6bf5736a1b78341dfd142bd7c82f71ebf459 Mon Sep 17 00:00:00 
2001 From: Glenn Jocher Date: Wed, 21 Sep 2022 15:14:54 +0200 Subject: [PATCH 159/326] Update scikit-learn constraint per coremltools 6.0 (#9531) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 75e7cc9e94d3..17db73678fc1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -29,7 +29,7 @@ seaborn>=0.11.0 # onnx-simplifier>=0.4.1 # ONNX simplifier # nvidia-pyindex # TensorRT export # nvidia-tensorrt # TensorRT export -# scikit-learn # CoreML quantization +# scikit-learn<=1.1.2 # CoreML quantization # tensorflow>=2.4.1 # TF exports (-cpu, -aarch64, -macos) # tensorflowjs>=3.9.0 # TF.js export # openvino-dev # OpenVINO export From db6847431b489a6b8d36c14f05e08970025d01a2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Sep 2022 17:55:25 +0200 Subject: [PATCH 160/326] Update `coremltools>=6.0` (#9532) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 17db73678fc1..55c1f2428e3f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -24,7 +24,7 @@ pandas>=1.1.4 seaborn>=0.11.0 # Export -------------------------------------- -# coremltools>=5.2 # CoreML export +# coremltools>=6.0 # CoreML export # onnx>=1.9.0 # ONNX export # onnx-simplifier>=0.4.1 # ONNX simplifier # nvidia-pyindex # TensorRT export From 6f0284763b0f66467dc04e5a5d87e5a68d1d49cd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Sep 2022 19:53:26 +0200 Subject: [PATCH 161/326] Update albumentations (#9503) * Add `RandomResizedCrop(ratio)` * Update ratio * Update ratio * Update ratio * Update ratio * Update ratio * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Create augmentations.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update augmentations.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/augmentations.py | 27 +++++++++++++++------------ utils/dataloaders.py | 2 +- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index f49110f43c6a..7c8e0bcdede6 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -21,7 +21,7 @@ class Albumentations: # YOLOv5 Albumentations class (optional, only used if package is installed) - def __init__(self): + def __init__(self, size=640): self.transform = None prefix = colorstr('albumentations: ') try: @@ -29,6 +29,7 @@ def __init__(self): check_version(A.__version__, '1.0.3', hard=True) # version requirement T = [ + A.RandomResizedCrop(height=size, width=size, scale=(0.8, 1.0), ratio=(0.9, 1.11), p=0.0), A.Blur(p=0.01), A.MedianBlur(p=0.01), A.ToGray(p=0.01), @@ -303,15 +304,17 @@ def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates -def classify_albumentations(augment=True, - size=224, - scale=(0.08, 1.0), - hflip=0.5, - vflip=0.0, - jitter=0.4, - mean=IMAGENET_MEAN, - std=IMAGENET_STD, - auto_aug=False): +def classify_albumentations( + augment=True, + size=224, + scale=(0.08, 1.0), + ratio=(0.75, 1.0 / 0.75), # 0.75, 1.33 + hflip=0.5, + vflip=0.0, + 
jitter=0.4, + mean=IMAGENET_MEAN, + std=IMAGENET_STD, + auto_aug=False): # YOLOv5 classification Albumentations (optional, only used if package is installed) prefix = colorstr('albumentations: ') try: @@ -319,7 +322,7 @@ def classify_albumentations(augment=True, from albumentations.pytorch import ToTensorV2 check_version(A.__version__, '1.0.3', hard=True) # version requirement if augment: # Resize and crop - T = [A.RandomResizedCrop(height=size, width=size, scale=scale)] + T = [A.RandomResizedCrop(height=size, width=size, scale=scale, ratio=ratio)] if auto_aug: # TODO: implement AugMix, AutoAug & RandAug in albumentation LOGGER.info(f'{prefix}auto augmentations are currently not supported') @@ -338,7 +341,7 @@ def classify_albumentations(augment=True, return A.Compose(T) except ImportError: # package not installed, skip - pass + LOGGER.warning(f'{prefix}⚠️ not found, install with `pip install albumentations` (recommended)') except Exception as e: LOGGER.info(f'{prefix}{e}') diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 5b03b4eb9759..ee79bd0bc5a5 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -404,7 +404,7 @@ def __init__(self, self.mosaic_border = [-img_size // 2, -img_size // 2] self.stride = stride self.path = path - self.albumentations = Albumentations() if augment else None + self.albumentations = Albumentations(size=img_size) if augment else None try: f = [] # image files From 999482b45163c1b808a187b02183f324a9c782cb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Sep 2022 23:08:52 +0200 Subject: [PATCH 162/326] import re (#9535) * import re Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/export.py b/export.py index a2aa5e830c33..e3cf392b0101 100644 --- a/export.py +++ b/export.py @@ -48,6 +48,7 @@ import json import os import platform +import re import subprocess import sys import time @@ -427,8 +428,6 @@ def export_edgetpu(file, prefix=colorstr('Edge TPU:')): def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): # YOLOv5 TensorFlow.js export check_requirements('tensorflowjs') - import re - import tensorflowjs as tfjs LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') From 489920ab30b217fed14d3ddd31c23e9afc5be238 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 22 Sep 2022 00:34:35 +0200 Subject: [PATCH 163/326] TF.js fix (#9536) * TF.js fix May resolve https://github.com/ultralytics/yolov5/issues/9534 Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/tf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/tf.py b/models/tf.py index 0520c30a96df..1446d8841646 100644 --- a/models/tf.py +++ b/models/tf.py @@ -485,7 +485,7 @@ def predict(self, iou_thres, conf_thres, clip_boxes=False) - return nms, x[1] + return (nms,) return x # output [1,6300,85] = [xywh, conf, class0, class1, ...] # x = x[0] # [x(1,6300,85), ...] 
to x(6300,85) # xywh = x[..., :4] # x(6300,4) boxes # conf = x[..., 4:5] # x(6300,1) confidences # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1)) # x(6300,1) classes From b25d5a75f2c89aace5cae342f3fe29dfdd46e401 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 22 Sep 2022 23:23:40 +0200 Subject: [PATCH 164/326] Refactor dataset batch-size (#9551) --- classify/predict.py | 3 +-- detect.py | 3 +-- segment/predict.py | 3 +-- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 4857c69766e7..ef59ff6f550a 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -91,10 +91,9 @@ def run( if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) - bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) - bs = 1 # batch_size + bs = len(dataset) # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference diff --git a/detect.py b/detect.py index 310d169281bf..4015b9ae0d7f 100644 --- a/detect.py +++ b/detect.py @@ -99,10 +99,9 @@ def run( if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = 1 # batch_size + bs = len(dataset) # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference diff --git a/segment/predict.py b/segment/predict.py index ba4cf2905255..2ea6bd9327e0 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -101,10 +101,9 @@ def run( if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = 1 # batch_size + bs = len(dataset) # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference From 30fa9b610a3a6d9dc6a9e5961388710e5af0b704 Mon Sep 17 00:00:00 2001 From: zombob <2613669+zombob@users.noreply.github.com> Date: Fri, 23 Sep 2022 05:58:14 +0800 Subject: [PATCH 165/326] Add `--source screen` for screenshot inference (#9542) * add screenshot as source * fix: screen number support * Fix: multiple screen specific area * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * parse screen args in LoadScreenshots * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * sequence + '_' as file name for save-txt save-crop * screenshot as stream * Update requirements.txt Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * Update detect.py Signed-off-by: Glenn Jocher * Update detect.py Signed-off-by: Glenn Jocher * Update detect.py Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * Update detect.py Signed-off-by: Glenn Jocher * Update detect.py Signed-off-by: Glenn Jocher * Update predict.py Signed-off-by: Glenn Jocher * Update detect.py Signed-off-by: Glenn Jocher * Update predict.py Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: xin Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- README.md | 1 
+ classify/predict.py | 9 +++++--- detect.py | 9 +++++--- requirements.txt | 1 + segment/predict.py | 9 +++++--- tutorial.ipynb | 1 + utils/dataloaders.py | 49 ++++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 70 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index da8bf1dad862..1d43111d56e7 100644 --- a/README.md +++ b/README.md @@ -107,6 +107,7 @@ the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and python detect.py --source 0 # webcam img.jpg # image vid.mp4 # video + screen # screenshot path/ # directory 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube diff --git a/classify/predict.py b/classify/predict.py index ef59ff6f550a..011e7b83f09b 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -42,7 +42,7 @@ from models.common import DetectMultiBackend from utils.augmentations import classify_transforms -from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, print_args, strip_optimizer) from utils.plots import Annotator @@ -52,7 +52,7 @@ @smart_inference_mode() def run( weights=ROOT / 'yolov5s-cls.pt', # model.pt path(s) - source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam + source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) data=ROOT / 'data/coco128.yaml', # dataset.yaml path imgsz=(224, 224), # inference size (height, width) device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu @@ -74,6 +74,7 @@ def run( is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download @@ -91,6 +92,8 @@ def run( if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) + elif screenshot: + dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) bs = len(dataset) # batch_size @@ -187,7 +190,7 @@ def run( def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model path(s)') - parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[224], help='inference size h,w') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') diff --git a/detect.py b/detect.py index 4015b9ae0d7f..9036b26263e5 100644 --- a/detect.py +++ b/detect.py @@ -40,7 +40,7 @@ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative from models.common import DetectMultiBackend -from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box @@ -50,7 +50,7 @@ @smart_inference_mode() def run( weights=ROOT / 'yolov5s.pt', # model.pt path(s) - source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam + source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) data=ROOT / 'data/coco128.yaml', # dataset.yaml path imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold @@ -82,6 +82,7 @@ def run( is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download @@ -99,6 +100,8 @@ def run( if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + elif screenshot: + dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) bs = len(dataset) # batch_size @@ -212,7 +215,7 @@ def run( def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)') - parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') diff --git a/requirements.txt b/requirements.txt index 55c1f2428e3f..914da54e73fc 100644 --- a/requirements.txt +++ b/requirements.txt @@ -38,6 +38,7 @@ seaborn>=0.11.0 ipython # interactive notebook psutil # system utilization thop>=0.1.1 # FLOPs computation +# mss # screenshots # albumentations>=1.0.3 # pycocotools>=2.0 # COCO mAP # roboflow diff --git a/segment/predict.py b/segment/predict.py index 2ea6bd9327e0..43cebc706371 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -40,7 +40,7 @@ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative from models.common import DetectMultiBackend -from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, 
xyxy2xywh) from utils.plots import Annotator, colors, save_one_box @@ -51,7 +51,7 @@ @smart_inference_mode() def run( weights=ROOT / 'yolov5s-seg.pt', # model.pt path(s) - source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam + source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) data=ROOT / 'data/coco128.yaml', # dataset.yaml path imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold @@ -84,6 +84,7 @@ def run( is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download @@ -101,6 +102,8 @@ def run( if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + elif screenshot: + dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) bs = len(dataset) # batch_size @@ -222,7 +225,7 @@ def run( def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)') - parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') diff --git a/tutorial.ipynb b/tutorial.ipynb index 957437b2be6d..f87cccd99df8 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -445,6 +445,7 @@ "python detect.py --source 0 # webcam\n", " img.jpg # image \n", " vid.mp4 # video\n", + " screen # screenshot\n", " path/ # directory\n", " 'path/*.jpg' # glob\n", " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", diff --git a/utils/dataloaders.py b/utils/dataloaders.py index ee79bd0bc5a5..7aee0b891161 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -185,6 +185,55 @@ def __iter__(self): yield from iter(self.sampler) +class LoadScreenshots: + # YOLOv5 screenshot dataloader, i.e. 
`python detect.py --source "screen 0 100 100 512 256"` + def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None): + # source = [screen_number left top width height] (pixels) + check_requirements('mss') + import mss + + source, *params = source.split() + self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0 + if len(params) == 1: + self.screen = int(params[0]) + elif len(params) == 4: + left, top, width, height = (int(x) for x in params) + elif len(params) == 5: + self.screen, left, top, width, height = (int(x) for x in params) + self.img_size = img_size + self.stride = stride + self.transforms = transforms + self.auto = auto + self.mode = 'stream' + self.frame = 0 + self.sct = mss.mss() + + # Parse monitor shape + monitor = self.sct.monitors[self.screen] + self.top = monitor["top"] if top is None else (monitor["top"] + top) + self.left = monitor["left"] if left is None else (monitor["left"] + left) + self.width = width or monitor["width"] + self.height = height or monitor["height"] + self.monitor = {"left": self.left, "top": self.top, "width": self.width, "height": self.height} + + def __iter__(self): + return self + + def __next__(self): + # mss screen capture: get raw pixels from the screen as np array + im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR + s = f"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: " + + if self.transforms: + im = self.transforms(im0) # transforms + else: + im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize + im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + im = np.ascontiguousarray(im) # contiguous + self.frame += 1 + return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s + + class LoadImages: # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4` def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): From 1320ce183e3997c4e3a7bf23c22b9edb222519a4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 23 Sep 2022 23:20:19 +0200 Subject: [PATCH 166/326] Update `is_url()` (#9566) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/downloads.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/downloads.py b/utils/downloads.py index dd2698f995a4..bd495068522d 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -16,13 +16,13 @@ import torch -def is_url(url, check_online=True): - # Check if online file exists +def is_url(url, check_exists=True): + # Check if string is URL and check if URL exists try: url = str(url) result = urllib.parse.urlparse(url) assert all([result.scheme, result.netloc, result.path]) # check if is url - return (urllib.request.urlopen(url).getcode() == 200) if check_online else True # check if exists online + return (urllib.request.urlopen(url).getcode() == 200) if check_exists else True # check if exists online except (AssertionError, urllib.request.HTTPError): return False From d669a74623f273f74213a88b5233964d1ab3ea08 Mon Sep 17 00:00:00 2001 From: Gaz Iqbal Date: Fri, 23 Sep 2022 15:56:42 -0700 Subject: [PATCH 167/326] Detect.py supports running against a Triton container (#9228) * update coco128-seg comments * Enables detect.py to use Triton for inference Triton Inference Server is an open source inference serving software that streamlines AI inferencing. 
https://github.com/triton-inference-server/server The user can now provide a "--triton-url" argument to detect.py to use a local or remote Triton server for inference. For example, http://localhost:8000 will use HTTP over port 8000 and grpc://localhost:8001 will use gRPC over port 8001. Note that it is not necessary to specify a weights file to use Triton. A Triton container can be created by first exporting the YOLOv5 model to a Triton-supported runtime. ONNX, TorchScript and TensorRT are supported by both Triton and the export.py script. The exported model can then be containerized via the OctoML CLI. See https://github.com/octoml/octo-cli#getting-started for a guide. * added triton client to requirements * fixed support for TFSavedModels in Triton * reverted change * Test CoreML update Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Use pathlib Signed-off-by: Glenn Jocher * Refactor DetectMultiBackend to directly accept triton url as --weights http://... Signed-off-by: Glenn Jocher * Deploy category Signed-off-by: Glenn Jocher * Update detect.py Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher * Update predict.py Signed-off-by: Glenn Jocher * Update predict.py Signed-off-by: Glenn Jocher * Update predict.py Signed-off-by: Glenn Jocher * Update triton.py Signed-off-by: Glenn Jocher * Update triton.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add printout and requirements check * Cleanup Signed-off-by: Glenn Jocher * triton fixes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed triton model query over grpc * Update check_requirements('tritonclient[all]') * group imports * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix likely remote URL bug * update comment * Update is_url() * Fix 2x download attempt on http://path/to/model.pt Signed-off-by: Glenn Jocher Co-authored-by: glennjocher Co-authored-by: Gaz Iqbal Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- classify/predict.py | 2 +- detect.py | 8 ++--- models/common.py | 44 +++++++++++++++-------- requirements.txt | 3 ++ segment/predict.py | 2 +- utils/downloads.py | 4 +-- utils/triton.py | 85 +++++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 126 insertions(+), 22 deletions(-) create mode 100644 utils/triton.py diff --git a/classify/predict.py b/classify/predict.py index 011e7b83f09b..d3bec8eea7ba 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -104,7 +104,7 @@ def run( seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: with dt[0]: - im = torch.Tensor(im).to(device) + im = torch.Tensor(im).to(model.device) im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 if len(im.shape) == 3: im = im[None] # expand for batch dim diff --git a/detect.py b/detect.py index 9036b26263e5..e442ed75f4c7 100644 --- a/detect.py +++ b/detect.py @@ -49,7 +49,7 @@ @smart_inference_mode() def run( - weights=ROOT / 'yolov5s.pt', # model.pt path(s) + weights=ROOT / 'yolov5s.pt', # model path or triton URL source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) data=ROOT / 'data/coco128.yaml', # dataset.yaml path imgsz=(640, 640), # inference size (height, width) @@ -108,11 +108,11 @@ def run( vid_path, vid_writer = [None] 
* bs, [None] * bs # Run inference - model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup + model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz)) # warmup seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: with dt[0]: - im = torch.from_numpy(im).to(device) + im = torch.from_numpy(im).to(model.device) im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: @@ -214,7 +214,7 @@ def run( def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL') parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') diff --git a/models/common.py b/models/common.py index fac95a82fdb9..177704849d3d 100644 --- a/models/common.py +++ b/models/common.py @@ -10,6 +10,7 @@ from collections import OrderedDict, namedtuple from copy import copy from pathlib import Path +from urllib.parse import urlparse import cv2 import numpy as np @@ -327,11 +328,13 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, super().__init__() w = str(weights[0] if isinstance(weights, list) else weights) - pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = self._model_type(w) # type - w = attempt_download(w) # download if not local + pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w) fp16 &= pt or jit or onnx or engine # FP16 + nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH) stride = 32 # default stride cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA + if not (pt or triton): + w = attempt_download(w) # download if not local if pt: # PyTorch model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse) @@ -342,7 +345,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, elif jit: # TorchScript LOGGER.info(f'Loading {w} for TorchScript inference...') extra_files = {'config.txt': ''} # model metadata - model = torch.jit.load(w, _extra_files=extra_files) + model = torch.jit.load(w, _extra_files=extra_files, map_location=device) model.half() if fp16 else model.float() if extra_files['config.txt']: # load metadata dict d = json.loads(extra_files['config.txt'], @@ -472,6 +475,12 @@ def gd_outputs(gd): predictor = pdi.create_predictor(config) input_handle = predictor.get_input_handle(predictor.get_input_names()[0]) output_names = predictor.get_output_names() + elif triton: # NVIDIA Triton Inference Server + LOGGER.info(f'Using {w} as Triton Inference Server...') + check_requirements('tritonclient[all]') + from utils.triton import TritonRemoteModel + model = TritonRemoteModel(url=w) + nhwc = model.runtime.startswith("tensorflow") else: raise NotImplementedError(f'ERROR: {w} is not a supported format') @@ -488,6 +497,8 @@ def forward(self, im, augment=False, visualize=False): b, ch, h, w = im.shape # batch, channel, height, width if self.fp16 and im.dtype != 
torch.float16: im = im.half() # to FP16 + if self.nhwc: + im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3) if self.pt: # PyTorch y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im) @@ -517,7 +528,7 @@ def forward(self, im, augment=False, visualize=False): self.context.execute_v2(list(self.binding_addrs.values())) y = [self.bindings[x].data for x in sorted(self.output_names)] elif self.coreml: # CoreML - im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) + im = im.cpu().numpy() im = Image.fromarray((im[0] * 255).astype('uint8')) # im = im.resize((192, 320), Image.ANTIALIAS) y = self.model.predict({'image': im}) # coordinates are xywh normalized @@ -532,8 +543,10 @@ def forward(self, im, augment=False, visualize=False): self.input_handle.copy_from_cpu(im) self.predictor.run() y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names] + elif self.triton: # NVIDIA Triton Inference Server + y = self.model(im) else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) - im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) + im = im.cpu().numpy() if self.saved_model: # SavedModel y = self.model(im, training=False) if self.keras else self.model(im) elif self.pb: # GraphDef @@ -566,8 +579,8 @@ def from_numpy(self, x): def warmup(self, imgsz=(1, 3, 640, 640)): # Warmup model by running inference once - warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb - if any(warmup_types) and self.device.type != 'cpu': + warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton + if any(warmup_types) and (self.device.type != 'cpu' or self.triton): im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input for _ in range(2 if self.jit else 1): # self.forward(im) # warmup @@ -575,14 +588,17 @@ def warmup(self, imgsz=(1, 3, 640, 640)): @staticmethod def _model_type(p='path/to/model.pt'): # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx + # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle] from export import export_formats - sf = list(export_formats().Suffix) + ['.xml'] # export suffixes - check_suffix(p, sf) # checks - p = Path(p).name # eliminate trailing separators - pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, xml2 = (s in p for s in sf) - xml |= xml2 # *_openvino_model or *.xml - tflite &= not edgetpu # *.tflite - return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle + from utils.downloads import is_url + sf = list(export_formats().Suffix) # export suffixes + if not is_url(p, check=False): + check_suffix(p, sf) # checks + url = urlparse(p) # if url may be Triton inference server + types = [s in Path(p).name for s in sf] + types[8] &= not types[9] # tflite &= not edgetpu + triton = not any(types) and all([any(s in url.scheme for s in ["http", "grpc"]), url.netloc]) + return types + [triton] @staticmethod def _load_metadata(f=Path('path/to/meta.yaml')): diff --git a/requirements.txt b/requirements.txt index 914da54e73fc..4d6ec3509efa 100644 --- a/requirements.txt +++ b/requirements.txt @@ -34,6 +34,9 @@ seaborn>=0.11.0 # tensorflowjs>=3.9.0 # TF.js export # openvino-dev # OpenVINO export +# Deploy -------------------------------------- +# tritonclient[all]~=2.24.0 + # Extras -------------------------------------- ipython # interactive notebook psutil # system utilization diff --git a/segment/predict.py b/segment/predict.py index 43cebc706371..2e794c342de1 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -114,7 +114,7 @@ def run( seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: with dt[0]: - im = torch.from_numpy(im).to(device) + im = torch.from_numpy(im).to(model.device) im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: diff --git a/utils/downloads.py b/utils/downloads.py index bd495068522d..433de84b51ca 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -16,13 +16,13 @@ import torch -def is_url(url, check_exists=True): +def is_url(url, check=True): # Check if string is URL and check if URL exists try: url = str(url) result = urllib.parse.urlparse(url) assert all([result.scheme, result.netloc, result.path]) # check if is url - return (urllib.request.urlopen(url).getcode() == 200) if check_exists else True # check if exists online + return (urllib.request.urlopen(url).getcode() == 200) if check else True # check if exists online except (AssertionError, urllib.request.HTTPError): return False diff --git a/utils/triton.py b/utils/triton.py new file mode 100644 index 000000000000..a94ef0ad197d --- /dev/null +++ b/utils/triton.py @@ -0,0 +1,85 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" Utils to interact with the Triton Inference Server +""" + +import typing +from urllib.parse import urlparse + +import torch + + +class TritonRemoteModel: + """ A wrapper over a model served by the Triton Inference Server. It can + be configured to communicate over GRPC or HTTP. It accepts Torch Tensors + as input and returns them as outputs. + """ + + def __init__(self, url: str): + """ + Keyword arguments: + url: Fully qualified address of the Triton server - for e.g. 
grpc://localhost:8000 + """ + + parsed_url = urlparse(url) + if parsed_url.scheme == "grpc": + from tritonclient.grpc import InferenceServerClient, InferInput + + self.client = InferenceServerClient(parsed_url.netloc) # Triton GRPC client + model_repository = self.client.get_model_repository_index() + self.model_name = model_repository.models[0].name + self.metadata = self.client.get_model_metadata(self.model_name, as_json=True) + + def create_input_placeholders() -> typing.List[InferInput]: + return [ + InferInput(i['name'], [int(s) for s in i["shape"]], i['datatype']) for i in self.metadata['inputs']] + + else: + from tritonclient.http import InferenceServerClient, InferInput + + self.client = InferenceServerClient(parsed_url.netloc) # Triton HTTP client + model_repository = self.client.get_model_repository_index() + self.model_name = model_repository[0]['name'] + self.metadata = self.client.get_model_metadata(self.model_name) + + def create_input_placeholders() -> typing.List[InferInput]: + return [ + InferInput(i['name'], [int(s) for s in i["shape"]], i['datatype']) for i in self.metadata['inputs']] + + self._create_input_placeholders_fn = create_input_placeholders + + @property + def runtime(self): + """Returns the model runtime""" + return self.metadata.get("backend", self.metadata.get("platform")) + + def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[torch.Tensor, ...]]: + """ Invokes the model. Parameters can be provided via args or kwargs. + args, if provided, are assumed to match the order of inputs of the model. + kwargs are matched with the model input names. + """ + inputs = self._create_inputs(*args, **kwargs) + response = self.client.infer(model_name=self.model_name, inputs=inputs) + result = [] + for output in self.metadata['outputs']: + tensor = torch.as_tensor(response.as_numpy(output['name'])) + result.append(tensor) + return result[0] if len(result) == 1 else result + + def _create_inputs(self, *args, **kwargs): + args_len, kwargs_len = len(args), len(kwargs) + if not args_len and not kwargs_len: + raise RuntimeError("No inputs provided.") + if args_len and kwargs_len: + raise RuntimeError("Cannot specify args and kwargs at the same time") + + placeholders = self._create_input_placeholders_fn() + if args_len: + if args_len != len(placeholders): + raise RuntimeError(f"Expected {len(placeholders)} inputs, got {args_len}.") + for input, value in zip(placeholders, args): + input.set_data_from_numpy(value.cpu().numpy()) + else: + for input in placeholders: + value = kwargs[input.name] + input.set_data_from_numpy(value.cpu().numpy()) + return placeholders From c8e52304cf5c34653570c5c3953ba061bc33c1af Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 24 Sep 2022 16:02:41 +0200 Subject: [PATCH 168/326] New `scale_segments()` function (#9570) * Rename scale_coords to scale_boxes * add scale_segments --- detect.py | 4 +-- models/common.py | 4 +-- segment/predict.py | 4 +-- segment/val.py | 6 ++--- utils/general.py | 46 ++++++++++++++++++++++++++------- utils/loggers/comet/__init__.py | 8 +++--- utils/plots.py | 4 +-- val.py | 6 ++--- 8 files changed, 54 insertions(+), 28 deletions(-) diff --git a/detect.py b/detect.py index e442ed75f4c7..4971033b35fb 100644 --- a/detect.py +++ b/detect.py @@ -42,7 +42,7 @@ from models.common import DetectMultiBackend from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, 
check_requirements, colorstr, cv2, - increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh) + increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import select_device, smart_inference_mode @@ -148,7 +148,7 @@ def run( annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale boxes from img_size to im0 size - det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() + det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # Print results for c in det[:, 5].unique(): diff --git a/models/common.py b/models/common.py index 177704849d3d..273e73d9e729 100644 --- a/models/common.py +++ b/models/common.py @@ -23,7 +23,7 @@ from utils.dataloaders import exif_transpose, letterbox from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr, - increment_path, make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh, + increment_path, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, xyxy2xywh, yaml_load) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import copy_attr, smart_inference_mode @@ -703,7 +703,7 @@ def forward(self, ims, size=640, augment=False, profile=False): self.multi_label, max_det=self.max_det) # NMS for i in range(n): - scale_coords(shape1, y[i][:, :4], shape0[i]) + scale_boxes(shape1, y[i][:, :4], shape0[i]) return Detections(ims, y, files, dt, self.names, x.shape) diff --git a/segment/predict.py b/segment/predict.py index 2e794c342de1..2241204715b5 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -42,7 +42,7 @@ from models.common import DetectMultiBackend from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, - increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh) + increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.segment.general import process_mask from utils.torch_utils import select_device, smart_inference_mode @@ -152,7 +152,7 @@ def run( masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC # Rescale boxes from img_size to im0 size - det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() + det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # Print results for c in det[:, 5].unique(): diff --git a/segment/val.py b/segment/val.py index 59ab76672a30..0a37998c1771 100644 --- a/segment/val.py +++ b/segment/val.py @@ -44,7 +44,7 @@ from utils.callbacks import Callbacks from utils.general import (LOGGER, NUM_THREADS, Profile, check_dataset, check_img_size, check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, - scale_coords, xywh2xyxy, xyxy2xywh) + scale_boxes, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, box_iou from utils.plots import output_to_target, plot_val_study from utils.segment.dataloaders import create_dataloader @@ -298,12 +298,12 @@ def run( if single_cls: pred[:, 5] = 0 predn = pred.clone() - scale_coords(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred + 
scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred # Evaluate if nl: tbox = xywh2xyxy(labels[:, 1:5]) # target boxes - scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels + scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels correct_bboxes = process_batch(predn, labelsn, iouv) correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True) diff --git a/utils/general.py b/utils/general.py index fd0b4090a0fa..87e7e20df1ab 100644 --- a/utils/general.py +++ b/utils/general.py @@ -725,7 +725,7 @@ def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right if clip: - clip_coords(x, (h - eps, w - eps)) # warning: inplace clip + clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center @@ -769,7 +769,23 @@ def resample_segments(segments, n=1000): return segments -def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): +def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None): + # Rescale boxes (xyxy) from img1_shape to img0_shape + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + boxes[:, [0, 2]] -= pad[0] # x padding + boxes[:, [1, 3]] -= pad[1] # y padding + boxes[:, :4] /= gain + clip_boxes(boxes, img0_shape) + return boxes + + +def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None): # Rescale coords (xyxy) from img1_shape to img0_shape if ratio_pad is None: # calculate from img0_shape gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new @@ -778,15 +794,15 @@ def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): gain = ratio_pad[0][0] pad = ratio_pad[1] - coords[:, [0, 2]] -= pad[0] # x padding - coords[:, [1, 3]] -= pad[1] # y padding - coords[:, :4] /= gain - clip_coords(coords, img0_shape) - return coords + segments[:, 0] -= pad[0] # x padding + segments[:, 1] -= pad[1] # y padding + segments /= gain + clip_segments(segments, img0_shape) + return segments -def clip_coords(boxes, shape): - # Clip bounding xyxy bounding boxes to image shape (height, width) +def clip_boxes(boxes, shape): + # Clip boxes (xyxy) to image shape (height, width) if isinstance(boxes, torch.Tensor): # faster individually boxes[:, 0].clamp_(0, shape[1]) # x1 boxes[:, 1].clamp_(0, shape[0]) # y1 @@ -797,6 +813,16 @@ def clip_coords(boxes, shape): boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 +def clip_segments(boxes, shape): + # Clip segments (xy1,xy2,...) 
to image shape (height, width) + if isinstance(boxes, torch.Tensor): # faster individually + boxes[:, 0].clamp_(0, shape[1]) # x + boxes[:, 1].clamp_(0, shape[0]) # y + else: # np.array (faster grouped) + boxes[:, 0] = boxes[:, 0].clip(0, shape[1]) # x + boxes[:, 1] = boxes[:, 1].clip(0, shape[0]) # y + + def non_max_suppression( prediction, conf_thres=0.25, @@ -980,7 +1006,7 @@ def apply_classifier(x, model, img, im0): d[:, :4] = xywh2xyxy(b).long() # Rescale boxes from img_size to im0 size - scale_coords(img.shape[2:], d[:, :4], im0[i].shape) + scale_boxes(img.shape[2:], d[:, :4], im0[i].shape) # Classes pred_cls1 = d[:, 5].long() diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index 3b3142b002c5..ba5cecc8e096 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -28,7 +28,7 @@ import yaml from utils.dataloaders import img2label_paths -from utils.general import check_dataset, scale_coords, xywh2xyxy +from utils.general import check_dataset, scale_boxes, xywh2xyxy from utils.metrics import box_iou COMET_PREFIX = "comet://" @@ -293,14 +293,14 @@ def preprocess_prediction(self, image, labels, shape, pred): pred[:, 5] = 0 predn = pred.clone() - scale_coords(image.shape[1:], predn[:, :4], shape[0], shape[1]) + scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1]) labelsn = None if nl: tbox = xywh2xyxy(labels[:, 1:5]) # target boxes - scale_coords(image.shape[1:], tbox, shape[0], shape[1]) # native-space labels + scale_boxes(image.shape[1:], tbox, shape[0], shape[1]) # native-space labels labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels - scale_coords(image.shape[1:], predn[:, :4], shape[0], shape[1]) # native-space pred + scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1]) # native-space pred return predn, labelsn diff --git a/utils/plots.py b/utils/plots.py index 51bb7d6c20af..36df271c60e1 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -20,7 +20,7 @@ from PIL import Image, ImageDraw, ImageFont from utils import TryExcept, threaded -from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_coords, increment_path, +from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_boxes, increment_path, is_ascii, xywh2xyxy, xyxy2xywh) from utils.metrics import fitness from utils.segment.general import scale_image @@ -565,7 +565,7 @@ def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad xyxy = xywh2xyxy(b).long() - clip_coords(xyxy, im.shape) + clip_boxes(xyxy, im.shape) crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] if save: file.parent.mkdir(parents=True, exist_ok=True) # make directory diff --git a/val.py b/val.py index 3ab4bc3fdb58..c0954498d2fb 100644 --- a/val.py +++ b/val.py @@ -40,7 +40,7 @@ from utils.dataloaders import create_dataloader from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, - scale_coords, xywh2xyxy, xyxy2xywh) + scale_boxes, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, ap_per_class, box_iou from utils.plots import output_to_target, plot_images, plot_val_study from utils.torch_utils import select_device, smart_inference_mode @@ -244,12 +244,12 @@ def 
run( if single_cls: pred[:, 5] = 0 predn = pred.clone() - scale_coords(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred + scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred # Evaluate if nl: tbox = xywh2xyxy(labels[:, 1:5]) # target boxes - scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels + scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels correct = process_batch(predn, labelsn, iouv) if plots: From f11a8a62d27c2740af5df940973d231fd5fcb038 Mon Sep 17 00:00:00 2001 From: Forever518 <1423429527@qq.com> Date: Sun, 25 Sep 2022 01:35:07 +0800 Subject: [PATCH 169/326] generator seed fix for DDP mAP drop (#9545) * Try to fix DDP mAP drop by setting generator's seed to RANK * Fix default activation bug * Update dataloaders.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dataloaders.py Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/common.py | 4 ++-- models/yolo.py | 2 +- utils/dataloaders.py | 5 +++-- utils/segment/dataloaders.py | 8 +++++--- 4 files changed, 11 insertions(+), 8 deletions(-) diff --git a/models/common.py b/models/common.py index 273e73d9e729..2fe99be8972b 100644 --- a/models/common.py +++ b/models/common.py @@ -40,13 +40,13 @@ def autopad(k, p=None, d=1): # kernel, padding, dilation class Conv(nn.Module): # Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation) - act = nn.SiLU() # default activation + default_act = nn.SiLU() # default activation def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True): super().__init__() self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False) self.bn = nn.BatchNorm2d(c2) - self.act = self.act if act is True else act if isinstance(act, nn.Module) else nn.Identity() + self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity() def forward(self, x): return self.act(self.bn(self.conv(x))) diff --git a/models/yolo.py b/models/yolo.py index 1d0da2a6e010..ed21c067ee93 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -301,7 +301,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation') if act: - Conv.act = eval(act) # redefine default activation, i.e. Conv.act = nn.SiLU() + Conv.default_act = eval(act) # redefine default activation, i.e. 
Conv.default_act = nn.SiLU() LOGGER.info(f"{colorstr('activation:')} {act}") # print na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 7aee0b891161..6cd1da6b9cf9 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -40,6 +40,7 @@ VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}' # tqdm bar format LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true' # global pin_memory for dataloaders # Get orientation exif tag @@ -139,7 +140,7 @@ def create_dataloader(path, sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates generator = torch.Generator() - generator.manual_seed(0) + generator.manual_seed(6148914691236517205 + RANK) return loader(dataset, batch_size=batch_size, shuffle=shuffle and sampler is None, @@ -1169,7 +1170,7 @@ def create_classification_dataloader(path, nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) generator = torch.Generator() - generator.manual_seed(0) + generator.manual_seed(6148914691236517205 + RANK) return InfiniteDataLoader(dataset, batch_size=batch_size, shuffle=shuffle and sampler is None, diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index 97ef8556068e..a63d6ec013fd 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -17,6 +17,8 @@ from ..torch_utils import torch_distributed_zero_first from .augmentations import mixup, random_perspective +RANK = int(os.getenv('RANK', -1)) + def create_dataloader(path, imgsz, @@ -61,8 +63,8 @@ def create_dataloader(path, nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates - # generator = torch.Generator() - # generator.manual_seed(0) + generator = torch.Generator() + generator.manual_seed(6148914691236517205 + RANK) return loader( dataset, batch_size=batch_size, @@ -72,7 +74,7 @@ def create_dataloader(path, pin_memory=True, collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn, worker_init_fn=seed_worker, - # generator=generator, + generator=generator, ), dataset From 55fbac933bc25b3151082021fa3f10790b3b936a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Sep 2022 02:59:25 +0200 Subject: [PATCH 170/326] Update default GitHub assets (#9573) * Update default GitHub assets Signed-off-by: Glenn Jocher * Update downloads.py Signed-off-by: Glenn Jocher * Update downloads.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/downloads.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/utils/downloads.py b/utils/downloads.py index 433de84b51ca..73b8334cb94a 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -87,9 +87,7 @@ def 
github_assets(repository, version='latest'): return file # GitHub assets - assets = [ - 'yolov5n.pt', 'yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', 'yolov5n6.pt', 'yolov5s6.pt', - 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt'] + assets = [f'yolov5{size}{suffix}.pt' for size in 'nsmlx' for suffix in ('', '6', '-cls', '-seg')] # default try: tag, assets = github_assets(repo, release) except Exception: @@ -107,7 +105,6 @@ def github_assets(repository, version='latest'): safe_download( file, url=f'https://github.com/{repo}/releases/download/{tag}/{name}', - url2=f'https://storage.googleapis.com/{repo}/{tag}/{name}', # backup url (optional) min_bytes=1E5, error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag} or {url3}') From ee91dc9bb32d2dddc46c633b711a778a6c603143 Mon Sep 17 00:00:00 2001 From: "David A. Macey" Date: Sun, 25 Sep 2022 08:47:16 -0400 Subject: [PATCH 171/326] Update requirements.txt comment https://pytorch.org/get-started/locally/ (#9576) * Update Requirements with PyTorch CUDA Added --extra-index-url https://download.pytorch.org/whl/cu116 URL to requirements file for ease of creating venv with CUDA enabled PyTorch. Otherwise CPU PyTorch is installed an unable to use local GPUs. Signed-off-by: David A. Macey * Update requirements.txt Signed-off-by: Glenn Jocher * Update requirements.txt Signed-off-by: Glenn Jocher Signed-off-by: David A. Macey Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 4d6ec3509efa..0436f415c642 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,7 @@ Pillow>=7.1.2 PyYAML>=5.3.1 requests>=2.23.0 scipy>=1.4.1 -torch>=1.7.0 +torch>=1.7.0 # see https://pytorch.org/get-started/locally/ (recommended) torchvision>=0.8.1 tqdm>=4.64.0 # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 From 2787ad701fbb308cfb494ae8fb68b0fcea0e4077 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Sep 2022 14:52:49 +0200 Subject: [PATCH 172/326] Add segment line predictions (#9571) * Add segment line predictions Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- segment/predict.py | 20 ++++++++++++-------- utils/segment/general.py | 14 ++++++++++++++ 2 files changed, 26 insertions(+), 8 deletions(-) diff --git a/segment/predict.py b/segment/predict.py index 2241204715b5..607a8697d731 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -42,9 +42,10 @@ from models.common import DetectMultiBackend from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, - increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh) + increment_path, non_max_suppression, print_args, scale_boxes, scale_segments, + strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box -from utils.segment.general import process_mask +from utils.segment.general import masks2segments, process_mask from utils.torch_utils import select_device, smart_inference_mode @@ 
-145,14 +146,16 @@ def run( save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string - gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC + det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size - # Rescale boxes from img_size to im0 size - det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() + # Segments + if save_txt: + segments = reversed(masks2segments(masks)) + segments = [scale_segments(im.shape[2:], x, im0.shape).round() for x in segments] # Print results for c in det[:, 5].unique(): @@ -165,10 +168,10 @@ def run( im_gpu=None if retina_masks else im[i]) # Write results - for *xyxy, conf, cls in reversed(det[:, :6]): + for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])): if save_txt: # Write to file - xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh - line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + segj = segments[j].reshape(-1) # (n,2) to (n*2) + line = (cls, *segj, conf) if save_conf else (cls, *segj) # label format with open(f'{txt_path}.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') @@ -176,6 +179,7 @@ def run( c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') annotator.box_label(xyxy, label, color=colors(c, True)) + annotator.draw.polygon(segments[j], outline=colors(c, True), width=3) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) diff --git a/utils/segment/general.py b/utils/segment/general.py index 36547ed0889c..655123bdcfeb 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -1,4 +1,5 @@ import cv2 +import numpy as np import torch import torch.nn.functional as F @@ -118,3 +119,16 @@ def masks_iou(mask1, mask2, eps=1e-7): intersection = (mask1 * mask2).sum(1).clamp(0) # (N, ) union = (mask1.sum(1) + mask2.sum(1))[None] - intersection # (area1 + area2) - intersection return intersection / (union + eps) + + +def masks2segments(masks, strategy='largest'): + # Convert masks(n,160,160) into segments(n,xy) + segments = [] + for x in masks.int().numpy().astype('uint8'): + c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] + if strategy == 'concat': # concatenate all segments + c = np.concatenate([x.reshape(-1, 2) for x in c]) + elif strategy == 'largest': # select largest segment + c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) + segments.append(c.astype('float32')) + return segments From 966b0e09f0a5261e555c2a137af2ef9d58cc9779 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Sep 2022 16:21:26 +0200 Subject: [PATCH 173/326] TensorRT detect.py inference fix (#9581) * Update * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Segment fix * Segment fix Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 6 ++++++ classify/predict.py | 3 ++- detect.py | 3 ++- segment/predict.py | 5 +++-- 4 files changed, 13 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci-testing.yml 
b/.github/workflows/ci-testing.yml index fffc92d1b72f..1ec68e8412f9 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -44,6 +44,12 @@ jobs: - name: Benchmark SegmentationModel run: | python benchmarks.py --data coco128-seg.yaml --weights ${{ matrix.model }}-seg.pt --img 320 --hard-fail 0.22 + - name: Test predictions + run: | + python export.py --weights ${{ matrix.model }}-cls.pt --include onnx --img 224 + python detect.py --weights ${{ matrix.model }}.onnx --img 320 + python segment/predict.py --weights ${{ matrix.model }}-seg.onnx --img 320 + python classify/predict.py --weights ${{ matrix.model }}-cls.onnx --img 224 Tests: timeout-minutes: 60 diff --git a/classify/predict.py b/classify/predict.py index d3bec8eea7ba..9114aab1d703 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -89,14 +89,15 @@ def run( imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader + bs = 1 # batch_size if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) + bs = len(dataset) elif screenshot: dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) - bs = len(dataset) # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference diff --git a/detect.py b/detect.py index 4971033b35fb..8f48d8d28000 100644 --- a/detect.py +++ b/detect.py @@ -97,14 +97,15 @@ def run( imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader + bs = 1 # batch_size if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + bs = len(dataset) elif screenshot: dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = len(dataset) # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference diff --git a/segment/predict.py b/segment/predict.py index 607a8697d731..94117cd78633 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -100,14 +100,15 @@ def run( imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader + bs = 1 # batch_size if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + bs = len(dataset) elif screenshot: dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = len(dataset) # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference @@ -179,7 +180,7 @@ def run( c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') annotator.box_label(xyxy, label, color=colors(c, True)) - annotator.draw.polygon(segments[j], outline=colors(c, True), width=3) + # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) From 639d82fbabed66f347a17fd39cd058bcd26a4142 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Sep 2022 20:12:57 +0200 Subject: [PATCH 174/326] Update Comet links (#9587) * Update Comet links Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update README.md 
Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 4 ++-- tutorial.ipynb | 4 ++-- utils/loggers/comet/README.md | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 1d43111d56e7..1c5e123d61e7 100644 --- a/README.md +++ b/README.md @@ -168,7 +168,7 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12
- + @@ -186,7 +186,7 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12 |Comet ⭐ NEW|Deci ⭐ NEW|ClearML ⭐ NEW|Roboflow|Weights & Biases |:-:|:-:|:-:|:-:|:-:| -|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://www.comet.com/site/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) +|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://bit.ly/yolov5-readme-comet)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) ##
Why YOLOv5
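For background on the Comet integration these link updates point to, here is a minimal sketch of enabling Comet logging for a training run. It is illustrative only, not part of this patch: it assumes `comet_ml` is installed (`pip install comet_ml`) and that the placeholder API key is replaced with a real one; YOLOv5's logger picks Comet up automatically once the environment is configured.

```python
# Minimal sketch: configure Comet logging before launching YOLOv5 training.
# Assumes `pip install comet_ml`; the API key below is a placeholder, not a real credential.
import os

os.environ['COMET_API_KEY'] = 'YOUR_API_KEY'  # required to upload experiments
os.environ['COMET_PROJECT_NAME'] = 'yolov5'   # optional; the integration defaults to 'yolov5'

# train.py then logs metrics, hyperparameters and checkpoints to Comet automatically:
#   python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt
```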
diff --git a/tutorial.ipynb b/tutorial.ipynb index f87cccd99df8..8c78af2b84cd 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -865,7 +865,7 @@ "cell_type": "markdown", "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://www.comet.com/site/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", "\n", "Getting started is easy:\n", "```shell\n", @@ -874,7 +874,7 @@ "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", "```\n", "\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration). Get started by trying out the Comet Colab Notebook:\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", "\"yolo-ui\"" diff --git a/utils/loggers/comet/README.md b/utils/loggers/comet/README.md index 7b0b8e0e2f09..3a51cb9b5a25 100644 --- a/utils/loggers/comet/README.md +++ b/utils/loggers/comet/README.md @@ -2,13 +2,13 @@ # YOLOv5 with Comet -This guide will cover how to use YOLOv5 with [Comet](https://www.comet.com/site/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) +This guide will cover how to use YOLOv5 with [Comet](https://bit.ly/yolov5-readme-comet) # About Comet Comet builds tools that help data scientists, engineers, and team leaders accelerate and optimize machine learning and deep learning models. -Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)! 
+Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! # Getting Started @@ -253,4 +253,4 @@ comet optimizer -j utils/loggers/comet/hpo.py \ Comet provides a number of ways to visualize the results of your sweep. Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) -hyperparameter-yolo \ No newline at end of file +hyperparameter-yolo From 9006b41498a3bc512e293061e017a518f11e9902 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Sep 2022 21:40:28 +0200 Subject: [PATCH 175/326] Add global YOLOv5_DATASETS_DIR (#9586) Update general.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 87e7e20df1ab..de7871cb23f9 100644 --- a/utils/general.py +++ b/utils/general.py @@ -43,8 +43,8 @@ RANK = int(os.getenv('RANK', -1)) # Settings -DATASETS_DIR = ROOT.parent / 'datasets' # YOLOv5 datasets directory NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads +DATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf From 9f1cf8dd1ca79b8128d73ac144e8899f51bc5816 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Sep 2022 23:03:14 +0200 Subject: [PATCH 176/326] Add Paperspace Gradient badges (#9588) * Add Paperspace Gradient badges Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update greetings.yml Signed-off-by: Glenn Jocher * Update greetings.yml Signed-off-by: Glenn Jocher * Update README_cn.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/README_cn.md | 4 ++-- .github/workflows/greetings.yml | 8 ++++---- README.md | 7 +++++-- tutorial.ipynb | 5 +++-- 4 files changed, 14 insertions(+), 10 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index bb62714f003f..7e8aa6f7f087 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -12,13 +12,13 @@ [English](../README.md) | 简体中文
- CI CPU testing + YOLOv5 CI YOLOv5 Citation Docker Pulls
+ Run on Gradient Open In Colab Open In Kaggle - Join Forum

diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 91bf190eb727..5e1589c340ed 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -44,14 +44,14 @@ jobs: YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - - **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle + - **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) - **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls - ## Status - CI CPU testing + YOLOv5 CI + + If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. - If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit. diff --git a/README.md b/README.md index 1c5e123d61e7..227735b52fac 100644 --- a/README.md +++ b/README.md @@ -12,13 +12,13 @@ English | [简体中文](.github/README_cn.md)
- CI CPU testing + YOLOv5 CI YOLOv5 Citation Docker Pulls
+ Run on Gradient Open In Colab Open In Kaggle - Join Forum

@@ -315,6 +315,9 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu Get started in seconds with our verified environments. Click each icon below for details.
+ + + diff --git a/tutorial.ipynb b/tutorial.ipynb index 8c78af2b84cd..5d867fb36c93 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -375,6 +375,7 @@ "\n", "\n", "
\n", + " \"Run\n", " \"Open\n", " \"Open\n", "
\n", @@ -945,7 +946,7 @@ "\n", "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", "\n", - "- **Google Colab and Kaggle** notebooks with free GPU: \"Open \"Open\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" @@ -959,7 +960,7 @@ "source": [ "# Status\n", "\n", - "![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)\n", + "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", "\n", "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" ] From 959a4665f820362c95f7435dc05175deeff19671 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Sep 2022 23:26:15 +0200 Subject: [PATCH 177/326] #YOLOVISION22 announcement (#9590) * #YOLOVISION22 announcement Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/README.md b/README.md index 227735b52fac..56349867e4b6 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,19 @@
+ + Hi, I'm [Glenn Jocher](https://www.linkedin.com/in/glenn-jocher/), author of [YOLOv5](https://github.com/ultralytics/yolov5) 🚀. + + I'd like to invite you to attend the world's first-ever YOLO conference: [#YOLOVISION22](https://ultralytics.com/yolo-vision)! + + This virtual event takes place on **September 27th, 2022** with talks from the world's leading vision AI experts from Google, SenseTime's MMLabs, Baidu's PaddlePaddle, Meituan's YOLOv6, Weight & Biases, Roboflow, Neural Magic, OctoML and of course Ultralytics YOLOv5 and many others. + + Save your spot at https://ultralytics.com/yolo-vision! + + + + +##
+
+

@@ -191,6 +206,8 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12 ##

Why YOLOv5
+YOLOv5 has been designed to be super easy to get started and simple to learn. We prioritize real-world results. +

YOLOv5-P5 640 Figure (click to expand) From bfe052b8e1ab398e834a62b607e7d544e1a9876f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Sep 2022 12:39:08 +0200 Subject: [PATCH 178/326] Bump actions/stale from 5 to 6 (#9595) Bumps [actions/stale](https://github.com/actions/stale) from 5 to 6. - [Release notes](https://github.com/actions/stale/releases) - [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/stale/compare/v5...v6) --- updated-dependencies: - dependency-name: actions/stale dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 03d99790a4a7..9067c343608b 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -9,7 +9,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v5 + - uses: actions/stale@v6 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-issue-message: | From bd9c0c42aee090b373db51c7393c972c26ed9913 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Sep 2022 13:27:34 +0200 Subject: [PATCH 179/326] #YOLOVISION22 update (#9598) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 56349867e4b6..514270973137 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ I'd like to invite you to attend the world's first-ever YOLO conference: [#YOLOVISION22](https://ultralytics.com/yolo-vision)! - This virtual event takes place on **September 27th, 2022** with talks from the world's leading vision AI experts from Google, SenseTime's MMLabs, Baidu's PaddlePaddle, Meituan's YOLOv6, Weight & Biases, Roboflow, Neural Magic, OctoML and of course Ultralytics YOLOv5 and many others. + This virtual event takes place on **September 27th, 2022** with talks from the world's leading vision AI experts from Google, OpenMMLab's MMDetection, Baidu's PaddlePaddle, Meituan's YOLOv6, Weight & Biases, Roboflow, Neural Magic, OctoML and of course Ultralytics YOLOv5 and many others. Save your spot at https://ultralytics.com/yolo-vision! From c4c0ee8fc35937cfa940fdaaaf6b9660f5b355f5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Sep 2022 14:13:03 +0200 Subject: [PATCH 180/326] Apple MPS -> CPU NMS fallback strategy (#9600) Until more ops are fully supported this update will allow for seamless MPS inference (but slower MPS to CPU transfer before NMS, so slower NMS times). 
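The fallback amounts to the following pattern (a minimal sketch, not the full non_max_suppression implementation; `run_nms` is a hypothetical stand-in for the real NMS body):

```python
# Sketch of the MPS -> CPU fallback pattern: tensors on the 'mps' backend are
# moved to CPU before NMS, then the results are moved back to the device.
def nms_with_mps_fallback(prediction, run_nms):
    device = prediction.device
    if device.type == 'mps':           # Apple MPS: NMS ops not fully supported yet
        prediction = prediction.cpu()  # extra device transfer, hence slower NMS
    output = run_nms(prediction)       # run_nms: placeholder for the real NMS body
    return [x.to(device) for x in output]
```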
Partially resolves https://github.com/ultralytics/yolov5/issues/9596 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/utils/general.py b/utils/general.py index de7871cb23f9..a855691d3a1f 100644 --- a/utils/general.py +++ b/utils/general.py @@ -843,6 +843,8 @@ def non_max_suppression( if isinstance(prediction, (list, tuple)): # YOLOv5 model in validation model, output = (inference_out, loss_out) prediction = prediction[0] # select only inference output + if 'mps' in prediction.device.type: # MPS not fully supported yet, convert tensors to CPU before NMS + prediction = prediction.cpu() bs = prediction.shape[0] # batch size nc = prediction.shape[2] - nm - 5 # number of classes xc = prediction[..., 4] > conf_thres # candidates From a5748e4b93ae6944ea813b26de6540e80141070b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Sep 2022 20:10:24 +0200 Subject: [PATCH 181/326] Updated Segmentation and Classification usage (#9607) * Updated Segmentation and Classification usage Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index e3cf392b0101..20c1fbc5c7b8 100644 --- a/export.py +++ b/export.py @@ -560,13 +560,20 @@ def run( # Finish f = [str(x) for x in f if x] # filter out '' and None if any(f): + tp = type(model) + dir = Path('segment' if tp is SegmentationModel else 'classify' if tp is ClassificationModel else '') + predict = 'detect.py' if tp is DetectionModel else 'predict.py' h = '--half' if half else '' # --half FP16 inference arg LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)' f"\nResults saved to {colorstr('bold', file.parent.resolve())}" - f"\nDetect: python detect.py --weights {f[-1]} {h}" - f"\nValidate: python val.py --weights {f[-1]} {h}" + f"\nDetect: python {dir / predict} --weights {f[-1]} {h}" + f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}" f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')" f"\nVisualize: https://netron.app") + if tp is ClassificationModel: + LOGGER.warning("WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference") + if tp is SegmentationModel: + LOGGER.warning("WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference") return f # return list of exported files/dirs From 6b2c9d1d0f5f9acad86ff9e7043f094a071aa6fe Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Sep 2022 20:46:50 +0200 Subject: [PATCH 182/326] Update export.py Usage examples (#9609) * Update export.py Usage examples Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/export.py b/export.py index 20c1fbc5c7b8..cf37965cea6b 100644 --- a/export.py +++ b/export.py @@ -560,20 +560,17 @@ def run( # Finish f = [str(x) for x in f if x] # filter out '' and None if any(f): - tp = type(model) - dir = Path('segment' if tp is SegmentationModel else 'classify' if tp is ClassificationModel else '') - predict = 'detect.py' if tp is DetectionModel else 'predict.py' + cls, det, seg = (isinstance(model, x) for x in (ClassificationModel, DetectionModel, 
SegmentationModel)) # type + dir = Path('segment' if seg else 'classify' if cls else '') h = '--half' if half else '' # --half FP16 inference arg + s = "# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference" if cls else \ + "# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference" if seg else '' LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)' f"\nResults saved to {colorstr('bold', file.parent.resolve())}" - f"\nDetect: python {dir / predict} --weights {f[-1]} {h}" + f"\nDetect: python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}" f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}" - f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')" + f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}" f"\nVisualize: https://netron.app") - if tp is ClassificationModel: - LOGGER.warning("WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference") - if tp is SegmentationModel: - LOGGER.warning("WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference") return f # return list of exported files/dirs From 1460e5715700cdb130472e1314074ff648f811d8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 27 Sep 2022 00:29:23 +0200 Subject: [PATCH 183/326] Fix `is_url('https://ultralytics.com')` (#9610) Failing on missing path, i.e. no 'www.' Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/downloads.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/downloads.py b/utils/downloads.py index 73b8334cb94a..60417c1f8835 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -21,7 +21,7 @@ def is_url(url, check=True): try: url = str(url) result = urllib.parse.urlparse(url) - assert all([result.scheme, result.netloc, result.path]) # check if is url + assert all([result.scheme, result.netloc]) # check if is url return (urllib.request.urlopen(url).getcode() == 200) if check else True # check if exists online except (AssertionError, urllib.request.HTTPError): return False From 7314363f26e23fc831a9a739b4031f9f0217084a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 27 Sep 2022 16:58:14 +0200 Subject: [PATCH 184/326] Add `results.save(save_dir='path', exist_ok=False)` (#9617) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- models/common.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/models/common.py b/models/common.py index 2fe99be8972b..d889d0292c61 100644 --- a/models/common.py +++ b/models/common.py @@ -775,12 +775,12 @@ def _run(self, pprint=False, show=False, save=False, crop=False, render=False, l def show(self, labels=True): self._run(show=True, labels=labels) # show results - def save(self, labels=True, save_dir='runs/detect/exp'): - save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir + def save(self, labels=True, save_dir='runs/detect/exp', exist_ok=False): + save_dir = increment_path(save_dir, exist_ok, mkdir=True) # increment save_dir self._run(save=True, labels=labels, save_dir=save_dir) # save results - def crop(self, save=True, save_dir='runs/detect/exp'): - save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None + def crop(self, save=True, save_dir='runs/detect/exp', exist_ok=False): + save_dir = increment_path(save_dir, exist_ok, mkdir=True) if save else None return self._run(crop=True, 
save=save, save_dir=save_dir) # crop results def render(self, labels=True): From 2373d5470e386a0c63c6ab77fbee6d699665e27b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 27 Sep 2022 18:02:48 +0200 Subject: [PATCH 185/326] NMS MPS device wrapper (#9620) * NMS MPS device wrapper May resolve https://github.com/ultralytics/yolov5/issues/9613 Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index a855691d3a1f..d31b043a113e 100644 --- a/utils/general.py +++ b/utils/general.py @@ -843,7 +843,9 @@ def non_max_suppression( if isinstance(prediction, (list, tuple)): # YOLOv5 model in validation model, output = (inference_out, loss_out) prediction = prediction[0] # select only inference output - if 'mps' in prediction.device.type: # MPS not fully supported yet, convert tensors to CPU before NMS + device = prediction.device + mps = 'mps' in device.type # Apple MPS + if mps: # MPS not fully supported yet, convert tensors to CPU before NMS prediction = prediction.cpu() bs = prediction.shape[0] # batch size nc = prediction.shape[2] - nm - 5 # number of classes @@ -930,6 +932,8 @@ def non_max_suppression( i = i[iou.sum(1) > 1] # require redundancy output[xi] = x[i] + if mps: + output[xi] = output[xi].to(device) if (time.time() - t) > time_limit: LOGGER.warning(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded') break # time limit exceeded From 799e3d0cc92a9f431d97931641e7d0b46720699a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 28 Sep 2022 16:43:11 +0200 Subject: [PATCH 186/326] Add SegmentationModel unsupported warning (#9632) * Add SegmentationModel unsupported warning Signed-off-by: Glenn Jocher * Update hubconf.py Signed-off-by: Glenn Jocher * Update hubconf.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- hubconf.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/hubconf.py b/hubconf.py index 4224760a4732..95b95a5c30cc 100644 --- a/hubconf.py +++ b/hubconf.py @@ -30,7 +30,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo from models.common import AutoShape, DetectMultiBackend from models.experimental import attempt_load - from models.yolo import ClassificationModel, DetectionModel + from models.yolo import ClassificationModel, DetectionModel, SegmentationModel from utils.downloads import attempt_download from utils.general import LOGGER, check_requirements, intersect_dicts, logging from utils.torch_utils import select_device @@ -47,8 +47,11 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model if autoshape: if model.pt and isinstance(model.model, ClassificationModel): - LOGGER.warning('WARNING ⚠️ YOLOv5 v6.2 ClassificationModel is not yet AutoShape compatible. ' + LOGGER.warning('WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. ' 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') + elif model.pt and isinstance(model.model, SegmentationModel): + LOGGER.warning('WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. 
'
+                'You will not be able to run inference with this model.')
         else:
             model = AutoShape(model)  # for file/URI/PIL/cv2/np inputs and NMS
     except Exception:

From 0860e58557f26a0136dd8afbc82f408f31d15ecd Mon Sep 17 00:00:00 2001
From: Soumik Rakshit <19soumik.rakshit96@gmail.com>
Date: Fri, 30 Sep 2022 02:31:45 +0530
Subject: [PATCH 187/326] Disabled upload_dataset flag temporarily due to an artifact related bug (#9652)

* disabled upload_dataset flag temporarily due to an artifact related bug

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 utils/loggers/wandb/wandb_utils.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py
index e850d2ac8a7c..d2dd0fa7c6cd 100644
--- a/utils/loggers/wandb/wandb_utils.py
+++ b/utils/loggers/wandb/wandb_utils.py
@@ -132,6 +132,11 @@ def __init__(self, opt, run_id=None, job_type='Training'):
         job_type (str) -- To set the job_type for this run

         """
+        # Temporary-fix
+        if opt.upload_dataset:
+            opt.upload_dataset = False
+            LOGGER.info("Uploading Dataset functionality is not being supported temporarily due to a bug.")
+
         # Pre-training routine --
         self.job_type = job_type
         self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run

From 82bec4c8785e123bbea01f6f2d4215c2077ac81f Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 29 Sep 2022 23:35:39 +0200
Subject: [PATCH 188/326] Add NVIDIA Jetson Nano Deployment tutorial (#9656)

Signed-off-by: Glenn Jocher

Signed-off-by: Glenn Jocher
---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index 514270973137..8b1c98b34e8f 100644
--- a/README.md
+++ b/README.md
@@ -163,6 +163,7 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12
 - [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475)
 - [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 🌟 NEW
 - [TFLite, ONNX, CoreML, TensorRT Export](https://github.com/ultralytics/yolov5/issues/251) 🚀
+- [NVIDIA Jetson Nano Deployment](https://github.com/ultralytics/yolov5/issues/9627) 🌟 NEW
 - [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303)
 - [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318)
 - [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304)

From 8a19437690548a158b78ab27b7f5b463a268fa19 Mon Sep 17 00:00:00 2001
From: Anant Sakhare <70131870+senhorinfinito@users.noreply.github.com>
Date: Sat, 1 Oct 2022 20:12:31 +0530
Subject: [PATCH 189/326] Added cutout import from utils/augmentations.py to
 use Cutout Aug in … (#9668)

* Added cutout import from utils/augmentations.py to use Cutout Aug in data loader by un-commenting line 679, 680, 681

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 utils/dataloaders.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/dataloaders.py b/utils/dataloaders.py
index 6cd1da6b9cf9..d849d5150f4b 100644
--- a/utils/dataloaders.py
+++ b/utils/dataloaders.py
@@ -29,7 +29,7 @@
 from utils.augmentations import (Albumentations,
augment_hsv, classify_albumentations, classify_transforms, copy_paste, - letterbox, mixup, random_perspective) + cutout, letterbox, mixup, random_perspective) from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, cv2, is_colab, is_kaggle, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first From 1158a50abd78808049327fdf60724b2b32726d88 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 2 Oct 2022 13:37:54 +0200 Subject: [PATCH 190/326] Simplify val.py benchmark mode with speed mode (#9674) Update --- benchmarks.py | 4 ++-- segment/val.py | 3 +-- val.py | 3 +-- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/benchmarks.py b/benchmarks.py index b3b58eb3257c..ef5c882973f0 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -81,10 +81,10 @@ def run( # Validate if model_type == SegmentationModel: - result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) + result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half) metric = result[0][7] # (box(p, r, map50, map), mask(p, r, map50, map), *loss(box, obj, cls)) else: # DetectionModel: - result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) + result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half) metric = result[0][3] # (p, r, map50, map, *loss(box, obj, cls)) speed = result[2][1] # times (preprocess, inference, postprocess) y.append([name, round(file_size(w), 1), round(metric, 4), round(speed, 2)]) # MB, mAP, t_inference diff --git a/segment/val.py b/segment/val.py index 0a37998c1771..f1ec54638d61 100644 --- a/segment/val.py +++ b/segment/val.py @@ -210,8 +210,7 @@ def run( assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ f'classes). Pass correct combination of --weights and --data that are trained together.' model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup - pad = 0.0 if task in ('speed', 'benchmark') else 0.5 - rect = False if task == 'benchmark' else pt # square inference for benchmarks + pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, diff --git a/val.py b/val.py index c0954498d2fb..ca838c0beb2f 100644 --- a/val.py +++ b/val.py @@ -169,8 +169,7 @@ def run( assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ f'classes). Pass correct combination of --weights and --data that are trained together.' 
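# Illustrative aside on this simplification (#9674), not part of the patch:
# the former 'benchmark' task is folded into 'speed', so a single conditional
# expression now selects the dataloader padding and rectangular-inference flag.
#
#   before:  pad = 0.0 if task in ('speed', 'benchmark') else 0.5
#            rect = False if task == 'benchmark' else pt
#   after:   pad, rect = (0.0, False) if task == 'speed' else (0.5, pt)
#
# Note the behavior change: task == 'speed' now also forces square inference
# (rect=False), which previously only the removed 'benchmark' task did.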
     model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup
-    pad = 0.0 if task in ('speed', 'benchmark') else 0.5
-    rect = False if task == 'benchmark' else pt  # square inference for benchmarks
+    pad, rect = (0.0, False) if task == 'speed' else (0.5, pt)  # square inference for benchmarks
     task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
     dataloader = create_dataloader(data[task],
                                    imgsz,

From c98128fe71a8676037a0605ab389c7473c743d07 Mon Sep 17 00:00:00 2001
From: KristenKehrer <34010022+KristenKehrer@users.noreply.github.com>
Date: Sun, 2 Oct 2022 18:25:10 -0400
Subject: [PATCH 191/326] Allow list for Comet artifact class 'names' field (#9654)

* Update __init__.py

In the Comet logger, when I run train.py, it wants to download the data artifact. It was requiring me to format the 'names' field in the data artifact metadata as a dictionary, so I've changed this so that it also accepts a list.

Signed-off-by: KristenKehrer <34010022+KristenKehrer@users.noreply.github.com>

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* Update utils/loggers/comet/__init__.py

Co-authored-by: Dhruv Nair
Signed-off-by: KristenKehrer <34010022+KristenKehrer@users.noreply.github.com>

Signed-off-by: KristenKehrer <34010022+KristenKehrer@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Dhruv Nair
Co-authored-by: Glenn Jocher
---
 utils/loggers/comet/__init__.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py
index ba5cecc8e096..b0318f88d6a6 100644
--- a/utils/loggers/comet/__init__.py
+++ b/utils/loggers/comet/__init__.py
@@ -353,7 +353,14 @@ def download_dataset_artifact(self, artifact_path):
         metadata = logged_artifact.metadata
         data_dict = metadata.copy()
         data_dict["path"] = artifact_save_dir
-        data_dict["names"] = {int(k): v for k, v in metadata.get("names").items()}
+
+        metadata_names = metadata.get("names")
+        if type(metadata_names) == dict:
+            data_dict["names"] = {int(k): v for k, v in metadata.get("names").items()}
+        elif type(metadata_names) == list:
+            data_dict["names"] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)}
+        else:
+            raise TypeError("Invalid 'names' field in dataset yaml file. Please use a list or dictionary")

         data_dict = self.update_data_paths(data_dict)
         return data_dict

From 68d654d8c4d473aa81be91ac42f320009736992b Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 4 Oct 2022 16:31:51 +0200
Subject: [PATCH 192/326] [pre-commit.ci] pre-commit suggestions (#9685)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

updates:
- [github.com/asottile/pyupgrade: v2.37.3 → v2.38.2](https://github.com/asottile/pyupgrade/compare/v2.37.3...v2.38.2)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .pre-commit-config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index ba8005535397..1cd102c26b41 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -24,7 +24,7 @@ repos:
       - id: check-docstring-first

   - repo: https://github.com/asottile/pyupgrade
-    rev: v2.37.3
+    rev: v2.38.2
     hooks:
       - id: pyupgrade
        name: Upgrade code

From e4398cf179601d47207e9f526cf0760b82058930 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Tue, 4 Oct 2022 16:32:19 +0200
Subject: [PATCH 193/326] TensorRT `--dynamic` fix (#9691)

* Update export.py

Signed-off-by: Glenn Jocher

* Update export.py

Signed-off-by: Glenn Jocher

Signed-off-by: Glenn Jocher
---
 export.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/export.py b/export.py
index cf37965cea6b..66d4d636133a 100644
--- a/export.py
+++ b/export.py
@@ -251,11 +251,11 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose
     if trt.__version__[0] == '7':  # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012
         grid = model.model[-1].anchor_grid
         model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid]
-        export_onnx(model, im, file, 12, False, dynamic, simplify)  # opset 12
+        export_onnx(model, im, file, 12, dynamic, simplify)  # opset 12
         model.model[-1].anchor_grid = grid
     else:  # TensorRT >= 8
         check_version(trt.__version__, '8.0.0', hard=True)  # require tensorrt>=8.0.0
-        export_onnx(model, im, file, 12, False, dynamic, simplify)  # opset 12
+        export_onnx(model, im, file, 12, dynamic, simplify)  # opset 12
     onnx = file.with_suffix('.onnx')

     LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
@@ -285,7 +285,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose

     if dynamic:
         if im.shape[0] <= 1:
-            LOGGER.warning(f"{prefix}WARNING ⚠️ --dynamic model requires maximum --batch-size argument")
+            LOGGER.warning(f"{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument")
         profile = builder.create_optimization_profile()
         for inp in inputs:
             profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape)

From 7f097ddb6c9921d64fa504a8db79cf24fa0a913c Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 5 Oct 2022 22:29:46 +0200
Subject: [PATCH 194/326] FROM nvcr.io/nvidia/pytorch:22.09-py3 (#9711)

Signed-off-by: Glenn Jocher

Signed-off-by: Glenn Jocher
---
 utils/docker/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile
index 764ee278c22e..9b93fad7b203 100644
--- a/utils/docker/Dockerfile
+++ b/utils/docker/Dockerfile
@@ -3,7 +3,7 @@
 # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference

 # Start FROM NVIDIA PyTorch image
https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.08-py3 +FROM nvcr.io/nvidia/pytorch:22.09-py3 RUN rm -rf /opt/pytorch # remove 1.2GB dir # Downloads to user config dir From 5ef69ef3e6180709bc292370ed314b6029ecabfc Mon Sep 17 00:00:00 2001 From: Paul Guerrie <97041392+paulguerrie@users.noreply.github.com> Date: Thu, 6 Oct 2022 14:55:15 -0600 Subject: [PATCH 195/326] Error in utils/segment/general `masks2segments()` (#9724) When running segmentation predict on gpu, the conversion from tensor to numpy fails. Calling `.cpu()` solves this problem. Signed-off-by: Paul Guerrie <97041392+paulguerrie@users.noreply.github.com> Signed-off-by: Paul Guerrie <97041392+paulguerrie@users.noreply.github.com> --- utils/segment/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/segment/general.py b/utils/segment/general.py index 655123bdcfeb..43bdc460f928 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -124,7 +124,7 @@ def masks_iou(mask1, mask2, eps=1e-7): def masks2segments(masks, strategy='largest'): # Convert masks(n,160,160) into segments(n,xy) segments = [] - for x in masks.int().numpy().astype('uint8'): + for x in masks.int().cpu().numpy().astype('uint8'): c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] if strategy == 'concat': # concatenate all segments c = np.concatenate([x.reshape(-1, 2) for x in c]) From 209be932dec9e89b902f0ac2975fa599e9bc676f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 9 Oct 2022 23:51:29 +0200 Subject: [PATCH 196/326] Fix segment evolution keys (#9742) * Update * Cleanup --- segment/train.py | 2 +- train.py | 4 +++- utils/general.py | 5 ++--- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/segment/train.py b/segment/train.py index 5121c5fa784a..26f0d0c13c78 100644 --- a/segment/train.py +++ b/segment/train.py @@ -651,7 +651,7 @@ def main(opt, callbacks=Callbacks()): results = train(hyp.copy(), opt, device, callbacks) callbacks = Callbacks() # Write mutation results - print_mutation(results, hyp.copy(), save_dir, opt.bucket) + print_mutation(KEYS, results, hyp.copy(), save_dir, opt.bucket) # Plot results plot_evolve(evolve_csv) diff --git a/train.py b/train.py index 9efece250581..177e081c8c37 100644 --- a/train.py +++ b/train.py @@ -607,7 +607,9 @@ def main(opt, callbacks=Callbacks()): results = train(hyp.copy(), opt, device, callbacks) callbacks = Callbacks() # Write mutation results - print_mutation(results, hyp.copy(), save_dir, opt.bucket) + keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss', + 'val/obj_loss', 'val/cls_loss') + print_mutation(keys, results, hyp.copy(), save_dir, opt.bucket) # Plot results plot_evolve(evolve_csv) diff --git a/utils/general.py b/utils/general.py index d31b043a113e..e2faca9dbf2a 100644 --- a/utils/general.py +++ b/utils/general.py @@ -957,11 +957,10 @@ def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_op LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB") -def print_mutation(results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')): +def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')): evolve_csv = save_dir / 'evolve.csv' evolve_yaml = save_dir / 'hyp_evolve.yaml' - keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss', - 'val/obj_loss', 'val/cls_loss') + tuple(hyp.keys()) # [results + hyps] + keys = 
tuple(keys) + tuple(hyp.keys()) # [results + hyps] keys = tuple(x.strip() for x in keys) vals = results + tuple(hyp.values()) n = len(keys) From 2f1eb21ad6c0f715f38200c31e6e01a92c5acb25 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 10 Oct 2022 14:54:21 +0200 Subject: [PATCH 197/326] Remove #YOLOVISION22 notice (#9751) Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/README.md b/README.md index 8b1c98b34e8f..8c19e52c45d7 100644 --- a/README.md +++ b/README.md @@ -1,19 +1,4 @@
-
-  Hi, I'm [Glenn Jocher](https://www.linkedin.com/in/glenn-jocher/), author of [YOLOv5](https://github.com/ultralytics/yolov5) 🚀.
-
-  I'd like to invite you to attend the world's first-ever YOLO conference: [#YOLOVISION22](https://ultralytics.com/yolo-vision)!
-
-  This virtual event takes place on **September 27th, 2022** with talks from the world's leading vision AI experts from Google, OpenMMLab's MMDetection, Baidu's PaddlePaddle, Meituan's YOLOv6, Weights & Biases, Roboflow, Neural Magic, OctoML and of course Ultralytics YOLOv5 and many others.
-
-  Save your spot at https://ultralytics.com/yolo-vision!
-
-
-
-
-##
-
-
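The `.cpu()` fix in [PATCH 195/326] above follows a general PyTorch rule: `Tensor.numpy()` shares memory with the tensor, so it only works on CPU tensors and raises a `TypeError` for CUDA tensors. A minimal standalone sketch of the failure mode and the fix (illustrative only, not part of the patch series; the (n, 160, 160) mask shape mirrors the masks2segments() usage above):

    import torch

    # Stand-in for the segmentation masks tensor: (n, 160, 160) on the inference device
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    masks = (torch.rand(3, 160, 160, device=device) > 0.5).float()

    # Pre-fix: masks.int().numpy() raises on a CUDA tensor:
    #   TypeError: can't convert cuda:0 device type tensor to numpy. Use Tensor.cpu() ...
    # Post-fix: copy to host memory first, then convert.
    arr = masks.int().cpu().numpy().astype('uint8')  # works on CPU and GPU alike
    print(arr.shape, arr.dtype)  # (3, 160, 160) uint8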
From 7a69035eb8a15f44a1dc8f1e07ee71b674e98271 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 12 Oct 2022 12:53:12 +0200 Subject: [PATCH 198/326] Update Loggers (#9760) * Update * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update requirements.txt Signed-off-by: Glenn Jocher * Update * Update README.md Signed-off-by: Glenn Jocher * Update Signed-off-by: Glenn Jocher --- README.md | 16 ++++++---------- requirements.txt | 2 +- tutorial.ipynb | 25 +++---------------------- utils/docker/Dockerfile | 2 +- utils/loggers/__init__.py | 14 +++++++------- utils/loggers/wandb/wandb_utils.py | 2 +- 6 files changed, 19 insertions(+), 42 deletions(-) diff --git a/README.md b/README.md index 8c19e52c45d7..8f45ccd229b5 100644 --- a/README.md +++ b/README.md @@ -155,7 +155,6 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12 - [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607) - [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314) - [Architecture Summary](https://github.com/ultralytics/yolov5/issues/6998) 🌟 NEW -- [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289) - [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW - [ClearML Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) 🌟 NEW - [Deci Platform](https://github.com/ultralytics/yolov5/wiki/Deci-Platform) 🌟 NEW @@ -171,23 +170,20 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12
-|Comet ⭐ NEW|Deci ⭐ NEW|ClearML ⭐ NEW|Roboflow|Weights & Biases
-|:-:|:-:|:-:|:-:|:-:|
-|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://bit.ly/yolov5-readme-comet)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)
+|Comet ⭐ NEW|Deci ⭐ NEW|ClearML ⭐ NEW|Roboflow|
+|:-:|:-:|:-:|:-:|
+|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://bit.ly/yolov5-readme-comet)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|

 ## Why YOLOv5
diff --git a/requirements.txt b/requirements.txt index 0436f415c642..52f7b9ea57d2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,8 +16,8 @@ tqdm>=4.64.0 # Logging ------------------------------------- tensorboard>=2.4.1 -# wandb # clearml +# comet # Plotting ------------------------------------ pandas>=1.1.4 diff --git a/tutorial.ipynb b/tutorial.ipynb index 5d867fb36c93..63abebc5b37f 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -655,7 +655,7 @@ "cell_type": "code", "source": [ "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", - "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML', 'W&B']\n", + "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", "\n", "if logger == 'TensorBoard':\n", " %load_ext tensorboard\n", @@ -664,10 +664,7 @@ " %pip install -q comet_ml\n", " import comet_ml; comet_ml.init()\n", "elif logger == 'ClearML':\n", - " %pip install -q clearml && clearml-init\n", - "elif logger == 'W&B':\n", - " %pip install -q wandb\n", - " import wandb; wandb.login()" + " %pip install -q clearml && clearml-init" ], "metadata": { "id": "i3oKtE4g-aNn" @@ -699,7 +696,7 @@ "YOLOv5 🚀 v6.2-56-g30e674b Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", - "\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases\n", + "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet' to automatically track and visualize YOLOv5 🚀 runs with Comet\n", "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML\n", "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", "\n", @@ -905,22 +902,6 @@ "id": "Lay2WsTjNJzP" } }, - { - "cell_type": "markdown", - "metadata": { - "id": "DLI1JmHU7B0l" - }, - "source": [ - "## Weights & Biases Logging\n", - "\n", - "[Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_notebook) (W&B) is integrated with YOLOv5 for real-time visualization and cloud logging of training runs. This allows for better run comparison and introspection, as well improved visibility and collaboration for teams. To enable W&B `pip install wandb`, and then train normally (you will be guided through setup on first use). \n", - "\n", - "During training you will see live updates at [https://wandb.ai/home](https://wandb.ai/home?utm_campaign=repo_yolo_notebook), and you can create and share detailed [Reports](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY) of your results. For more information see the [YOLOv5 Weights & Biases Tutorial](https://github.com/ultralytics/yolov5/issues/1289). 
\n", - "\n", - "\n", - "\"Weights" - ] - }, { "cell_type": "markdown", "metadata": { diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 9b93fad7b203..be5c2fb71517 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -16,7 +16,7 @@ RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1- COPY requirements.txt . RUN python -m pip install --upgrade pip wheel RUN pip uninstall -y Pillow torchtext torch torchvision -RUN pip install --no-cache -r requirements.txt albumentations wandb gsutil notebook Pillow>=9.1.0 \ +RUN pip install --no-cache -r requirements.txt albumentations comet clearml gsutil notebook Pillow>=9.1.0 \ 'opencv-python<4.6.0.66' \ --extra-index-url https://download.pytorch.org/whl/cu113 diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 941d09e19e2d..bc8dd7621579 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -84,10 +84,10 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, self.csv = True # always log to csv # Messages - if not wandb: - prefix = colorstr('Weights & Biases: ') - s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases" - self.logger.info(s) + # if not wandb: + # prefix = colorstr('Weights & Biases: ') + # s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases" + # self.logger.info(s) if not clearml: prefix = colorstr('ClearML: ') s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML" @@ -110,9 +110,9 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, self.opt.hyp = self.hyp # add hyperparameters self.wandb = WandbLogger(self.opt, run_id) # temp warn. because nested artifacts not supported after 0.12.10 - if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'): - s = "YOLOv5 temporarily requires wandb version 0.12.10 or below. Some features may not work as expected." - self.logger.warning(s) + # if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'): + # s = "YOLOv5 temporarily requires wandb version 0.12.10 or below. Some features may not work as expected." 
+ # self.logger.warning(s) else: self.wandb = None diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index d2dd0fa7c6cd..238f4edbf2a0 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -135,7 +135,7 @@ def __init__(self, opt, run_id=None, job_type='Training'): # Temporary-fix if opt.upload_dataset: opt.upload_dataset = False - LOGGER.info("Uploading Dataset functionality is not being supported temporarily due to a bug.") + # LOGGER.info("Uploading Dataset functionality is not being supported temporarily due to a bug.") # Pre-training routine -- self.job_type = job_type From 85ae985b6a232f3a3e2f7400243cec2ca0b5f8d1 Mon Sep 17 00:00:00 2001 From: Vladislav Veklenko <71467601+vladoossss@users.noreply.github.com> Date: Thu, 13 Oct 2022 01:44:01 +0200 Subject: [PATCH 199/326] update mask2segments and saving results (#9785) * update mask2segments and saving results * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update general.py Signed-off-by: Glenn Jocher * Update predict.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/segment/general.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/utils/segment/general.py b/utils/segment/general.py index 43bdc460f928..b526333dc5a1 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -126,9 +126,12 @@ def masks2segments(masks, strategy='largest'): segments = [] for x in masks.int().cpu().numpy().astype('uint8'): c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] - if strategy == 'concat': # concatenate all segments - c = np.concatenate([x.reshape(-1, 2) for x in c]) - elif strategy == 'largest': # select largest segment - c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) + if c: + if strategy == 'concat': # concatenate all segments + c = np.concatenate([x.reshape(-1, 2) for x in c]) + elif strategy == 'largest': # select largest segment + c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) + else: + c = np.zeros((0, 2)) # no segments found segments.append(c.astype('float32')) return segments From 16f87bb38e76a5aa14ee93252042063b678ece86 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 13 Oct 2022 02:32:06 +0200 Subject: [PATCH 200/326] HUB VOC fix (#9792) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/general.py b/utils/general.py index e2faca9dbf2a..d9d54d9e4f71 100644 --- a/utils/general.py +++ b/utils/general.py @@ -477,6 +477,7 @@ def check_dataset(data, autodownload=True): path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.' 
if not path.is_absolute(): path = (ROOT / path).resolve() + data['path'] = path # download scripts for k in 'train', 'val', 'test': if data.get(k): # prepend path if isinstance(data[k], str): From 15b75659ddc2552bd9239db8a3c940322da49b80 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 13 Oct 2022 15:27:16 +0200 Subject: [PATCH 201/326] Update hubconf.py local repo Usage example (#9803) * Update hubconf.py Signed-off-by: Glenn Jocher * Update hubconf.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- hubconf.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/hubconf.py b/hubconf.py index 95b95a5c30cc..2c6ec13f815c 100644 --- a/hubconf.py +++ b/hubconf.py @@ -4,8 +4,10 @@ Usage: import torch - model = torch.hub.load('ultralytics/yolov5', 'yolov5s') - model = torch.hub.load('ultralytics/yolov5:master', 'custom', 'path/to/yolov5s.onnx') # custom model from branch + model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # official model + model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s') # from branch + model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt') # custom/local model + model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local') # local repo """ import torch From 2a19d070d8a92bbf44dca8a40c503ec7406228d9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 14 Oct 2022 12:28:52 +0200 Subject: [PATCH 202/326] Fix xView dataloaders import (#9807) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- data/xView.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/xView.yaml b/data/xView.yaml index b134ceac8164..770ab7870449 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -87,7 +87,7 @@ download: | from PIL import Image from tqdm import tqdm - from utils.datasets import autosplit + from utils.dataloaders import autosplit from utils.general import download, xyxy2xywhn From df80e7c723b5722fe5b8d935ace73b8b28572ed4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 14 Oct 2022 18:18:58 +0200 Subject: [PATCH 203/326] Argoverse HUB fix (#9809) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- data/Argoverse.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/Argoverse.yaml b/data/Argoverse.yaml index e3e9ba161ed0..558151dc849e 100644 --- a/data/Argoverse.yaml +++ b/data/Argoverse.yaml @@ -63,7 +63,7 @@ download: | # Download - dir = Path('../datasets/Argoverse') # dataset root dir + dir = Path(yaml['path']) # dataset root dir urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip'] download(urls, dir=dir, delete=False) From e42c89d4efc99bfbd8c5c208ffe67c11632da84a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 16 Oct 2022 20:51:32 +0200 Subject: [PATCH 204/326] `smart_optimizer()` revert to weight with decay (#9817) If a parameter does not fall into any other category Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/torch_utils.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 9f257d06ac60..04a3873854ee 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -319,12 +319,13 @@ def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e-5): g = [], [], [] # optimizer parameter groups bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k) # normalization layers, i.e. 
BatchNorm2d() for v in model.modules(): - if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): # bias (no decay) - g[2].append(v.bias) - if isinstance(v, bn): # weight (no decay) - g[1].append(v.weight) - elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight (with decay) - g[0].append(v.weight) + for p_name, p in v.named_parameters(recurse=0): + if p_name == 'bias': # bias (no decay) + g[2].append(p) + elif p_name == 'weight' and isinstance(v, bn): # weight (no decay) + g[1].append(p) + else: + g[0].append(p) # weight (with decay) if name == 'Adam': optimizer = torch.optim.Adam(g[2], lr=lr, betas=(momentum, 0.999)) # adjust beta1 to momentum From e3ff7806769444de864060494d1be8e18ce046a1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 17 Oct 2022 14:34:33 +0200 Subject: [PATCH 205/326] Allow PyTorch Hub results to display in notebooks (#9825) * Allow PyTorch Hub results to display in notebooks * fix CI * fix CI * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix CI * fix CI * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix CI * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix CI * fix CI * fix CI * fix CI * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix CI * fix CI * fix CI Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- classify/predict.py | 2 +- detect.py | 2 +- models/common.py | 13 +++++++++---- segment/predict.py | 2 +- utils/__init__.py | 2 +- utils/autoanchor.py | 2 +- utils/general.py | 17 +++++++++++++---- utils/metrics.py | 2 +- 8 files changed, 28 insertions(+), 14 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 9114aab1d703..9373649bf27d 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -91,7 +91,7 @@ def run( # Dataloader bs = 1 # batch_size if webcam: - view_img = check_imshow() + view_img = check_imshow(warn=True) dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) bs = len(dataset) elif screenshot: diff --git a/detect.py b/detect.py index 8f48d8d28000..98af7235ea69 100644 --- a/detect.py +++ b/detect.py @@ -99,7 +99,7 @@ def run( # Dataloader bs = 1 # batch_size if webcam: - view_img = check_imshow() + view_img = check_imshow(warn=True) dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) bs = len(dataset) elif screenshot: diff --git a/models/common.py b/models/common.py index d889d0292c61..e6da429de3e5 100644 --- a/models/common.py +++ b/models/common.py @@ -18,16 +18,20 @@ import requests import torch import torch.nn as nn +from IPython.display import display from PIL import Image from torch.cuda import amp +from utils import TryExcept from utils.dataloaders import exif_transpose, letterbox -from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr, - increment_path, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, xyxy2xywh, - yaml_load) +from utils.general import (LOGGER, ROOT, Profile, check_imshow, check_requirements, check_suffix, check_version, + colorstr, increment_path, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, + xyxy2xywh, yaml_load) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import copy_attr, 
smart_inference_mode +CHECK_IMSHOW = check_imshow() + def autopad(k, p=None, d=1): # kernel, padding, dilation # Pad to 'same' shape outputs @@ -756,7 +760,7 @@ def _run(self, pprint=False, show=False, save=False, crop=False, render=False, l im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np if show: - im.show(self.files[i]) # show + im.show(self.files[i]) if CHECK_IMSHOW else display(im) if save: f = self.files[i] im.save(save_dir / f) # save @@ -772,6 +776,7 @@ def _run(self, pprint=False, show=False, save=False, crop=False, render=False, l LOGGER.info(f'Saved results to {save_dir}\n') return crops + @TryExcept('Showing images is not supported in this environment') def show(self, labels=True): self._run(show=True, labels=labels) # show results diff --git a/segment/predict.py b/segment/predict.py index 94117cd78633..44d6d3904c19 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -102,7 +102,7 @@ def run( # Dataloader bs = 1 # batch_size if webcam: - view_img = check_imshow() + view_img = check_imshow(warn=True) dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) bs = len(dataset) elif screenshot: diff --git a/utils/__init__.py b/utils/__init__.py index 8403a6149827..0afe6f475625 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -23,7 +23,7 @@ def __enter__(self): def __exit__(self, exc_type, value, traceback): if value: - print(emojis(f'{self.msg}{value}')) + print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}")) return True diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 7e7e9985d68a..cfc4c276e3aa 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -26,7 +26,7 @@ def check_anchor_order(m): m.anchors[:] = m.anchors.flip(0) -@TryExcept(f'{PREFIX}ERROR: ') +@TryExcept(f'{PREFIX}ERROR') def check_anchors(dataset, model, thr=4.0, imgsz=640): # Check anchor fit to data, recompute if necessary m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() diff --git a/utils/general.py b/utils/general.py index d9d54d9e4f71..76bc0b1d7a79 100644 --- a/utils/general.py +++ b/utils/general.py @@ -27,6 +27,7 @@ from zipfile import ZipFile import cv2 +import IPython import numpy as np import pandas as pd import pkg_resources as pkg @@ -73,6 +74,12 @@ def is_colab(): return 'COLAB_GPU' in os.environ +def is_notebook(): + # Is environment a Jupyter notebook? Verified on Colab, Jupyterlab, Kaggle, Paperspace + ipython_type = str(type(IPython.get_ipython())) + return 'colab' in ipython_type or 'zmqshell' in ipython_type + + def is_kaggle(): # Is environment a Kaggle Notebook? 
return os.environ.get('PWD') == '/kaggle/working' and os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com' @@ -383,18 +390,20 @@ def check_img_size(imgsz, s=32, floor=0): return new_size -def check_imshow(): +def check_imshow(warn=False): # Check if environment supports image displays try: - assert not is_docker(), 'cv2.imshow() is disabled in Docker environments' - assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments' + assert not is_notebook() + assert not is_docker() + assert 'NoneType' not in str(type(IPython.get_ipython())) # SSH terminals, GitHub CI cv2.imshow('test', np.zeros((1, 1, 3))) cv2.waitKey(1) cv2.destroyAllWindows() cv2.waitKey(1) return True except Exception as e: - LOGGER.warning(f'WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') + if warn: + LOGGER.warning(f'WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show()\n{e}') return False diff --git a/utils/metrics.py b/utils/metrics.py index ed611d7d38fa..f0bc787e1518 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -186,7 +186,7 @@ def tp_fp(self): # fn = self.matrix.sum(0) - tp # false negatives (missed detections) return tp[:-1], fp[:-1] # remove background class - @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure: ') + @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure') def plot(self, normalize=True, save_dir='', names=()): import seaborn as sn From acff977af3a6e23e9c25e932208efed73f9b7810 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 17 Oct 2022 15:30:42 +0200 Subject: [PATCH 206/326] Logger Cleanup (#9828) --- segment/train.py | 12 ------------ train.py | 4 +--- utils/general.py | 2 +- 3 files changed, 2 insertions(+), 16 deletions(-) diff --git a/segment/train.py b/segment/train.py index 26f0d0c13c78..5a5f15f10d84 100644 --- a/segment/train.py +++ b/segment/train.py @@ -91,17 +91,6 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio data_dict = None if RANK in {-1, 0}: logger = GenericLogger(opt=opt, console_logger=LOGGER) - # loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance - # if loggers.clearml: - # data_dict = loggers.clearml.data_dict # None if no ClearML dataset or filled in by ClearML - # if loggers.wandb: - # data_dict = loggers.wandb.data_dict - # if resume: - # weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size - # - # # Register actions - # for k in methods(loggers): - # callbacks.register_action(k, callback=getattr(loggers, k)) # Config plots = not evolve and not opt.noplots # create plots @@ -400,7 +389,6 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio 'ema': deepcopy(ema.ema).half(), 'updates': ema.updates, 'optimizer': optimizer.state_dict(), - # 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None, 'opt': vars(opt), 'date': datetime.now().isoformat()} diff --git a/train.py b/train.py index 177e081c8c37..c24a8e81531d 100644 --- a/train.py +++ b/train.py @@ -53,7 +53,6 @@ one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) from utils.loggers import Loggers from utils.loggers.comet.comet_utils import check_comet_resume -from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.loss import ComputeLoss from utils.metrics import fitness from utils.plots import plot_evolve @@ -375,7 +374,6 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio 'ema': deepcopy(ema.ema).half(), 'updates': ema.updates, 
'optimizer': optimizer.state_dict(), - 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None, 'opt': vars(opt), 'date': datetime.now().isoformat()} @@ -483,7 +481,7 @@ def main(opt, callbacks=Callbacks()): check_requirements() # Resume (from specified or most recent last.pt) - if opt.resume and not check_wandb_resume(opt) and not check_comet_resume(opt) and not opt.evolve: + if opt.resume and not check_comet_resume(opt) and not opt.evolve: last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run()) opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml opt_data = opt.data # original dataset diff --git a/utils/general.py b/utils/general.py index 76bc0b1d7a79..8ea0ad07ed13 100644 --- a/utils/general.py +++ b/utils/general.py @@ -956,7 +956,7 @@ def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_op x = torch.load(f, map_location=torch.device('cpu')) if x.get('ema'): x['model'] = x['ema'] # replace model with ema - for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates': # keys + for k in 'optimizer', 'best_fitness', 'ema', 'updates': # keys x[k] = None x['epoch'] = -1 x['model'].half() # to FP16 From f1482b0667a7cb116fde43132c1e140a9f3cee20 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 18 Oct 2022 13:54:33 +0200 Subject: [PATCH 207/326] Remove ipython from `check_requirements` exclude list (#9841) May resolve https://github.com/ultralytics/yolov5/commit/e3ff7806769444de864060494d1be8e18ce046a1#commitcomment-87136818 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- hubconf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hubconf.py b/hubconf.py index 2c6ec13f815c..41af8e39d14d 100644 --- a/hubconf.py +++ b/hubconf.py @@ -39,7 +39,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo if not verbose: LOGGER.setLevel(logging.WARNING) - check_requirements(exclude=('ipython', 'opencv-python', 'tensorboard', 'thop')) + check_requirements(exclude=('opencv-python', 'tensorboard', 'thop')) name = Path(name) path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path try: From 010cd0db7d491484caae3c31754b2cf13156baa7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 18 Oct 2022 15:25:21 +0200 Subject: [PATCH 208/326] Update HUBDatasetStats() usage examples (#9842) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index d849d5150f4b..5074d25ee268 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -1005,13 +1005,18 @@ def verify_image_label(args): class HUBDatasetStats(): - """ Return dataset statistics dictionary with images and instances counts per split per class - To run in parent directory: export PYTHONPATH="$PWD/yolov5" - Usage1: from utils.dataloaders import *; HUBDatasetStats('coco128.yaml', autodownload=True) - Usage2: from utils.dataloaders import *; HUBDatasetStats('path/to/coco128_with_yaml.zip') + """ Class for generating HUB dataset JSON and `-hub` dataset directory + Arguments path: Path to data.yaml or data.zip (with data.yaml inside data.zip) autodownload: Attempt to download dataset if not found locally + + Usage + from utils.dataloaders import HUBDatasetStats + stats = HUBDatasetStats('coco128.yaml', autodownload=True) # usage 1 + stats = HUBDatasetStats('path/to/coco128.zip') # usage 2 + stats.get_json(save=False) + 
stats.process_images() """ def __init__(self, path='coco128.yaml', autodownload=False): From d0df6c840372b77a7c075f2231914f53112e79eb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 18 Oct 2022 16:35:04 +0200 Subject: [PATCH 209/326] Update ZipFile to context manager (#9843) * Update zipFile to context manager * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/dataloaders.py | 6 ++-- utils/downloads.py | 81 -------------------------------------------- utils/general.py | 14 ++++++-- 3 files changed, 15 insertions(+), 86 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 5074d25ee268..37b3ffb2728b 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -17,7 +17,6 @@ from pathlib import Path from threading import Thread from urllib.parse import urlparse -from zipfile import ZipFile import numpy as np import torch @@ -31,7 +30,8 @@ from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, cutout, letterbox, mixup, random_perspective) from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, - cv2, is_colab, is_kaggle, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) + cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, xywh2xyxy, xywhn2xyxy, + xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -1053,7 +1053,7 @@ def _unzip(self, path): if not str(path).endswith('.zip'): # path is data.yaml return False, None, path assert Path(path).is_file(), f'Error unzipping {path}, file not found' - ZipFile(path).extractall(path=path.parent) # unzip + unzip_file(path, path=path.parent) dir = path.with_suffix('') # dataset directory == zip name assert dir.is_dir(), f'Error unzipping {path}, {dir} not found. path/to/abc.zip MUST unzip to path/to/abc/' return True, str(dir), self._find_yaml(dir) # zipped, data_dir, yaml_path diff --git a/utils/downloads.py b/utils/downloads.py index 60417c1f8835..21bb6608d5ba 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -5,12 +5,9 @@ import logging import os -import platform import subprocess -import time import urllib from pathlib import Path -from zipfile import ZipFile import requests import torch @@ -109,81 +106,3 @@ def github_assets(repository, version='latest'): error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag} or {url3}') return str(file) - - -def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): - # Downloads a file from Google Drive. from yolov5.utils.downloads import *; gdrive_download() - t = time.time() - file = Path(file) - cookie = Path('cookie') # gdrive cookie - print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... 
', end='') - if file.exists(): - file.unlink() # remove existing file - if cookie.exists(): - cookie.unlink() # remove existing cookie - - # Attempt file download - out = "NUL" if platform.system() == "Windows" else "/dev/null" - os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') - if os.path.exists('cookie'): # large file - s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' - else: # small file - s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' - r = os.system(s) # execute, capture return - if cookie.exists(): - cookie.unlink() # remove existing cookie - - # Error check - if r != 0: - if file.exists(): - file.unlink() # remove partial - print('Download error ') # raise Exception('Download error') - return r - - # Unzip if archive - if file.suffix == '.zip': - print('unzipping... ', end='') - ZipFile(file).extractall(path=file.parent) # unzip - file.unlink() # remove zip - - print(f'Done ({time.time() - t:.1f}s)') - return r - - -def get_token(cookie="./cookie"): - with open(cookie) as f: - for line in f: - if "download" in line: - return line.split()[-1] - return "" - - -# Google utils: https://cloud.google.com/storage/docs/reference/libraries ---------------------------------------------- -# -# -# def upload_blob(bucket_name, source_file_name, destination_blob_name): -# # Uploads a file to a bucket -# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python -# -# storage_client = storage.Client() -# bucket = storage_client.get_bucket(bucket_name) -# blob = bucket.blob(destination_blob_name) -# -# blob.upload_from_filename(source_file_name) -# -# print('File {} uploaded to {}.'.format( -# source_file_name, -# destination_blob_name)) -# -# -# def download_blob(bucket_name, source_blob_name, destination_file_name): -# # Uploads a blob from a bucket -# storage_client = storage.Client() -# bucket = storage_client.get_bucket(bucket_name) -# blob = bucket.blob(source_blob_name) -# -# blob.download_to_filename(destination_file_name) -# -# print('Blob {} downloaded to {}.'.format( -# source_blob_name, -# destination_file_name)) diff --git a/utils/general.py b/utils/general.py index 8ea0ad07ed13..fb8484ce434e 100644 --- a/utils/general.py +++ b/utils/general.py @@ -511,7 +511,7 @@ def check_dataset(data, autodownload=True): LOGGER.info(f'Downloading {s} to {f}...') torch.hub.download_url_to_file(s, f) Path(DATASETS_DIR).mkdir(parents=True, exist_ok=True) # create root - ZipFile(f).extractall(path=DATASETS_DIR) # unzip + unzip_file(f, path=DATASETS_DIR) # unzip Path(f).unlink() # remove zip r = None # success elif s.startswith('bash '): # bash script @@ -566,6 +566,16 @@ def yaml_save(file='data.yaml', data={}): yaml.safe_dump({k: str(v) if isinstance(v, Path) else v for k, v in data.items()}, f, sort_keys=False) +def unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')): + # Unzip a *.zip file to path/, excluding files containing strings in exclude list + if path is None: + path = Path(file).parent # default path + with ZipFile(file) as zipObj: + for f in zipObj.namelist(): # list all archived filenames in the zip + if all(x not in f for x in exclude): + zipObj.extract(f, path=path) + + def url2file(url): # Convert URL to filename, i.e. 
https://url.com/file.txt?auth -> file.txt url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ @@ -601,7 +611,7 @@ def download_one(url, dir): if unzip and success and f.suffix in ('.zip', '.tar', '.gz'): LOGGER.info(f'Unzipping {f}...') if f.suffix == '.zip': - ZipFile(f).extractall(path=dir) # unzip + unzip_file(f, dir) # unzip elif f.suffix == '.tar': os.system(f'tar xf {f} --directory {f.parent}') # unzip elif f.suffix == '.gz': From c4710012d83ec46f1759b38555c989e3c23ea727 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 18 Oct 2022 21:22:37 +0200 Subject: [PATCH 210/326] Update README.md (#9846) @pderrenger Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 8f45ccd229b5..52f2854dd601 100644 --- a/README.md +++ b/README.md @@ -168,22 +168,22 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12
-|Comet ⭐ NEW|Deci ⭐ NEW|ClearML ⭐ NEW|Roboflow|
+|Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Deci ⭐ NEW|
 |:-:|:-:|:-:|:-:|
-|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://bit.ly/yolov5-readme-comet)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|
+|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://bit.ly/yolov5-readme-comet)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|

 ## Why YOLOv5
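For the ZipFile change in [PATCH 209/326] above: the new unzip_file() helper wraps ZipFile in a `with` block so the archive handle is always closed, and it skips members whose names match the `exclude` list. A usage sketch (hypothetical file names chosen only for the demo; assumes the YOLOv5 repo root is on PYTHONPATH so utils.general imports):

    from pathlib import Path
    from zipfile import ZipFile

    from utils.general import unzip_file

    # Build a throwaway archive: one real file plus one macOS metadata entry
    Path('demo.txt').write_text('hello')
    with ZipFile('demo.zip', 'w') as z:
        z.write('demo.txt')
        z.writestr('__MACOSX/._demo.txt', '')  # matches the default exclude list

    unzip_file('demo.zip', path='out')  # extracts out/demo.txt, skips the __MACOSX entry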
From 6371de8879e7ad7ec5283e8b95cc6dd85d6a5e72 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 18 Oct 2022 22:26:53 +0200 Subject: [PATCH 211/326] Webcam show fix (#9847) * Webcam show fix Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/common.py | 8 +++----- utils/general.py | 1 - 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/models/common.py b/models/common.py index e6da429de3e5..ba18cbce7429 100644 --- a/models/common.py +++ b/models/common.py @@ -24,14 +24,12 @@ from utils import TryExcept from utils.dataloaders import exif_transpose, letterbox -from utils.general import (LOGGER, ROOT, Profile, check_imshow, check_requirements, check_suffix, check_version, - colorstr, increment_path, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, +from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr, + increment_path, is_notebook, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, xyxy2xywh, yaml_load) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import copy_attr, smart_inference_mode -CHECK_IMSHOW = check_imshow() - def autopad(k, p=None, d=1): # kernel, padding, dilation # Pad to 'same' shape outputs @@ -760,7 +758,7 @@ def _run(self, pprint=False, show=False, save=False, crop=False, render=False, l im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np if show: - im.show(self.files[i]) if CHECK_IMSHOW else display(im) + display(im) if is_notebook() else im.show(self.files[i]) if save: f = self.files[i] im.save(save_dir / f) # save diff --git a/utils/general.py b/utils/general.py index fb8484ce434e..e1823b50ac56 100644 --- a/utils/general.py +++ b/utils/general.py @@ -395,7 +395,6 @@ def check_imshow(warn=False): try: assert not is_notebook() assert not is_docker() - assert 'NoneType' not in str(type(IPython.get_ipython())) # SSH terminals, GitHub CI cv2.imshow('test', np.zeros((1, 1, 3))) cv2.waitKey(1) cv2.destroyAllWindows() From 3b1a9d22a45f1e16e21c8e8ebec9ccd17068cd08 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 20 Oct 2022 19:54:07 +0200 Subject: [PATCH 212/326] Fix OpenVINO Usage example (#9874) * Fix OpenVINO Usage example * Fix OpenVINO Usage example --- classify/predict.py | 2 +- classify/val.py | 2 +- detect.py | 2 +- export.py | 2 +- models/common.py | 4 ++-- segment/predict.py | 2 +- segment/val.py | 4 ++-- val.py | 2 +- 8 files changed, 10 insertions(+), 10 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 9373649bf27d..96508d633da8 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -15,7 +15,7 @@ $ python classify/predict.py --weights yolov5s-cls.pt # PyTorch yolov5s-cls.torchscript # TorchScript yolov5s-cls.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s-cls.xml # OpenVINO + yolov5s-cls_openvino_model # OpenVINO yolov5s-cls.engine # TensorRT yolov5s-cls.mlmodel # CoreML (macOS-only) yolov5s-cls_saved_model # TensorFlow SavedModel diff --git a/classify/val.py b/classify/val.py index 3c16ec8092d8..c0b507785fb0 100644 --- a/classify/val.py +++ b/classify/val.py @@ -10,7 +10,7 @@ $ python classify/val.py --weights yolov5s-cls.pt # PyTorch yolov5s-cls.torchscript # TorchScript yolov5s-cls.onnx # ONNX Runtime or OpenCV 
DNN with --dnn - yolov5s-cls.xml # OpenVINO + yolov5s-cls_openvino_model # OpenVINO yolov5s-cls.engine # TensorRT yolov5s-cls.mlmodel # CoreML (macOS-only) yolov5s-cls_saved_model # TensorFlow SavedModel diff --git a/detect.py b/detect.py index 98af7235ea69..8e42fbe159d0 100644 --- a/detect.py +++ b/detect.py @@ -15,7 +15,7 @@ $ python detect.py --weights yolov5s.pt # PyTorch yolov5s.torchscript # TorchScript yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.xml # OpenVINO + yolov5s_openvino_model # OpenVINO yolov5s.engine # TensorRT yolov5s.mlmodel # CoreML (macOS-only) yolov5s_saved_model # TensorFlow SavedModel diff --git a/export.py b/export.py index 66d4d636133a..93845a0c14fa 100644 --- a/export.py +++ b/export.py @@ -28,7 +28,7 @@ $ python detect.py --weights yolov5s.pt # PyTorch yolov5s.torchscript # TorchScript yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.xml # OpenVINO + yolov5s_openvino_model # OpenVINO yolov5s.engine # TensorRT yolov5s.mlmodel # CoreML (macOS-only) yolov5s_saved_model # TensorFlow SavedModel diff --git a/models/common.py b/models/common.py index ba18cbce7429..af8132fffb7a 100644 --- a/models/common.py +++ b/models/common.py @@ -318,7 +318,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, # TorchScript: *.torchscript # ONNX Runtime: *.onnx # ONNX OpenCV DNN: *.onnx --dnn - # OpenVINO: *.xml + # OpenVINO: *_openvino_model # CoreML: *.mlmodel # TensorRT: *.engine # TensorFlow SavedModel: *_saved_model @@ -469,7 +469,7 @@ def gd_outputs(gd): check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle') import paddle.inference as pdi if not Path(w).is_file(): # if not *.pdmodel - w = next(Path(w).rglob('*.pdmodel')) # get *.xml file from *_openvino_model dir + w = next(Path(w).rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir weights = Path(w).with_suffix('.pdiparams') config = pdi.Config(str(w), str(weights)) if cuda: diff --git a/segment/predict.py b/segment/predict.py index 44d6d3904c19..3ae68240726a 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -15,7 +15,7 @@ $ python segment/predict.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s-seg.xml # OpenVINO + yolov5s-seg_openvino_model # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel diff --git a/segment/val.py b/segment/val.py index f1ec54638d61..a875b3b79907 100644 --- a/segment/val.py +++ b/segment/val.py @@ -4,13 +4,13 @@ Usage: $ bash data/scripts/get_coco.sh --val --segments # download COCO-segments val split (1G, 5000 images) - $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640- # validate COCO-segments + $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate COCO-segments Usage - formats: $ python segment/val.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s-seg.xml # OpenVINO + yolov5s-seg_openvino_label # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel diff --git a/val.py b/val.py index ca838c0beb2f..127acf810029 100644 --- a/val.py +++ b/val.py @@ -9,7 +9,7 @@ $ python val.py --weights yolov5s.pt # PyTorch yolov5s.torchscript # TorchScript yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.xml # 
OpenVINO + yolov5s_openvino_model # OpenVINO yolov5s.engine # TensorRT yolov5s.mlmodel # CoreML (macOS-only) yolov5s_saved_model # TensorFlow SavedModel From eef90572bf11602b17816a1721980cdb07a95eb2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 20 Oct 2022 20:16:58 +0200 Subject: [PATCH 213/326] ClearML Dockerfile fix (#9876) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index be5c2fb71517..05776510e160 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -16,7 +16,7 @@ RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1- COPY requirements.txt . RUN python -m pip install --upgrade pip wheel RUN pip uninstall -y Pillow torchtext torch torchvision -RUN pip install --no-cache -r requirements.txt albumentations comet clearml gsutil notebook Pillow>=9.1.0 \ +RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook Pillow>=9.1.0 \ 'opencv-python<4.6.0.66' \ --extra-index-url https://download.pytorch.org/whl/cu113 From fba61e55836273847947498c01314499d8e5e7dc Mon Sep 17 00:00:00 2001 From: SSTato <1210546396@qq.com> Date: Mon, 24 Oct 2022 22:20:47 +0800 Subject: [PATCH 214/326] Windows Python 3.7 .isfile() fix (#9879) * Update dataloaders.py Signed-off-by: SSTato <1210546396@qq.com> * Update general.py Signed-off-by: SSTato <1210546396@qq.com> * Update general.py Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: SSTato <1210546396@qq.com> * Update ci-testing.yml Signed-off-by: SSTato <1210546396@qq.com> * Update ci-testing.yml Signed-off-by: SSTato <1210546396@qq.com> * Update ci-testing.yml Signed-off-by: SSTato <1210546396@qq.com> * Update general.py Signed-off-by: SSTato <1210546396@qq.com> * Update general.py Signed-off-by: SSTato <1210546396@qq.com> * Update dataloaders.py Signed-off-by: SSTato <1210546396@qq.com> Signed-off-by: SSTato <1210546396@qq.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- utils/dataloaders.py | 2 +- utils/general.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 37b3ffb2728b..403252ff6227 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -344,7 +344,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, tr self.img_size = img_size self.stride = stride self.vid_stride = vid_stride # video frame-rate stride - sources = Path(sources).read_text().rsplit() if Path(sources).is_file() else [sources] + sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources] n = len(sources) self.sources = [clean_str(x) for x in sources] # clean source names for later self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n diff --git a/utils/general.py b/utils/general.py index e1823b50ac56..46978f1b8d7b 100644 --- a/utils/general.py +++ b/utils/general.py @@ -426,12 +426,12 @@ def check_file(file, suffix=''): # Search/download file (if necessary) and return path check_suffix(file, suffix) # optional file = str(file) # convert to str() - if Path(file).is_file() or not file: # exists + if os.path.isfile(file) or not file: # exists return file elif file.startswith(('http:/', 'https:/')): # download url = file # warning: Pathlib turns :// -> :/ file = 
Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth - if Path(file).is_file(): + if os.path.isfile(file): LOGGER.info(f'Found {url} locally at {file}') # file already exists else: LOGGER.info(f'Downloading {url} to {file}...') @@ -586,7 +586,7 @@ def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry def download_one(url, dir): # Download 1 file success = True - if Path(url).is_file(): + if os.path.isfile(url): f = Path(url) # filename else: # does not exist f = dir / Path(url).name From 54f49fa581aac1d9909636bfc13f94001b08b55b Mon Sep 17 00:00:00 2001 From: paradigm Date: Tue, 25 Oct 2022 17:53:22 +0200 Subject: [PATCH 215/326] Add TFLite Metadata to TFLite and Edge TPU models (#9903) * added embedded meta data to tflite models * added try block for inference * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * refactored tfite meta data into separate function * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Creat tmp file in /tmp * Update export.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update export.py * Update export.py * Update export.py * Update export.py * Update common.py * Update export.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update common.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- export.py | 39 +++++++++++++++++++++++++++++++++++++-- models/common.py | 9 +++++++++ 2 files changed, 46 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index 93845a0c14fa..e43d9b730fc6 100644 --- a/export.py +++ b/export.py @@ -45,6 +45,7 @@ """ import argparse +import contextlib import json import os import platform @@ -453,6 +454,39 @@ def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): return f, None +def add_tflite_metadata(file, metadata, num_outputs): + # Add metadata to *.tflite models per https://www.tensorflow.org/lite/models/convert/metadata + with contextlib.suppress(ImportError): + # check_requirements('tflite_support') + from tflite_support import flatbuffers + from tflite_support import metadata as _metadata + from tflite_support import metadata_schema_py_generated as _metadata_fb + + tmp_file = Path('/tmp/meta.txt') + with open(tmp_file, 'w') as meta_f: + meta_f.write(str(metadata)) + + model_meta = _metadata_fb.ModelMetadataT() + label_file = _metadata_fb.AssociatedFileT() + label_file.name = tmp_file.name + model_meta.associatedFiles = [label_file] + + subgraph = _metadata_fb.SubGraphMetadataT() + subgraph.inputTensorMetadata = [_metadata_fb.TensorMetadataT()] + subgraph.outputTensorMetadata = [_metadata_fb.TensorMetadataT()] * num_outputs + model_meta.subgraphMetadata = [subgraph] + + b = flatbuffers.Builder(0) + b.Finish(model_meta.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER) + metadata_buf = b.Output() + + populator = _metadata.MetadataPopulator.with_model_file(file) + populator.load_metadata_buffer(metadata_buf) + populator.load_associated_files([str(tmp_file)]) + populator.populate() + tmp_file.unlink() + + @smart_inference_mode() def run( data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' @@ -550,8 +584,9 @@ def run( f[6], _ = export_pb(s_model, file) if tflite or edgetpu: f[7], _ = export_tflite(s_model, im, file, int8 or edgetpu, 
data=data, nms=nms, agnostic_nms=agnostic_nms) - if edgetpu: - f[8], _ = export_edgetpu(file) + if edgetpu: + f[8], _ = export_edgetpu(file) + add_tflite_metadata(f[8] or f[7], metadata, num_outputs=len(s_model.outputs)) if tfjs: f[9], _ = export_tfjs(file) if paddle: # PaddlePaddle diff --git a/models/common.py b/models/common.py index af8132fffb7a..6347e51cdf0b 100644 --- a/models/common.py +++ b/models/common.py @@ -3,10 +3,13 @@ Common modules """ +import ast +import contextlib import json import math import platform import warnings +import zipfile from collections import OrderedDict, namedtuple from copy import copy from pathlib import Path @@ -462,6 +465,12 @@ def gd_outputs(gd): interpreter.allocate_tensors() # allocate input_details = interpreter.get_input_details() # inputs output_details = interpreter.get_output_details() # outputs + # load metadata + with contextlib.suppress(zipfile.BadZipFile): + with zipfile.ZipFile(w, "r") as model: + meta_file = model.namelist()[0] + meta = ast.literal_eval(model.read(meta_file).decode("utf-8")) + stride, names = int(meta['stride']), meta['names'] elif tfjs: # TF.js raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported') elif paddle: # PaddlePaddle From 8236d8818bca21c692d5c4508fee2af835ec1dbe Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 25 Oct 2022 18:13:48 +0200 Subject: [PATCH 216/326] Created using Colaboratory --- tutorial.ipynb | 141 +++---------------------------------------------- 1 file changed, 6 insertions(+), 135 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 63abebc5b37f..10e14b9b1208 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -954,7 +954,7 @@ "source": [ "# Appendix\n", "\n", - "Additional content below for PyTorch Hub, CI, reproducing results, profiling speeds, VOC training, classification training and TensorRT example." + "Additional content below." ] }, { @@ -963,145 +963,16 @@ "id": "GMusP4OAxFu6" }, "source": [ + "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", "import torch\n", "\n", - "# PyTorch Hub Model\n", - "model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5n - yolov5x6, custom\n", - "\n", - "# Images\n", - "img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list\n", - "\n", - "# Inference\n", - "results = model(img)\n", - "\n", - "# Results\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # yolov5n - yolov5x6 or custom\n", + "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", + "results = model(im) # inference\n", "results.print() # or .show(), .save(), .crop(), .pandas(), etc." 
], "execution_count": null, "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "FGH0ZjkGjejy" - }, - "source": [ - "# YOLOv5 CI\n", - "%%shell\n", - "rm -rf runs # remove runs/\n", - "m=yolov5n # official weights\n", - "b=runs/train/exp/weights/best # best.pt checkpoint\n", - "python train.py --imgsz 64 --batch 32 --weights $m.pt --cfg $m.yaml --epochs 1 --device 0 # train\n", - "for d in 0 cpu; do # devices\n", - " for w in $m $b; do # weights\n", - " python val.py --imgsz 64 --batch 32 --weights $w.pt --device $d # val\n", - " python detect.py --imgsz 64 --weights $w.pt --device $d # detect\n", - " done\n", - "done\n", - "python hubconf.py --model $m # hub\n", - "python models/tf.py --weights $m.pt # build TF model\n", - "python models/yolo.py --cfg $m.yaml # build PyTorch model\n", - "python export.py --weights $m.pt --img 64 --include torchscript # export" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "mcKoSIK2WSzj" - }, - "source": [ - "# Reproduce\n", - "for x in (f'yolov5{x}' for x in 'nsmlx'):\n", - " !python val.py --weights {x}.pt --data coco.yaml --img 640 --task speed # speed\n", - " !python val.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "gogI-kwi3Tye" - }, - "source": [ - "# Profile\n", - "from utils.torch_utils import profile\n", - "\n", - "m1 = lambda x: x * torch.sigmoid(x)\n", - "m2 = torch.nn.SiLU()\n", - "results = profile(input=torch.randn(16, 3, 640, 640), ops=[m1, m2], n=100)" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "BSgFCAcMbk1R" - }, - "source": [ - "# VOC\n", - "for b, m in zip([64, 64, 64, 32, 16], [f'yolov5{x}' for x in 'nsmlx']): # batch, model\n", - " !python train.py --batch {b} --weights {m}.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.VOC.yaml --project VOC --name {m} --cache" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# Classification train\n", - "for m in [*(f'yolov5{x}-cls.pt' for x in 'nsmlx'), 'resnet50.pt', 'resnet101.pt', 'efficientnet_b0.pt', 'efficientnet_b1.pt']:\n", - " for d in 'mnist', 'fashion-mnist', 'cifar10', 'cifar100', 'imagenette160', 'imagenette320', 'imagenette', 'imagewoof160', 'imagewoof320', 'imagewoof':\n", - " !python classify/train.py --model {m} --data {d} --epochs 10 --project YOLOv5-cls --name {m}-{d}" - ], - "metadata": { - "id": "UWGH7H6yakVl" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# Classification val\n", - "!bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G - 50000 images)\n", - "!python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate" - ], - "metadata": { - "id": "yYgOiFNHZx-1" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# Validate on COCO test. 
Zip results.json and submit to eval server at https://competitions.codalab.org/competitions/20794\n", - "!bash data/scripts/get_coco.sh --test # download COCO test-dev2017 (7G - 40000 images, test 20000)\n", - "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half --task test" - ], - "metadata": { - "id": "aq4DPWGu0Bl1" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "VTRwsvA9u7ln" - }, - "source": [ - "# TensorRT \n", - "!pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # install\n", - "!python export.py --weights yolov5s.pt --include engine --imgsz 640 --device 0 # export\n", - "!python detect.py --weights yolov5s.engine --imgsz 640 --device 0 # inference" - ], - "execution_count": null, - "outputs": [] } ] -} +} \ No newline at end of file From a5d875adcac05f8f68329c2cb742aba742d1953d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 26 Oct 2022 13:42:52 +0200 Subject: [PATCH 217/326] Add `gnupg` to Dockerfile-cpu (#9932) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile-cpu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index d6fac645dba1..f3f81ec02c23 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -11,7 +11,7 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria # Install linux packages RUN apt update RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev +RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg # RUN alias python=python3 # Install pip packages From f9bb984e817a71a90490ed3a4655fb7ad408d8fb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 26 Oct 2022 14:06:05 +0200 Subject: [PATCH 218/326] Add ClearML minimum version requirement (#9933) * Add ClearML minimum version requirement Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- utils/loggers/clearml/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 52f7b9ea57d2..8cb1bd4c6fe1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,7 +16,7 @@ tqdm>=4.64.0 # Logging ------------------------------------- tensorboard>=2.4.1 -# clearml +# clearml>=1.2.0 # comet # Plotting ------------------------------------ diff --git a/utils/loggers/clearml/README.md b/utils/loggers/clearml/README.md index 64eef6befc93..e0c5824bc2a2 100644 --- a/utils/loggers/clearml/README.md +++ b/utils/loggers/clearml/README.md @@ -54,7 +54,7 @@ That's it! You're done 😎 To enable ClearML experiment tracking, simply install the ClearML pip package. ```bash -pip install clearml +pip install clearml>=1.2.0 ``` This will enable integration with the YOLOv5 training script. Every training run from now on, will be captured and stored by the ClearML experiment manager. 
If you want to change the `project_name` or `task_name`, head over to our custom logger, where you can change it: `utils/loggers/clearml/clearml_utils.py` From 32a92185738c93e5f0b0f6971de0812cd6fd5f34 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 26 Oct 2022 23:51:40 +0200 Subject: [PATCH 219/326] Update Comet Integrations table text (#9937) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 52f2854dd601..dc21ad8d6639 100644 --- a/README.md +++ b/README.md @@ -183,7 +183,7 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12 |Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Deci ⭐ NEW| |:-:|:-:|:-:|:-:| -|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://bit.ly/yolov5-readme-comet)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| +|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| ##
Why YOLOv5
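A minimal sketch of the experiment tracking that the ClearML changes above enable, assuming `clearml>=1.2.0` is installed and a server has been configured via `clearml-init`; the project and task names below are placeholders, not values taken from the patches:

```python
# Sketch only: register a run and attach hyperparameters with ClearML.
from clearml import Task

task = Task.init(project_name='YOLOv5', task_name='exp')  # placeholder names
task.connect({'epochs': 100, 'imgsz': 640})  # hyperparameters logged to the run
```

In YOLOv5 itself this wiring lives in `utils/loggers/clearml/clearml_utils.py`, as the README excerpt in PATCH 218 notes.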
From 38e5aae9a20522b69e21629f1558ab8902b351f6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 28 Oct 2022 17:37:25 +0200 Subject: [PATCH 220/326] Update README.md (#9957) * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README_cn.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/README_cn.md | 7 +------ README.md | 12 ++++-------- 2 files changed, 5 insertions(+), 14 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index 7e8aa6f7f087..981fd8a5b820 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -1,12 +1,7 @@

- -

- -   - - +

[English](../README.md) | 简体中文 diff --git a/README.md b/README.md index dc21ad8d6639..98cad8de4294 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,7 @@

- -

- -   - - +

English | [简体中文](.github/README_cn.md) @@ -23,8 +18,9 @@

- YOLOv5 🚀 is a family of object detection architectures and models pretrained on the COCO dataset, and represents Ultralytics - open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. + YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics + open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. To request a commercial license please complete the form at Ultralytics Licensing. +

From be9ef3871e85d6e06b736f08a1c9f1d01998afe6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 28 Oct 2022 17:48:41 +0200 Subject: [PATCH 221/326] Update README.md (#9958) * Update README.md Signed-off-by: Glenn Jocher * Update README_cn.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update tutorial.ipynb Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/README_cn.md | 2 +- README.md | 2 +- tutorial.ipynb | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index 981fd8a5b820..d0cf6b9ff3bd 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -1,7 +1,7 @@

- +

[English](../README.md) | 简体中文 diff --git a/README.md b/README.md index 98cad8de4294..64a2e9001538 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@

- +

English | [简体中文](.github/README_cn.md) diff --git a/tutorial.ipynb b/tutorial.ipynb index 10e14b9b1208..b40f08ef20b3 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -371,7 +371,7 @@ "
\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", @@ -975,4 +975,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From 6db0fac66cfb78697af21dc12d434774e4ccbcab Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 28 Oct 2022 18:25:33 +0200 Subject: [PATCH 222/326] Update README.md (#9961) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 64a2e9001538..8dbf527c2dbd 100644 --- a/README.md +++ b/README.md @@ -339,8 +339,7 @@ We love your input! We want to make contributing to YOLOv5 as easy and transpare ##
Contact
-For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For business inquiries or -professional support requests please visit [https://ultralytics.com/contact](https://ultralytics.com/contact). +For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For professional support please [Contact Us](https://ultralytics.com/contact). To request a commercial license please complete the form at [Ultralytics Licensing](https://ultralytics.com/license).
From 575055ce7028ee99618ff1c5c0e8919e8e2cd849 Mon Sep 17 00:00:00 2001 From: Kalen Michael Date: Fri, 28 Oct 2022 21:16:03 +0200 Subject: [PATCH 223/326] Switch from suffix checks to archive checks (#9963) * fix: switched from suffix checks to archive checks * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup seems like both functions accept Path type input so removing str() * import always Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/general.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/utils/general.py b/utils/general.py index 46978f1b8d7b..88cefb7bb662 100644 --- a/utils/general.py +++ b/utils/general.py @@ -23,8 +23,9 @@ from multiprocessing.pool import ThreadPool from pathlib import Path from subprocess import check_output +from tarfile import is_tarfile from typing import Optional -from zipfile import ZipFile +from zipfile import ZipFile, is_zipfile import cv2 import IPython @@ -465,7 +466,7 @@ def check_dataset(data, autodownload=True): # Download (optional) extract_dir = '' - if isinstance(data, (str, Path)) and str(data).endswith('.zip'): # i.e. gs://bucket/dir/coco128.zip + if isinstance(data, (str, Path)) and (is_zipfile(data) or is_tarfile(data)): download(data, dir=f'{DATASETS_DIR}/{Path(data).stem}', unzip=True, delete=False, curl=False, threads=1) data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml')) extract_dir, autodownload = data.parent, False @@ -607,11 +608,11 @@ def download_one(url, dir): else: LOGGER.warning(f'❌ Failed to download {url}...') - if unzip and success and f.suffix in ('.zip', '.tar', '.gz'): + if unzip and success and (f.suffix == '.gz' or is_zipfile(f) or is_tarfile(f)): LOGGER.info(f'Unzipping {f}...') - if f.suffix == '.zip': + if is_zipfile(f): unzip_file(f, dir) # unzip - elif f.suffix == '.tar': + elif is_tarfile(f): os.system(f'tar xf {f} --directory {f.parent}') # unzip elif f.suffix == '.gz': os.system(f'tar xfz {f} --directory {f.parent}') # unzip From 6e544d5f7c0b699c7c6002074b822a03308bbe3c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 29 Oct 2022 13:31:01 +0200 Subject: [PATCH 224/326] FROM nvcr.io/nvidia/pytorch:22.10-py3 (#9966) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 05776510e160..87605456a5d9 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -3,7 +3,7 @@ # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference # Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.09-py3 +FROM nvcr.io/nvidia/pytorch:22.10-py3 RUN rm -rf /opt/pytorch # remove 1.2GB dir # Downloads to user config dir From a625f29967d09beeee1f010313a05ad7d5997c32 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 30 Oct 2022 22:09:36 +0100 Subject: [PATCH 225/326] Full-size proto code (optional) (#9980) * Update tf.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update tf.py Signed-off-by: Glenn Jocher * Update tf.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/tf.py | 5 +++-- 1 file changed, 3 insertions(+), 2 
deletions(-) diff --git a/models/tf.py b/models/tf.py index 1446d8841646..3f3dc8dbe7e7 100644 --- a/models/tf.py +++ b/models/tf.py @@ -333,6 +333,7 @@ def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(640, 640), w def call(self, x): p = self.proto(x[0]) + # p = TFUpsample(None, scale_factor=4, mode='nearest')(self.proto(x[0])) # (optional) full-size protos p = tf.transpose(p, [0, 3, 1, 2]) # from shape(1,160,160,32) to shape(1,32,160,160) x = self.detect(self, x) return (x, p) if self.training else (x[0], p) @@ -355,8 +356,8 @@ class TFUpsample(keras.layers.Layer): # TF version of torch.nn.Upsample() def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w' super().__init__() - assert scale_factor == 2, "scale_factor must be 2" - self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=mode) + assert scale_factor % 2 == 0, "scale_factor must be multiple of 2" + self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * scale_factor, x.shape[2] * scale_factor), mode) # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode) # with default arguments: align_corners=False, half_pixel_centers=False # self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x, From e704970f7f606d6d3e58641e9384f38b532aa846 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 31 Oct 2022 12:43:17 +0100 Subject: [PATCH 226/326] Update README.md (#9970) * Update README.md @taliabender updated spacing per our convo Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 8dbf527c2dbd..7cb4d09446ca 100644 --- a/README.md +++ b/README.md @@ -19,8 +19,10 @@

YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics - open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. To request a commercial license please complete the form at Ultralytics Licensing. - + open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. +

+ To request a commercial license please complete the form at <a href="https://ultralytics.com/license">Ultralytics Licensing</a>. +

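A minimal sketch, not taken from any patch, of the equivalence the updated `TFUpsample` layer in PATCH 225 above relies on: for an integer `scale_factor` that is a multiple of 2, `tf.image.resize` with the `'nearest'` method reproduces `torch.nn.Upsample(mode='nearest')` once the NHWC/NCHW layouts are aligned. TensorFlow 2.x and PyTorch are assumed available, and the shapes are illustrative only:

```python
import numpy as np
import tensorflow as tf
import torch

x = np.random.rand(1, 8, 8, 3).astype(np.float32)  # NHWC layout, as TF expects
scale = 4  # any multiple of 2 satisfies the relaxed assert in TFUpsample
tf_out = tf.image.resize(x, (x.shape[1] * scale, x.shape[2] * scale), 'nearest').numpy()
pt_in = torch.from_numpy(x).permute(0, 3, 1, 2)  # NCHW for PyTorch
pt_out = torch.nn.Upsample(scale_factor=scale, mode='nearest')(pt_in)
assert np.allclose(tf_out, pt_out.permute(0, 2, 3, 1).numpy())  # identical after layout realignment
```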
From a83d2a50132982fa89a22420155f6c9f097a92c7 Mon Sep 17 00:00:00 2001 From: Paul Guerrie <97041392+paulguerrie@users.noreply.github.com> Date: Mon, 31 Oct 2022 15:25:11 +0100 Subject: [PATCH 227/326] Segmentation Tutorial (#9521) * Added a tutorial notebook for segmentation. * Updated header for segmentation tutorial and included other YOLOv5 sponsor sections. * Updated segmentation tutorial to match main object detection tutorial. * Update tutorial.ipynb Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: Ayush Chaurasia Co-authored-by: Glenn Jocher --- segment/tutorial.ipynb | 956 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 956 insertions(+) create mode 100644 segment/tutorial.ipynb diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb new file mode 100644 index 000000000000..47559978be74 --- /dev/null +++ b/segment/tutorial.ipynb @@ -0,0 +1,956 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "t6MPjfT5NrKQ" + }, + "source": [ + "
\n", + "\n", + " \n", + " \n", + "\n", + "\n", + "
\n", + " \"Run\n", + " \"Open\n", + " \"Open\n", + "
\n", + "\n", + "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7mGmQbAO5pQb" + }, + "source": [ + "# Setup\n", + "\n", + "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "wbvMlHd_QwMG", + "outputId": "0f9ee467-cea4-48e8-9050-7a76ae1b6141" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "YOLOv5 🚀 v6.2-225-gf223cb2 Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Setup complete ✅ (4 CPUs, 14.7 GB RAM, 107.3/196.6 GB disk)\n" + ] + } + ], + "source": [ + "!git clone https://github.com/ultralytics/yolov5 # clone\n", + "%cd yolov5\n", + "%pip install -qr requirements.txt # install\n", + "\n", + "import torch\n", + "import utils\n", + "display = utils.notebook_init() # checks" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4JnkELT0cIJg" + }, + "source": [ + "# 1. Predict\n", + "\n", + "`segment/predict.py` runs YOLOv5 instance segmentation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict`. Example inference sources are:\n", + "\n", + "```shell\n", + "python segment/predict.py --source 0 # webcam\n", + " img.jpg # image \n", + " vid.mp4 # video\n", + " screen # screenshot\n", + " path/ # directory\n", + " 'path/*.jpg' # glob\n", + " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", + " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "zR9ZbuQCH7FX", + "outputId": "60647b99-e8d4-402c-f444-331bf6746da4" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1msegment/predict: \u001b[0mweights=['yolov5s-seg.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/predict-seg, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1, retina_masks=False\n", + "YOLOv5 🚀 v6.2-225-gf223cb2 Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", + "\n", + "Fusing layers... 
\n", + "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", + "image 1/2 /home/paguerrie/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 5.6ms\n", + "image 2/2 /home/paguerrie/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 5.5ms\n", + "Speed: 0.4ms pre-process, 5.6ms inference, 1.1ms NMS per image at shape (1, 3, 640, 640)\n", + "Results saved to \u001b[1mruns/predict-seg/exp\u001b[0m\n" + ] + } + ], + "source": [ + "!python segment/predict.py --weights yolov5s-seg.pt --img 640 --conf 0.25 --source data/images\n", + "#display.Image(filename='runs/predict-seg/exp/zidane.jpg', width=600)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hkAzDWJ7cWTr" + }, + "source": [ + "        \n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0eq1SMWl6Sfn" + }, + "source": [ + "# 2. Validate\n", + "Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 49, + "referenced_widgets": [ + "9b8caa3522fc4cbab31e13b5dfc7808d", + "574140e4c4bc48c9a171541a02cd0211", + "35e03ce5090346c9ae602891470fc555", + "c942c208e72d46568b476bb0f2d75496", + "65881db1db8a4e9c930fab9172d45143", + "60b913d755b34d638478e30705a2dde1", + "0856bea36ec148b68522ff9c9eb258d8", + "76879f6f2aa54637a7a07faeea2bd684", + "0ace3934ec6f4d36a1b3a9e086390926", + "d6b7a2243e0c4beca714d99dceec23d6", + "5966ba6e6f114d8c9d8d1d6b1bd4f4c7" + ] + }, + "id": "WQPtK1QYVaD_", + "outputId": "102dabed-bc31-42fe-9133-d9ce28a2c01e" + }, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "89f5f0a84ca642378724f1bf05f17e0d", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + " 0%| | 0.00/6.79M [00:00

\n", + "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", + "

\n", + "\n", + "Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n", + "\n", + "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", + "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", + "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", + "- **Training Results** are saved to `runs/train-seg/` with incrementing run directories, i.e. `runs/train-seg/exp2`, `runs/train-seg/exp3` etc.\n", + "

\n", + "\n", + "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", + "\n", + "## Train on Custom Data with Roboflow 🌟 NEW\n", + "\n", + "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", + "\n", + "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/](https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/?ref=ultralytics)\n", + "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1JTz7kpmHsg-5qwVz2d2IH3AaenI1tv0N?usp=sharing)\n", + "
\n", + "\n", + "

Label images lightning fast (including with model-assisted labeling)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "i3oKtE4g-aNn" + }, + "outputs": [], + "source": [ + "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", + "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", + "\n", + "if logger == 'TensorBoard':\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train-seg\n", + "elif logger == 'Comet':\n", + " %pip install -q comet_ml\n", + " import comet_ml; comet_ml.init()\n", + "elif logger == 'ClearML':\n", + " %pip install -q clearml && clearml-init" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "1NcFxRcFdJ_O", + "outputId": "baa6d4be-3379-4aab-844a-d5a5396c0e49" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n", + "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", + "YOLOv5 🚀 v6.2-225-gf223cb2 Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", + "\n", + "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", + "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n", + "\n", + " from n params module arguments \n", + " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", + " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", + " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n", + " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", + " 4 -1 2 115712 models.common.C3 [128, 128, 2] \n", + " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n", + " 6 -1 3 625152 models.common.C3 [256, 256, 3] \n", + " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n", + " 8 -1 1 1182720 models.common.C3 [512, 512, 1] \n", + " 9 -1 1 656896 models.common.SPPF [512, 512, 5] \n", + " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n", + " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 12 [-1, 6] 1 0 models.common.Concat [1] \n", + " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n", + " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n", + " 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 16 [-1, 4] 1 0 models.common.Concat [1] \n", + " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n", + " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n", + " 19 [-1, 14] 1 0 models.common.Concat [1] \n", + " 20 -1 1 296448 models.common.C3 [256, 256, 1, 
False] \n", + " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n", + " 22 [-1, 10] 1 0 models.common.Concat [1] \n", + " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", + " 24 [17, 20, 23] 1 615133 models.yolo.Segment [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], 32, 128, [128, 256, 512]]\n", + "Model summary: 225 layers, 7621277 parameters, 7621277 gradients, 26.6 GFLOPs\n", + "\n", + "Transferred 367/367 items from yolov5s-seg.pt\n", + "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", + "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/home/paguerrie/datasets/coco128-seg/labels/train2017.cache' im\u001b[0m\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100%|██████████| 128/128 [00:00<00:00, 544.41\u001b[0m\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '/home/paguerrie/datasets/coco128-seg/labels/train2017.cache' imag\u001b[0m\n", + "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB ram): 100%|██████████| 128/128 [00:00<00:00, 138.66it\u001b[0m\n", + "\n", + "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.27 anchors/target, 0.994 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n", + "Plotting labels to runs/train-seg/exp/labels.jpg... \n", + "Image sizes 640 train, 640 val\n", + "Using 4 dataloader workers\n", + "Logging results to \u001b[1mruns/train-seg/exp\u001b[0m\n", + "Starting training for 3 epochs...\n", + "\n", + " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n", + " 0/2 4.67G 0.04464 0.05134 0.06548 0.01895 219 \n", + " Class Images Instances Box(P R mAP50 m\n", + " all 128 929 0.727 0.661 0.725 0.496 0.688 0.629 0.673 0.413\n", + "\n", + " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n", + " 1/2 6.36G 0.04102 0.04702 0.06873 0.01734 263 \n", + " Class Images Instances Box(P R mAP50 m\n", + " all 128 929 0.752 0.676 0.743 0.51 0.704 0.64 0.682 0.425\n", + "\n", + " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n", + " 2/2 6.36G 0.0421 0.04463 0.05951 0.01746 245 \n", + " Class Images Instances Box(P R mAP50 m\n", + " all 128 929 0.776 0.674 0.757 0.514 0.72 0.632 0.684 0.429\n", + "\n", + "3 epochs completed in 0.006 hours.\n", + "Optimizer stripped from runs/train-seg/exp/weights/last.pt, 15.6MB\n", + "Optimizer stripped from runs/train-seg/exp/weights/best.pt, 15.6MB\n", + "\n", + "Validating runs/train-seg/exp/weights/best.pt...\n", + "Fusing layers... 
\n", + "Model summary: 165 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", + " Class Images Instances Box(P R mAP50 m\n", + " all 128 929 0.775 0.673 0.758 0.515 0.72 0.632 0.684 0.427\n", + " person 128 254 0.829 0.745 0.833 0.545 0.776 0.697 0.764 0.406\n", + " bicycle 128 6 0.614 0.333 0.539 0.331 0.614 0.333 0.531 0.308\n", + " car 128 46 0.774 0.413 0.571 0.266 0.693 0.37 0.493 0.204\n", + " motorcycle 128 5 0.817 0.901 0.895 0.678 0.817 0.901 0.895 0.47\n", + " airplane 128 6 1 0.951 0.995 0.71 0.882 0.833 0.839 0.515\n", + " bus 128 7 0.695 0.714 0.757 0.661 0.695 0.714 0.757 0.627\n", + " train 128 3 1 0.935 0.995 0.566 1 0.935 0.995 0.731\n", + " truck 128 12 0.741 0.417 0.463 0.283 0.741 0.417 0.4 0.27\n", + " boat 128 6 0.653 0.32 0.452 0.17 0.653 0.32 0.328 0.149\n", + " traffic light 128 14 0.627 0.36 0.527 0.234 0.503 0.289 0.409 0.293\n", + " stop sign 128 2 0.829 1 0.995 0.747 0.829 1 0.995 0.821\n", + " bench 128 9 0.822 0.667 0.76 0.414 0.685 0.556 0.678 0.228\n", + " bird 128 16 0.967 1 0.995 0.675 0.906 0.938 0.909 0.516\n", + " cat 128 4 0.778 0.89 0.945 0.728 0.778 0.89 0.945 0.69\n", + " dog 128 9 1 0.65 0.973 0.697 1 0.65 0.939 0.615\n", + " horse 128 2 0.727 1 0.995 0.672 0.727 1 0.995 0.2\n", + " elephant 128 17 1 0.912 0.946 0.704 0.871 0.794 0.822 0.565\n", + " bear 128 1 0.626 1 0.995 0.895 0.626 1 0.995 0.895\n", + " zebra 128 4 0.865 1 0.995 0.934 0.865 1 0.995 0.822\n", + " giraffe 128 9 0.975 1 0.995 0.672 0.866 0.889 0.876 0.473\n", + " backpack 128 6 1 0.573 0.707 0.38 0.891 0.5 0.524 0.249\n", + " umbrella 128 18 0.744 0.889 0.926 0.552 0.465 0.556 0.483 0.262\n", + " handbag 128 19 0.799 0.209 0.432 0.225 0.799 0.209 0.403 0.201\n", + " tie 128 7 0.968 0.857 0.857 0.53 0.968 0.857 0.857 0.519\n", + " suitcase 128 4 0.821 1 0.995 0.696 0.821 1 0.995 0.665\n", + " frisbee 128 5 0.777 0.8 0.761 0.613 0.777 0.8 0.761 0.558\n", + " skis 128 1 0.721 1 0.995 0.497 0.721 1 0.995 0.398\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + " snowboard 128 7 0.851 0.857 0.887 0.599 0.284 0.286 0.253 0.151\n", + " sports ball 128 6 0.961 0.667 0.687 0.429 0.721 0.5 0.481 0.476\n", + " kite 128 10 0.508 0.312 0.48 0.238 0.508 0.312 0.406 0.122\n", + " baseball bat 128 4 0.331 0.5 0.526 0.249 0.331 0.5 0.376 0.102\n", + " baseball glove 128 7 0.876 0.571 0.579 0.282 0.657 0.429 0.429 0.343\n", + " skateboard 128 5 1 0.697 0.824 0.471 0.707 0.497 0.552 0.299\n", + " tennis racket 128 7 0.524 0.714 0.646 0.426 0.524 0.714 0.646 0.452\n", + " bottle 128 18 0.657 0.389 0.531 0.359 0.657 0.389 0.569 0.362\n", + " wine glass 128 16 0.752 0.938 0.924 0.435 0.451 0.562 0.568 0.341\n", + " cup 128 36 0.859 0.676 0.848 0.503 0.823 0.648 0.793 0.496\n", + " fork 128 6 0.904 0.333 0.462 0.309 0.452 0.167 0.195 0.107\n", + " knife 128 16 0.749 0.5 0.665 0.413 0.655 0.438 0.523 0.314\n", + " spoon 128 22 0.787 0.409 0.577 0.275 0.787 0.409 0.528 0.236\n", + " bowl 128 28 0.793 0.679 0.744 0.577 0.751 0.643 0.688 0.366\n", + " banana 128 1 0.931 1 0.995 0.398 0.931 1 0.995 0.497\n", + " sandwich 128 2 1 0 0.828 0.713 1 0 0.498 0.449\n", + " orange 128 4 0.588 1 0.995 0.666 0.588 1 0.995 0.672\n", + " broccoli 128 11 0.563 0.455 0.356 0.258 0.563 0.455 0.362 0.259\n", + " carrot 128 24 0.683 0.75 0.753 0.489 0.758 0.833 0.835 0.451\n", + " hot dog 128 2 0.583 1 0.995 0.995 0.583 1 0.995 0.796\n", + " pizza 128 5 0.801 0.8 0.962 0.644 0.801 0.8 0.962 0.583\n", + " donut 128 14 0.704 1 0.889 0.759 0.704 1 0.889 0.683\n", + " cake 128 4 0.904 1 0.995 
0.896 0.904 1 0.995 0.838\n", + " chair 128 35 0.672 0.543 0.629 0.333 0.708 0.571 0.583 0.284\n", + " couch 128 6 0.827 0.5 0.821 0.583 0.827 0.5 0.681 0.352\n", + " potted plant 128 14 0.809 0.908 0.884 0.584 0.809 0.908 0.884 0.474\n", + " bed 128 3 1 0.654 0.913 0.36 1 0.654 0.913 0.418\n", + " dining table 128 13 0.803 0.385 0.557 0.361 0.321 0.154 0.126 0.0487\n", + " toilet 128 2 0.802 1 0.995 0.921 0.802 1 0.995 0.698\n", + " tv 128 2 0.59 1 0.995 0.846 0.59 1 0.995 0.846\n", + " laptop 128 3 1 0 0.451 0.324 1 0 0.372 0.157\n", + " mouse 128 2 1 0 0 0 1 0 0 0\n", + " remote 128 8 0.831 0.5 0.625 0.495 0.831 0.5 0.629 0.436\n", + " cell phone 128 8 0.867 0.375 0.482 0.26 0.578 0.25 0.302 0.127\n", + " microwave 128 3 0.782 1 0.995 0.695 0.782 1 0.995 0.585\n", + " oven 128 5 0.389 0.4 0.432 0.299 0.584 0.6 0.642 0.411\n", + " sink 128 6 0.657 0.5 0.491 0.373 0.657 0.5 0.436 0.303\n", + " refrigerator 128 5 0.729 0.8 0.778 0.547 0.729 0.8 0.778 0.496\n", + " book 128 29 0.77 0.231 0.451 0.186 0.77 0.231 0.399 0.136\n", + " clock 128 9 0.798 0.889 0.956 0.747 0.798 0.889 0.926 0.68\n", + " vase 128 2 0.437 1 0.995 0.895 0.437 1 0.995 0.796\n", + " scissors 128 1 0 0 0.0226 0.0113 0 0 0 0\n", + " teddy bear 128 21 0.815 0.629 0.877 0.521 0.753 0.582 0.793 0.435\n", + " toothbrush 128 5 1 0.719 0.995 0.737 1 0.719 0.995 0.606\n", + "Results saved to \u001b[1mruns/train-seg/exp\u001b[0m\n" + ] + } + ], + "source": [ + "# Train YOLOv5s on COCO128 for 3 epochs\n", + "!python segment/train.py --img 640 --batch 16 --epochs 3 --data coco128-seg.yaml --weights yolov5s-seg.pt --cache" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "15glLzbQx5u0" + }, + "source": [ + "# 4. Visualize" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nWOsI5wJR1o3" + }, + "source": [ + "## Comet Logging and Visualization 🌟 NEW\n", + "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "\n", + "Getting started is easy:\n", + "```shell\n", + "pip install comet_ml # 1. install\n", + "export COMET_API_KEY= # 2. paste API key\n", + "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", + "```\n", + "\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", + "\n", + "\"yolo-ui\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Lay2WsTjNJzP" + }, + "source": [ + "## ClearML Logging and Automation 🌟 NEW\n", + "\n", + "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. 
To enable ClearML (check cells above):\n", + "\n", + "- `pip install clearml`\n", + "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n", + "\n", + "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", + "\n", + "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", + "\n", + "\n", + "\"ClearML" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-WPvRbS5Swl6" + }, + "source": [ + "## Local Logging\n", + "\n", + "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", + "\n", + "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. \n", + "\n", + "\"Local\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Zelyeqbyt3GD" + }, + "source": [ + "# Environments\n", + "\n", + "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", + "\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", + "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6Qu7Iesl0p54" + }, + "source": [ + "# Status\n", + "\n", + "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", + "\n", + "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. 
CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IEijrePND_2I" + }, + "source": [ + "# Appendix\n", + "\n", + "Additional content below." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GMusP4OAxFu6" + }, + "outputs": [], + "source": [ + "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", + "import torch\n", + "\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s-seg') # yolov5n - yolov5x6 or custom\n", + "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", + "results = model(im) # inference\n", + "results.print() # or .show(), .save(), .crop(), .pandas(), etc." + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "machine_shape": "hm", + "name": "YOLOv5 Tutorial", + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.12" + }, + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "0856bea36ec148b68522ff9c9eb258d8": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "0ace3934ec6f4d36a1b3a9e086390926": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "35e03ce5090346c9ae602891470fc555": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_76879f6f2aa54637a7a07faeea2bd684", + "max": 818322941, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_0ace3934ec6f4d36a1b3a9e086390926", + "value": 818322941 + } + }, + "574140e4c4bc48c9a171541a02cd0211": { + "model_module": "@jupyter-widgets/controls", + 
"model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_60b913d755b34d638478e30705a2dde1", + "placeholder": "​", + "style": "IPY_MODEL_0856bea36ec148b68522ff9c9eb258d8", + "value": "100%" + } + }, + "5966ba6e6f114d8c9d8d1d6b1bd4f4c7": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "60b913d755b34d638478e30705a2dde1": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "65881db1db8a4e9c930fab9172d45143": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "76879f6f2aa54637a7a07faeea2bd684": { 
+ "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "9b8caa3522fc4cbab31e13b5dfc7808d": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_574140e4c4bc48c9a171541a02cd0211", + "IPY_MODEL_35e03ce5090346c9ae602891470fc555", + "IPY_MODEL_c942c208e72d46568b476bb0f2d75496" + ], + "layout": "IPY_MODEL_65881db1db8a4e9c930fab9172d45143" + } + }, + "c942c208e72d46568b476bb0f2d75496": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_d6b7a2243e0c4beca714d99dceec23d6", + "placeholder": "​", + "style": "IPY_MODEL_5966ba6e6f114d8c9d8d1d6b1bd4f4c7", + "value": " 780M/780M [02:19<00:00, 6.24MB/s]" + } + }, + "d6b7a2243e0c4beca714d99dceec23d6": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": 
null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + } + } + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} From 49156eb1d18b6314554333c4bdae5ee3e6102992 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Nov 2022 12:02:05 +0100 Subject: [PATCH 228/326] Fix `is_colab()` (#9994) @AyushExel @kalenmike Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 88cefb7bb662..5cf156dfe664 100644 --- a/utils/general.py +++ b/utils/general.py @@ -72,7 +72,7 @@ def is_chinese(s='人工智能'): def is_colab(): # Is environment a Google Colab instance? - return 'COLAB_GPU' in os.environ + return 'google.colab' in sys.modules def is_notebook(): From cf99788823dc952b9a5f11fd8be869235e172122 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Nov 2022 14:27:36 +0100 Subject: [PATCH 229/326] Check online twice on AutoUpdate (#9999) Increased robustness to network failures Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 5cf156dfe664..cdf4f502fc9c 100644 --- a/utils/general.py +++ b/utils/general.py @@ -369,7 +369,7 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta if s and install and AUTOINSTALL: # check environment variable LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") try: - assert check_online(), "AutoUpdate skipped (offline)" + assert check_online() or check_online(), "AutoUpdate skipped (offline)" LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode()) source = file if 'file' in locals() else requirements s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ From c55e2cd73b472de808665f8337d6edeaebb74521 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Nov 2022 14:53:14 +0100 Subject: [PATCH 230/326] Add `min_items` filter option (#9997) * Add `min_items` filter option @AyushExel @Laughing-q dataset filter Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/dataloaders.py | 17 +++++++++++++++-- utils/segment/dataloaders.py | 3 ++- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 403252ff6227..6b6e83e30456 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -444,6 +444,7 @@ def __init__(self, single_cls=False, stride=32, pad=0.0, + min_items=0, prefix=''): self.img_size = img_size self.augment = augment @@ -475,7 +476,7 @@ def __init__(self, # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib assert self.im_files, f'{prefix}No images found' except Exception as e: - raise Exception(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}') + raise Exception(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}') from e # Check cache self.label_files = img2label_paths(self.im_files) # labels @@ -505,7 +506,19 @@ def __init__(self, self.shapes 
= np.array(shapes) self.im_files = list(cache.keys()) # update self.label_files = img2label_paths(cache.keys()) # update - n = len(shapes) # number of images + + # Filter images + if min_items: + include = np.array([len(x) > min_items for x in self.labels]).nonzero()[0].astype(int) + LOGGER.info(f'{prefix}{nf - len(include)}/{nf} images filtered from dataset') + self.im_files = [self.im_files[i] for i in include] + self.label_files = [self.label_files[i] for i in include] + self.labels = [self.labels[i] for i in include] + self.segments = [self.segments[i] for i in include] + self.shapes = self.shapes[include] # wh + + # Create indices + n = len(self.shapes) # number of images bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index nb = bi[-1] + 1 # number of batches self.batch = bi # batch index of image diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index a63d6ec013fd..9de6f0fbf903 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -93,12 +93,13 @@ def __init__( single_cls=False, stride=32, pad=0, + min_items=0, prefix="", downsample_ratio=1, overlap=False, ): super().__init__(path, img_size, batch_size, augment, hyp, rect, image_weights, cache_images, single_cls, - stride, pad, prefix) + stride, pad, min_items, prefix) self.downsample_ratio = downsample_ratio self.overlap = overlap From 067ad9a2d1162fd33e6d47321e3f1d860b6df0e0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Nov 2022 15:55:18 +0100 Subject: [PATCH 231/326] Improved `check_online()` robustness (#10000) * Improved check_online() robustness YOLOv5-wide improvement, not just in check_requirements() Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/utils/general.py b/utils/general.py index cdf4f502fc9c..aae466ba5c90 100644 --- a/utils/general.py +++ b/utils/general.py @@ -283,11 +283,16 @@ def file_size(path): def check_online(): # Check internet connectivity import socket - try: - socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility - return True - except OSError: - return False + + def run_once(): + # Check once + try: + socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility + return True + except OSError: + return False + + return run_once() or run_once() # check twice to increase robustness to intermittent connectivity issues def git_describe(path=ROOT): # path must be a directory @@ -369,7 +374,7 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta if s and install and AUTOINSTALL: # check environment variable LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") try: - assert check_online() or check_online(), "AutoUpdate skipped (offline)" + assert check_online(), "AutoUpdate skipped (offline)" LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode()) source = file if 'file' in locals() else requirements s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ From 82a558557a825d380178527f4b0ff175f33457fe Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Nov 2022 16:41:37 +0100 Subject: [PATCH 232/326] Fix `min_items` (#10001) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 
6b6e83e30456..4e5b75edb5c2 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -509,8 +509,8 @@ def __init__(self, # Filter images if min_items: - include = np.array([len(x) > min_items for x in self.labels]).nonzero()[0].astype(int) - LOGGER.info(f'{prefix}{nf - len(include)}/{nf} images filtered from dataset') + include = np.array([len(x) >= min_items for x in self.labels]).nonzero()[0].astype(int) + LOGGER.info(f'{prefix}{n - len(include)}/{n} images filtered from dataset') self.im_files = [self.im_files[i] for i in include] self.label_files = [self.label_files[i] for i in include] self.labels = [self.labels[i] for i in include] From 02b8a4c21bb6d9419bbf01d4af20724743dab58b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 3 Nov 2022 17:58:15 +0100 Subject: [PATCH 233/326] Update default `--epochs 100` (#10024) * Update default `--epochs 100` @AyushExel @kalenmike updating default Detection and Segmentation trainings to 100 epochs Signed-off-by: Glenn Jocher * Update train.py Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README_cn.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/README_cn.md | 10 +++++----- README.md | 10 +++++----- segment/train.py | 2 +- train.py | 2 +- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index d0cf6b9ff3bd..4184c4c683d0 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -111,11 +111,11 @@ python detect.py --source 0 # 网络摄像头 数据集结果. [模型](https://github.com/ultralytics/yolov5/tree/master/models) 和 [数据集](https://github.com/ultralytics/yolov5/tree/master/data) 自动从最新的YOLOv5 [版本](https://github.com/ultralytics/yolov5/releases) 中下载。YOLOv5n/s/m/l/x的训练时间在V100 GPU上是 1/2/4/6/8天(多GPU倍速). 尽可能使用最大的 `--batch-size`, 或通过 `--batch-size -1` 来实现 YOLOv5 [自动批处理](https://github.com/ultralytics/yolov5/pull/5092). 批量大小显示为 V100-16GB。 ```bash -python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 128 - yolov5s 64 - yolov5m 40 - yolov5l 24 - yolov5x 16 +python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 + yolov5s 64 + yolov5m 40 + yolov5l 24 + yolov5x 16 ``` diff --git a/README.md b/README.md index 7cb4d09446ca..efe7d4b090bc 100644 --- a/README.md +++ b/README.md @@ -126,11 +126,11 @@ largest `--batch-size` possible, or pass `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB. 
```bash -python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 128 - yolov5s 64 - yolov5m 40 - yolov5l 24 - yolov5x 16 +python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 + yolov5s 64 + yolov5m 40 + yolov5l 24 + yolov5x 16 ``` diff --git a/segment/train.py b/segment/train.py index 5a5f15f10d84..7950f95df4f2 100644 --- a/segment/train.py +++ b/segment/train.py @@ -463,7 +463,7 @@ def parse_opt(known=False): parser.add_argument('--cfg', type=str, default='', help='model.yaml path') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path') parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') - parser.add_argument('--epochs', type=int, default=300, help='total training epochs') + parser.add_argument('--epochs', type=int, default=100, help='total training epochs') parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') parser.add_argument('--rect', action='store_true', help='rectangular training') diff --git a/train.py b/train.py index c24a8e81531d..e882748581bf 100644 --- a/train.py +++ b/train.py @@ -433,7 +433,7 @@ def parse_opt(known=False): parser.add_argument('--cfg', type=str, default='', help='model.yaml path') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') - parser.add_argument('--epochs', type=int, default=300, help='total training epochs') + parser.add_argument('--epochs', type=int, default=100, help='total training epochs') parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') parser.add_argument('--rect', action='store_true', help='rectangular training') From fde77584687041aa62795bb2c27e895cf73686bf Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 4 Nov 2022 15:30:12 +0100 Subject: [PATCH 234/326] YOLOv5 AutoCache Update (#10027) * AutoCache * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * AutoCache * AutoCache * AutoCache * AutoCache Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- segment/train.py | 2 +- train.py | 2 +- utils/__init__.py | 1 - utils/dataloaders.py | 34 +++++++++++++++++++++++++++------- utils/general.py | 2 +- 5 files changed, 30 insertions(+), 11 deletions(-) diff --git a/segment/train.py b/segment/train.py index 7950f95df4f2..f067918e7c3c 100644 --- a/segment/train.py +++ b/segment/train.py @@ -474,7 +474,7 @@ def parse_opt(known=False): parser.add_argument('--noplots', action='store_true', help='save no plot files') parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk') parser.add_argument('--image-weights', 
action='store_true', help='use weighted image selection for training') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') diff --git a/train.py b/train.py index e882748581bf..1fe6cf4d9ebd 100644 --- a/train.py +++ b/train.py @@ -444,7 +444,7 @@ def parse_opt(known=False): parser.add_argument('--noplots', action='store_true', help='save no plot files') parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk') parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') diff --git a/utils/__init__.py b/utils/__init__.py index 0afe6f475625..8354d91c4269 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -47,7 +47,6 @@ def notebook_init(verbose=True): from utils.general import check_font, check_requirements, is_colab from utils.torch_utils import select_device # imports - check_requirements(('psutil', 'IPython')) check_font() import psutil diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 4e5b75edb5c2..b33a24a46f9c 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -19,6 +19,7 @@ from urllib.parse import urlparse import numpy as np +import psutil import torch import torch.nn.functional as F import torchvision @@ -30,8 +31,8 @@ from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, cutout, letterbox, mixup, random_perspective) from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, - cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, xywh2xyxy, xywhn2xyxy, - xyxy2xywhn) + colorstr, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, xywh2xyxy, + xywhn2xyxy, xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -564,24 +565,43 @@ def __init__(self, self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride - # Cache images into RAM/disk for faster training (WARNING: large datasets may exceed system resources) + # Cache images into RAM/disk for faster training + if cache_images == 'ram' and not self.check_cache_ram(prefix=prefix): + cache_images = False self.ims = [None] * n self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files] if cache_images: - gb = 0 # Gigabytes of cached images + b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes self.im_hw0, self.im_hw = [None] * n, [None] * n fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image results = ThreadPool(NUM_THREADS).imap(fcn, range(n)) pbar = tqdm(enumerate(results), total=n, bar_format=BAR_FORMAT, disable=LOCAL_RANK > 0) for i, x in pbar: if cache_images == 'disk': - gb += self.npy_files[i].stat().st_size + b += self.npy_files[i].stat().st_size else: # 'ram' self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) - 
gb += self.ims[i].nbytes - pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})' + b += self.ims[i].nbytes + pbar.desc = f'{prefix}Caching images ({b / gb:.1f}GB {cache_images})' pbar.close() + def check_cache_ram(self, safety_margin=0.1, prefix=''): + # Check image caching requirements vs available memory + b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes + n = min(self.n, 30) # extrapolate from 30 random images + for _ in range(n): + im = cv2.imread(random.choice(self.im_files)) # sample image + ratio = self.img_size / max(im.shape[0], im.shape[1]) # max(h, w) # ratio + b += im.nbytes * ratio ** 2 + mem_required = b * self.n / n # GB required to cache dataset into RAM + mem = psutil.virtual_memory() + cache = mem_required * (1 + safety_margin) < mem.available # to cache or not to cache, that is the question + if not cache: + LOGGER.info(f"{prefix}{mem_required / gb:.1f}GB RAM required, " + f"{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, " + f"{'caching images ✅' if cache else 'not caching images ⚠️'}") + return cache + def cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict diff --git a/utils/general.py b/utils/general.py index aae466ba5c90..0c3b44d7f9b0 100644 --- a/utils/general.py +++ b/utils/general.py @@ -374,7 +374,7 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta if s and install and AUTOINSTALL: # check environment variable LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") try: - assert check_online(), "AutoUpdate skipped (offline)" + # assert check_online(), "AutoUpdate skipped (offline)" LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode()) source = file if 'file' in locals() else requirements s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ From 78ed31c95a3b01c98a39a5b2edceb48ab630c95d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 6 Nov 2022 15:06:18 +0100 Subject: [PATCH 235/326] IoU `eps` adjustment (#10051) IoU eps adjustment Unify h1 and h2 with eps values Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/metrics.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index f0bc787e1518..3b854d4f1583 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -234,8 +234,8 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7 else: # x1, y1, x2, y2 = box1 b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, 1) b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, 1) - w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 - w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps # Intersection area inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ @@ -253,7 +253,7 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7 c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2 if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 - v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / (h2 + eps)) - torch.atan(w1 / (h1 + eps)), 2) + v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) with torch.no_grad(): alpha = v / (v - iou + (1 + eps)) return iou - (rho2 / c2 + v * alpha) # 
CIoU From 6ae3dff7d48bd914a5ab5d20e277b8222cd547c7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 6 Nov 2022 18:43:07 +0100 Subject: [PATCH 236/326] Update get_coco.sh (#10057) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- data/scripts/get_coco.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/scripts/get_coco.sh b/data/scripts/get_coco.sh index 506d46df9fb5..0d388b0a12a8 100755 --- a/data/scripts/get_coco.sh +++ b/data/scripts/get_coco.sh @@ -30,7 +30,7 @@ url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ if [ "$segments" == "true" ]; then f='coco2017labels-segments.zip' # 168 MB else - f='coco2017labels.zip' # 168 MB + f='coco2017labels.zip' # 46 MB fi echo 'Downloading' $url$f ' ...' curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f & From 58b3d078543ed92bb960ec3f213291c5fd459e43 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 7 Nov 2022 22:56:23 +0100 Subject: [PATCH 237/326] [pre-commit.ci] pre-commit suggestions (#10068) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/asottile/pyupgrade: v2.38.2 → v3.2.0](https://github.com/asottile/pyupgrade/compare/v2.38.2...v3.2.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1cd102c26b41..0106b4aab523 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -24,7 +24,7 @@ repos: - id: check-docstring-first - repo: https://github.com/asottile/pyupgrade - rev: v2.38.2 + rev: v3.2.0 hooks: - id: pyupgrade name: Upgrade code From e00d02d78b772d7848689d8947238e4b05986a54 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 7 Nov 2022 23:07:10 +0100 Subject: [PATCH 238/326] Use MNIST160 (#10069) New 160-image MNIST subset composed of first 8 examples of each class. Suitable for fast CI. 
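For reference, a rough sketch of how such a subset can be assembled, assuming an ImageFolder-style `train/{class}/` and `test/{class}/` layout; the paths are illustrative and not part of this patch:

```python
# Build a 160-image MNIST subset: the first 8 examples of each of the 10 digit
# classes, taken from both the train and test splits (8 x 10 x 2 = 160 images).
import shutil
from pathlib import Path

src, dst = Path('../datasets/mnist'), Path('../datasets/mnist160')
for split in 'train', 'test':
    for class_dir in sorted(p for p in (src / split).iterdir() if p.is_dir()):
        out = dst / split / class_dir.name
        out.mkdir(parents=True, exist_ok=True)
        for im in sorted(class_dir.glob('*'))[:8]:  # first 8 examples per class
            shutil.copy(im, out / im.name)
```

The CI workflow change below swaps the larger `mnist2560` dataset for this smaller one.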
Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 1ec68e8412f9..f31bb6e6ce3c 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -155,11 +155,11 @@ jobs: run: | m=${{ matrix.model }}-cls.pt # official weights b=runs/train-cls/exp/weights/best.pt # best.pt checkpoint - python classify/train.py --imgsz 32 --model $m --data mnist2560 --epochs 1 # train - python classify/val.py --imgsz 32 --weights $b --data ../datasets/mnist2560 # val - python classify/predict.py --imgsz 32 --weights $b --source ../datasets/mnist2560/test/7/60.png # predict + python classify/train.py --imgsz 32 --model $m --data mnist160 --epochs 1 # train + python classify/val.py --imgsz 32 --weights $b --data ../datasets/mnist160 # val + python classify/predict.py --imgsz 32 --weights $b --source ../datasets/mnist160/test/7/60.png # predict python classify/predict.py --imgsz 32 --weights $m --source data/images/bus.jpg # predict - python export.py --weights $b --img 64 --imgsz 224 --include torchscript # export + python export.py --weights $b --img 64 --include torchscript # export python - < Date: Tue, 8 Nov 2022 00:58:00 +0100 Subject: [PATCH 239/326] Update Dockerfile keep default torch installation (#10071) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 87605456a5d9..7ec6efaeacba 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -15,7 +15,7 @@ RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1- # Install pip packages COPY requirements.txt . 
RUN python -m pip install --upgrade pip wheel -RUN pip uninstall -y Pillow torchtext torch torchvision +RUN pip uninstall -y Pillow torchtext # torch torchvision RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook Pillow>=9.1.0 \ 'opencv-python<4.6.0.66' \ --extra-index-url https://download.pytorch.org/whl/cu113 From 86decb3c49f91547975d7b7399290eb247888f6f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Nov 2022 15:05:19 +0100 Subject: [PATCH 240/326] Add `ultralytics` pip package (#10103) --- requirements.txt | 23 ++++++++++++----------- utils/docker/Dockerfile | 2 +- utils/docker/Dockerfile-arm64 | 2 +- utils/docker/Dockerfile-cpu | 2 +- 4 files changed, 15 insertions(+), 14 deletions(-) diff --git a/requirements.txt b/requirements.txt index 8cb1bd4c6fe1..70dd7ce53ba3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,29 +1,32 @@ -# YOLOv5 requirements +# YOLOv5 🚀 requirements # Usage: pip install -r requirements.txt -# Base ---------------------------------------- +# Base ------------------------------------------------------------------------ +ipython # interactive notebook matplotlib>=3.2.2 numpy>=1.18.5 opencv-python>=4.1.1 Pillow>=7.1.2 +psutil # system resources PyYAML>=5.3.1 requests>=2.23.0 scipy>=1.4.1 -torch>=1.7.0 # see https://pytorch.org/get-started/locally/ (recommended) +thop>=0.1.1 # FLOPs computation +torch>=1.7.0 # see https://pytorch.org/get-started/locally (recommended) torchvision>=0.8.1 tqdm>=4.64.0 # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 -# Logging ------------------------------------- +# Logging --------------------------------------------------------------------- tensorboard>=2.4.1 # clearml>=1.2.0 # comet -# Plotting ------------------------------------ +# Plotting -------------------------------------------------------------------- pandas>=1.1.4 seaborn>=0.11.0 -# Export -------------------------------------- +# Export ---------------------------------------------------------------------- # coremltools>=6.0 # CoreML export # onnx>=1.9.0 # ONNX export # onnx-simplifier>=0.4.1 # ONNX simplifier @@ -34,14 +37,12 @@ seaborn>=0.11.0 # tensorflowjs>=3.9.0 # TF.js export # openvino-dev # OpenVINO export -# Deploy -------------------------------------- +# Deploy ---------------------------------------------------------------------- # tritonclient[all]~=2.24.0 -# Extras -------------------------------------- -ipython # interactive notebook -psutil # system utilization -thop>=0.1.1 # FLOPs computation +# Extras ---------------------------------------------------------------------- # mss # screenshots # albumentations>=1.0.3 # pycocotools>=2.0 # COCO mAP # roboflow +# ultralytics # HUB https://hub.ultralytics.com diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 7ec6efaeacba..a5035c6abc33 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -16,7 +16,7 @@ RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1- COPY requirements.txt . 
RUN python -m pip install --upgrade pip wheel RUN pip uninstall -y Pillow torchtext # torch torchvision -RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook Pillow>=9.1.0 \ +RUN pip install --no-cache -r requirements.txt ultralytics albumentations comet gsutil notebook Pillow>=9.1.0 \ 'opencv-python<4.6.0.66' \ --extra-index-url https://download.pytorch.org/whl/cu113 diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index 6e8ff77545c5..8ec71622d9b6 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -17,7 +17,7 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc lib # Install pip packages COPY requirements.txt . RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt gsutil notebook \ +RUN pip install --no-cache -r requirements.txt ultralytics gsutil notebook \ tensorflow-aarch64 # tensorflowjs \ # onnx onnx-simplifier onnxruntime \ diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index f3f81ec02c23..017e2826458b 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -17,7 +17,7 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1- # Install pip packages COPY requirements.txt . RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ +RUN pip install --no-cache -r requirements.txt ultralytics albumentations gsutil notebook \ coremltools onnx onnx-simplifier onnxruntime tensorflow-cpu tensorflowjs \ # openvino-dev \ --extra-index-url https://download.pytorch.org/whl/cpu From 892c4cd4a5a99d9c824ffeb49ce512ee2c9b93e5 Mon Sep 17 00:00:00 2001 From: janus-zheng <106574221+janus-zheng@users.noreply.github.com> Date: Wed, 9 Nov 2022 22:21:43 +0800 Subject: [PATCH 241/326] AutoShape integer image-size fix (#10090) Update common.py We have a division at line 694, and then a multiplication at line 695, so it makes `y*g` not an integer. And since `shape1` will be used at line 697 to ensure the size is divisible by the `stride`, this may lead to different image size. In my experiment, my image is [640, 640], it's divisible by the default stride 32, but I found that the result is changed to [672, 672] after line 697. So the final detection result is slightly different from that directly using the `detect.py` script, which does not call the AutoShape methods. 
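A minimal numeric illustration of the failure mode described above; `make_divisible` is restated here for self-containment, and the float value is a contrived stand-in for a `y * g` product that lands just above an exact multiple of the stride:

```python
import math

def make_divisible(x, divisor=32):
    # Round x up to the nearest multiple of divisor, as in utils.general
    return math.ceil(x / divisor) * divisor

y = 640.0000000000001            # float gain artifact: nominally 640
print(make_divisible(y))         # 672 - one full stride larger than intended
print(make_divisible(int(y)))    # 640 - truncating first keeps the expected shape
```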
Signed-off-by: janus-zheng <106574221+janus-zheng@users.noreply.github.com> Signed-off-by: janus-zheng <106574221+janus-zheng@users.noreply.github.com> Co-authored-by: Glenn Jocher --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 6347e51cdf0b..64f1b9354225 100644 --- a/models/common.py +++ b/models/common.py @@ -692,7 +692,7 @@ def forward(self, ims, size=640, augment=False, profile=False): s = im.shape[:2] # HWC shape0.append(s) # image shape g = max(size) / max(s) # gain - shape1.append([y * g for y in s]) + shape1.append([int(y * g) for y in s]) ims[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] if self.pt else size # inf shape x = [letterbox(im, shape1, auto=False)[0] for im in ims] # pad From c1fcfe8cd9030939dd1635b158984fb066279b22 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Nov 2022 17:20:26 +0100 Subject: [PATCH 242/326] YouTube Usage example comments (#10106) * YouTube Usage example comments Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index b33a24a46f9c..621c03cd2db1 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -353,6 +353,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, tr # Start thread to read frames from video stream st = f'{i + 1}/{n}: {s}... ' if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video + # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc' check_requirements(('pafy', 'youtube_dl==2020.12.2')) import pafy s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL From 75728bb3ea99113f306280b734dedcc5d7d067b1 Mon Sep 17 00:00:00 2001 From: Victor Sonck Date: Wed, 9 Nov 2022 17:45:09 +0100 Subject: [PATCH 243/326] Mapped project and name to ClearML (#10100) * Mapped project and name to ClearML * Add project and task name docs * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/loggers/clearml/README.md | 10 +++++++++- utils/loggers/clearml/clearml_utils.py | 4 ++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/utils/loggers/clearml/README.md b/utils/loggers/clearml/README.md index e0c5824bc2a2..3cf4c268583f 100644 --- a/utils/loggers/clearml/README.md +++ b/utils/loggers/clearml/README.md @@ -57,12 +57,20 @@ To enable ClearML experiment tracking, simply install the ClearML pip package. pip install clearml>=1.2.0 ``` -This will enable integration with the YOLOv5 training script. Every training run from now on, will be captured and stored by the ClearML experiment manager. If you want to change the `project_name` or `task_name`, head over to our custom logger, where you can change it: `utils/loggers/clearml/clearml_utils.py` +This will enable integration with the YOLOv5 training script. Every training run from now on, will be captured and stored by the ClearML experiment manager. 
+
+If you want to change the `project_name` or `task_name`, use the `--project` and `--name` arguments of the `train.py` script; by default the project will be called `YOLOv5` and the task `Training`.
+PLEASE NOTE: ClearML uses `/` as a delimiter for subprojects, so be careful when using `/` in your project name!

 ```bash
 python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache
 ```

+or with custom project and task name:
+```bash
+python train.py --project my_project --name my_training --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache
+```
+
 This will capture:
 - Source code + uncommitted changes
 - Installed packages
diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py
index eb1c12ce6cac..fe5f597a87a6 100644
--- a/utils/loggers/clearml/clearml_utils.py
+++ b/utils/loggers/clearml/clearml_utils.py
@@ -85,8 +85,8 @@ def __init__(self, opt, hyp):
         self.data_dict = None
         if self.clearml:
             self.task = Task.init(
-                project_name='YOLOv5',
-                task_name='training',
+                project_name=opt.project if opt.project != 'runs/train' else 'YOLOv5',
+                task_name=opt.name if opt.name != 'exp' else 'Training',
                 tags=['YOLOv5'],
                 output_uri=True,
                 auto_connect_frameworks={'pytorch': False}

From 078059c5b3ead9579c53f68c521ed5f0e7e87afa Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 10 Nov 2022 18:32:34 +0100
Subject: [PATCH 244/326] Update IoU functions (#10123)

Remove box area function and support expandable bbox_iou() calls.

Signed-off-by: Glenn Jocher

Signed-off-by: Glenn Jocher
---
 utils/metrics.py | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/utils/metrics.py b/utils/metrics.py
index 3b854d4f1583..65ea463c0dab 100644
--- a/utils/metrics.py
+++ b/utils/metrics.py
@@ -227,13 +227,13 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7
 
     # Get the coordinates of bounding boxes
     if xywh:  # transform from xywh to xyxy
-        (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, 1), box2.chunk(4, 1)
+        (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1)
         w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2
         b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_
         b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_
     else:  # x1, y1, x2, y2 = box1
-        b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, 1)
-        b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, 1)
+        b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1)
+        b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1)
         w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
         w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
 
@@ -263,11 +263,6 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7
     return iou  # IoU
 
 
-def box_area(box):
-    # box = xyxy(4,n)
-    return (box[2] - box[0]) * (box[3] - box[1])
-
-
 def box_iou(box1, box2, eps=1e-7):
     # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
     """
@@ -282,11 +277,11 @@ def box_iou(box1, box2, eps=1e-7):
     """
 
     # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
-    (a1, a2), (b1, b2) = box1[:, None].chunk(2, 2), box2.chunk(2, 1)
+    (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2)
    inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2)
 
     # IoU = inter / (area1 + area2 - inter)
-    return inter / (box_area(box1.T)[:, None] + box_area(box2.T) - inter + eps)
+    return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps)
 
 
 def bbox_ioa(box1, box2, eps=1e-7):

From
55e95168465b094733e3ef1ec36e0a18f200cd94 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 11 Nov 2022 00:21:29 +0100 Subject: [PATCH 245/326] Add Ultralytics HUB to README (#10070) * Add Ultralytics HUB section to README @pderrenger @kalenmike @AlanDimmer @AyushExel new Ultralytics HUB section in YOLOv5 README. Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md @AlanDimmer @kalenmike new integrations image Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update README_cn.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/README_cn.md | 57 ++++++++++++++++++------------------------ README.md | 20 +++++++++++---- segment/tutorial.ipynb | 2 +- tutorial.ipynb | 2 +- 4 files changed, 42 insertions(+), 39 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index 4184c4c683d0..90d3da8298cc 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -144,47 +144,40 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
-## 环境
-使用经过我们验证的环境，几秒钟就可以开始。点击下面的每个图标了解详情。
+## Integrations
-
-## 如何与第三方集成
-
-|Deci ⭐ NEW|ClearML ⭐ NEW|Roboflow|Weights & Biases|
+|Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Deci ⭐ NEW|
 |:-:|:-:|:-:|:-:|
-|在[Deci](https://bit.ly/yolov5-deci-platform)一键自动编译和量化YOLOv5以提高推理性能|使用[ClearML](https://cutt.ly/yolov5-readme-clearml) (开源!)自动追踪,可视化,以及远程训练YOLOv5|标记并将您的自定义数据直接导出到YOLOv5后,用[Roboflow](https://roboflow.com/?ref=ultralytics)进行训练|通过[Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)自动跟踪以及可视化你在云端所有的YOLOv5训练运行情况|
+|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|
+
+## Ultralytics HUB
+
+[Ultralytics HUB](https://bit.ly/ultralytics_hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **Free** now!
+
 ## 为什么选择 YOLOv5
diff --git a/README.md b/README.md
index efe7d4b090bc..5101297782d0 100644
--- a/README.md
+++ b/README.md
@@ -52,9 +52,7 @@
 ## Documentation
 
-See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment.
-
-## Quick Start Examples
+See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment. See below for quickstart examples.
 
 Install
 
@@ -71,7 +69,7 @@ pip install -r requirements.txt  # install
 
-
+
 Inference
 
 YOLOv5 [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest
 
@@ -163,7 +161,11 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml
-
 ## Integrations
-
+
+
+
@@ -184,6 +186,14 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml
-
 |Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|
+
+## Ultralytics HUB
+
+[Ultralytics HUB](https://bit.ly/ultralytics_hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **Free** now!
+
+
 ## Why YOLOv5
YOLOv5 has been designed to be super easy to get started and simple to learn. We prioritize real-world results. diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index 47559978be74..079bfe3057bc 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -232,7 +232,7 @@ "source": [ "# 3. Train\n", "\n", - "

\n", + "

\n", "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "

\n", "\n", diff --git a/tutorial.ipynb b/tutorial.ipynb index b40f08ef20b3..96f05426b4a8 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -626,7 +626,7 @@ "source": [ "# 3. Train\n", "\n", - "

\n", + "

\n", "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "

\n", "\n", From 7c3827a2d66ce83a4afdffebe55d1bfbd39359d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rub=C3=A9n=20Usamentiaga?= Date: Fri, 11 Nov 2022 20:43:16 +0100 Subject: [PATCH 246/326] Fix benchmark.py usage comment (#10131) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update benchmarks.py Signed-off-by: Rubén Usamentiaga Signed-off-by: Rubén Usamentiaga --- benchmarks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks.py b/benchmarks.py index ef5c882973f0..03d7d693a936 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -22,7 +22,7 @@ $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # TensorRT Usage: - $ python utils/benchmarks.py --weights yolov5s.pt --img 640 + $ python benchmarks.py --weights yolov5s.pt --img 640 """ import argparse From f33718f36f756301b91da6207f1d02f30b3916e1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 11 Nov 2022 21:20:48 +0100 Subject: [PATCH 247/326] Update HUB banner image (#10134) * Update HUB banner image Passed through tinypng for filesize reduction Signed-off-by: Glenn Jocher * Update README_cn.md Signed-off-by: Glenn Jocher * Update README_cn.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/README_cn.md | 2 +- README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index 90d3da8298cc..65ecd31a3e69 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -177,7 +177,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - [Ultralytics HUB](https://bit.ly/ultralytics_hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **Free** now! - + ##
为什么选择 YOLOv5
diff --git a/README.md b/README.md
index 5101297782d0..0fa95f404117 100644
--- a/README.md
+++ b/README.md
@@ -191,7 +191,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml
 
-
+
 
 ## Why YOLOv5
From abbfd695232b1bfcbd8e122e2aeb37fcc3d146d5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 14 Nov 2022 18:54:49 +0100 Subject: [PATCH 248/326] Copy-Paste zero value fix (#10152) * Copy-Paste zero value fix Signed-off-by: Glenn Jocher * Update augmentations.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/augmentations.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index 7c8e0bcdede6..1eae5db8f816 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -250,12 +250,10 @@ def copy_paste(im, labels, segments, p=0.5): if (ioa < 0.30).all(): # allow 30% obscuration of existing labels labels = np.concatenate((labels, [[l[0], *box]]), 0) segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) - cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) + cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (1, 1, 1), cv2.FILLED) - result = cv2.bitwise_and(src1=im, src2=im_new) - result = cv2.flip(result, 1) # augment segments (flip left-right) - i = result > 0 # pixels to replace - # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch + result = cv2.flip(im, 1) # augment segments (flip left-right) + i = cv2.flip(im_new, 1).astype(bool) im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug return im, labels, segments From ea73386e5a21f6b6d4f2bdc0ba1f9f8a7ced3f2a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 15 Nov 2022 14:19:47 +0100 Subject: [PATCH 249/326] Add Copy-Paste to `mosaic9()` (#10165) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 621c03cd2db1..54d3f7bbba00 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -869,6 +869,7 @@ def load_mosaic9(self, index): # img9, labels9 = replicate(img9, labels9) # replicate # Augment + img9, labels9, segments9 = copy_paste(img9, labels9, segments9, p=self.hyp['copy_paste']) img9, labels9 = random_perspective(img9, labels9, segments9, From 9dd40f072386134d5271a902f135e95979de1419 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 15 Nov 2022 16:27:07 +0100 Subject: [PATCH 250/326] Add `join_threads()` (#10086) * Update __init__.py Signed-off-by: Glenn Jocher * Update __init__.py Signed-off-by: Glenn Jocher * Update __init__.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/__init__.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/utils/__init__.py b/utils/__init__.py index 8354d91c4269..7bf3efe6b8c7 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -37,6 +37,16 @@ def wrapper(*args, **kwargs): return wrapper +def join_threads(verbose=False): + # Join all daemon threads, i.e. 
atexit.register(lambda: join_threads()) + main_thread = threading.current_thread() + for t in threading.enumerate(): + if t is not main_thread: + if verbose: + print(f'Joining thread {t.name}') + t.join() + + def notebook_init(verbose=True): # Check system software and hardware print('Checking setup...') From 5e03f5fc8cbd658e183bb3812fe1c8553cb8cf05 Mon Sep 17 00:00:00 2001 From: Amol Dumrewal Date: Tue, 15 Nov 2022 23:30:33 +0530 Subject: [PATCH 251/326] Fix dataloader filepath modification to perform replace only once and not for all occurences of string (#10163) * Fix dataloader filepath modification to perform only once and not for all occurences of string * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * cleanup Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/dataloaders.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 54d3f7bbba00..0418293a6e21 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -470,8 +470,8 @@ def __init__(self, with open(p) as t: t = t.read().strip().splitlines() parent = str(p.parent) + os.sep - f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path - # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) + f += [x.replace('./', parent, 1) if x.startswith('./') else x for x in t] # to global path + # f += [p.parent / x.lstrip(os.sep) for x in t] # to global path (pathlib) else: raise FileNotFoundError(f'{prefix}{p} does not exist') self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) From 166b9f2fa79a67788a2a372dc52c9e8e0f7a7cc1 Mon Sep 17 00:00:00 2001 From: Ryan Echols Date: Wed, 16 Nov 2022 08:09:30 -0700 Subject: [PATCH 252/326] fix: prevent logging config clobbering (#10133) Previous behavior: loading this repository with `torch.hub.load` clobbers the existing logging configuration by modifying the root logger's configuration. New behavior: loading this repository with `torch.hub.load` only clobbers the logging configuration for logger `yolov5` and its descendants. Signed-off-by: Ryan Echols Signed-off-by: Ryan Echols --- utils/general.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 0c3b44d7f9b0..76dd2a40b51b 100644 --- a/utils/general.py +++ b/utils/general.py @@ -126,8 +126,9 @@ def set_logging(name=None, verbose=VERBOSE): log.addHandler(handler) -set_logging() # run before defining LOGGER -LOGGER = logging.getLogger("yolov5") # define globally (used in train.py, val.py, detect.py, etc.) +logger_name = "yolov5" +set_logging(logger_name) # run before defining LOGGER +LOGGER = logging.getLogger(logger_name) # define globally (used in train.py, val.py, detect.py, etc.) 
if platform.system() == 'Windows': for fn in LOGGER.info, LOGGER.warning: setattr(LOGGER, fn.__name__, lambda x: fn(emojis(x))) # emoji safe logging From 90575107e7b06d48ef91ffa46a41a55439ebdab1 Mon Sep 17 00:00:00 2001 From: tripleMu Date: Wed, 16 Nov 2022 23:10:15 +0800 Subject: [PATCH 253/326] Filter PyTorch 1.13 UserWarnings (#10166) FilterWarning for torch.distributed._all_gather_base Co-authored-by: Glenn Jocher --- utils/torch_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 04a3873854ee..fe934abf118c 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -32,6 +32,7 @@ # Suppress PyTorch warnings warnings.filterwarnings('ignore', message='User provided device_type of \'cuda\', but CUDA is not available. Disabling') +warnings.filterwarnings('ignore', category=UserWarning) def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9.0')): From e40662ffdd80a6f108a62cf0d53d06085d943223 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 16 Nov 2022 19:06:19 +0100 Subject: [PATCH 254/326] Revert "fix: prevent logging config clobbering" (#10177) Revert "fix: prevent logging config clobbering (#10133)" This reverts commit 166b9f2fa79a67788a2a372dc52c9e8e0f7a7cc1. --- utils/general.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/utils/general.py b/utils/general.py index 76dd2a40b51b..0c3b44d7f9b0 100644 --- a/utils/general.py +++ b/utils/general.py @@ -126,9 +126,8 @@ def set_logging(name=None, verbose=VERBOSE): log.addHandler(handler) -logger_name = "yolov5" -set_logging(logger_name) # run before defining LOGGER -LOGGER = logging.getLogger(logger_name) # define globally (used in train.py, val.py, detect.py, etc.) +set_logging() # run before defining LOGGER +LOGGER = logging.getLogger("yolov5") # define globally (used in train.py, val.py, detect.py, etc.) if platform.system() == 'Windows': for fn in LOGGER.info, LOGGER.warning: setattr(LOGGER, fn.__name__, lambda x: fn(emojis(x))) # emoji safe logging From a9f895d304aea5920e694606927fa9208aa7f0ed Mon Sep 17 00:00:00 2001 From: janus-zheng <106574221+janus-zheng@users.noreply.github.com> Date: Thu, 17 Nov 2022 20:42:26 +0800 Subject: [PATCH 255/326] Apply make_divisible for ONNX models in Autoshape (#10172) * Apply make_divisible for onnx models in Autoshape At line 697 we have this `make_divisible` function for pytorch models. * Context: we want to run inference on varied input sizes instead of fixed image size. * When I test an image of size [720, 720] for a pytorch model (e.g., yolov5n.pt), we can see that it will be reshaped to [736, 736] by the function. This is as expected. * When I test the same image for the onnx model (e.g., yolov5n.onnx, exported with `--dynamic`), I got an error and it's due to the indivisible problem ``` onnxruntime.capi.onnxruntime_pybind11_state.Fail: [ONNXRuntimeError] : 1 : FAIL : Non-zero status code returned while running Concat node. Name:'Concat_143' Status Message: concat.cc:156 PrepareForCompute Non concat axis dimensions must match: Axis 3 has mismatched dimensions of 45 and 46 ``` The simple solution is to enable the `make_divisible` function for onnx model too. 
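A sketch of the effect on a `--dynamic` ONNX export, assuming a model exported as `yolov5n.onnx` with input name `images` (the `export.py` default); sizes are snapped to the stride grid before inference so the graph's Concat nodes see consistent feature-map dimensions:

```python
import math

import numpy as np
import onnxruntime as ort

def make_divisible(x, divisor=32):
    # Round x up to the nearest multiple of the model stride, as in utils.general
    return math.ceil(x / divisor) * divisor

h, w = (make_divisible(x) for x in (720, 720))  # 720x720 -> 736x736
im = np.zeros((1, 3, h, w), dtype=np.float32)   # dummy letterboxed input
session = ort.InferenceSession('yolov5n.onnx')
pred = session.run(None, {'images': im})[0]     # no Concat dimension mismatch
print(pred.shape)
```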
Signed-off-by: janus-zheng <106574221+janus-zheng@users.noreply.github.com> * revise indent Signed-off-by: janus-zheng <106574221+janus-zheng@users.noreply.github.com> * Apply make_divisible to all formats All formats from DetectMultiBackend should have default stride=32 Signed-off-by: Glenn Jocher Signed-off-by: janus-zheng <106574221+janus-zheng@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 64f1b9354225..8b5ec1c786d8 100644 --- a/models/common.py +++ b/models/common.py @@ -694,7 +694,7 @@ def forward(self, ims, size=640, augment=False, profile=False): g = max(size) / max(s) # gain shape1.append([int(y * g) for y in s]) ims[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update - shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] if self.pt else size # inf shape + shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] # inf shape x = [letterbox(im, shape1, auto=False)[0] for im in ims] # pad x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 From 1510111b46a24a0c0fa2d685a6f3c96693368654 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 17 Nov 2022 19:22:18 +0100 Subject: [PATCH 256/326] data.yaml `names.keys()` integer assert (#10190) * data.yaml `names.keys()` integer assert Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 0c3b44d7f9b0..2f047b351228 100644 --- a/utils/general.py +++ b/utils/general.py @@ -482,9 +482,10 @@ def check_dataset(data, autodownload=True): # Checks for k in 'train', 'val', 'names': - assert k in data, f"data.yaml '{k}:' field missing ❌" + assert k in data, emojis(f"data.yaml '{k}:' field missing ❌") if isinstance(data['names'], (list, tuple)): # old array format data['names'] = dict(enumerate(data['names'])) # convert to dict + assert all(isinstance(k, int) for k in data['names'].keys()), 'data.yaml names keys must be integers, i.e. 2: car' data['nc'] = len(data['names']) # Resolve paths From ff6e6e328efe43547bc57d4e02ae8ddc3387ef58 Mon Sep 17 00:00:00 2001 From: Ryan Echols Date: Thu, 17 Nov 2022 12:47:46 -0700 Subject: [PATCH 257/326] Fix: try 2 - prevent logging config clobbering (#10192) * fix: try 2 - prevent logging config clobbering Previous behavior: loading this repository with `torch.hub.load` clobbers the existing logging configuration by modifying the root logger's configuration. New behavior: loading this repository with `torch.hub.load` only clobbers the logging configuration for logger `yolov5` and its descendants. 
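A minimal check of the isolation property described above, assuming CPython's `logging.config.dictConfig` semantics; the config mirrors the patch below in reduced form:

```python
import logging
import logging.config

logging.basicConfig(level=logging.WARNING)  # stand-in for a user's root config
before = list(logging.getLogger().handlers)

logging.config.dictConfig({
    'version': 1,
    'disable_existing_loggers': False,  # leave loggers other than 'yolov5' alone
    'formatters': {'yolov5': {'format': '%(message)s'}},
    'handlers': {'yolov5': {'class': 'logging.StreamHandler', 'formatter': 'yolov5', 'level': 'INFO'}},
    'loggers': {'yolov5': {'level': 'INFO', 'handlers': ['yolov5'], 'propagate': False}}})

assert logging.getLogger().handlers == before  # root handlers left in place
logging.getLogger('yolov5').info('scoped to the yolov5 logger and its children')
```

The previous `set_logging` removed handlers from the root logger directly on Colab and Kaggle, which is what clobbered user configurations.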
This is done in a way compatible with Google Colab Signed-off-by: Ryan Echols * chore: fill in comment no-op so a pre-commit hook can auto-format files Signed-off-by: Ryan Echols * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Ryan Echols Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/general.py | 41 ++++++++++++++++++++++++++--------------- 1 file changed, 26 insertions(+), 15 deletions(-) diff --git a/utils/general.py b/utils/general.py index 2f047b351228..8f85557a065a 100644 --- a/utils/general.py +++ b/utils/general.py @@ -7,6 +7,7 @@ import glob import inspect import logging +import logging.config import math import os import platform @@ -111,23 +112,33 @@ def is_writeable(dir, test=False): return False -def set_logging(name=None, verbose=VERBOSE): - # Sets level and returns logger - if is_kaggle() or is_colab(): - for h in logging.root.handlers: - logging.root.removeHandler(h) # remove all handlers associated with the root logger object - rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings - level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR - log = logging.getLogger(name) - log.setLevel(level) - handler = logging.StreamHandler() - handler.setFormatter(logging.Formatter("%(message)s")) - handler.setLevel(level) - log.addHandler(handler) +LOGGING_NAME = "yolov5" -set_logging() # run before defining LOGGER -LOGGER = logging.getLogger("yolov5") # define globally (used in train.py, val.py, detect.py, etc.) +def set_logging(name=LOGGING_NAME, verbose=True): + # sets up logging for the given name + rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings + level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR + logging.config.dictConfig({ + "version": 1, + "disable_existing_loggers": False, + "formatters": { + name: { + "format": "%(message)s"}}, + "handlers": { + name: { + "class": "logging.StreamHandler", + "formatter": name, + "level": level,}}, + "loggers": { + name: { + "level": level, + "handlers": [name], + "propagate": False,}}}) + + +set_logging(LOGGING_NAME) # run before defining LOGGER +LOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.) 
if platform.system() == 'Windows': for fn in LOGGER.info, LOGGER.warning: setattr(LOGGER, fn.__name__, lambda x: fn(emojis(x))) # emoji safe logging From 467a57f01b393989867426261d3e9a95566e3e24 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 14:19:46 +0100 Subject: [PATCH 258/326] Segment prediction labels normalization fix (#10205) * normalize_segments * round remove * swap axes fix --- segment/predict.py | 2 +- utils/general.py | 17 ++++++++++------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/segment/predict.py b/segment/predict.py index 3ae68240726a..da1097c047c1 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -156,7 +156,7 @@ def run( # Segments if save_txt: segments = reversed(masks2segments(masks)) - segments = [scale_segments(im.shape[2:], x, im0.shape).round() for x in segments] + segments = [scale_segments(im.shape[2:], x, im0.shape, normalize=True) for x in segments] # Print results for c in det[:, 5].unique(): diff --git a/utils/general.py b/utils/general.py index 8f85557a065a..c543a237d25b 100644 --- a/utils/general.py +++ b/utils/general.py @@ -822,7 +822,7 @@ def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None): return boxes -def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None): +def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False): # Rescale coords (xyxy) from img1_shape to img0_shape if ratio_pad is None: # calculate from img0_shape gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new @@ -835,6 +835,9 @@ def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None): segments[:, 1] -= pad[1] # y padding segments /= gain clip_segments(segments, img0_shape) + if normalize: + segments[:, 0] /= img0_shape[1] # width + segments[:, 1] /= img0_shape[0] # height return segments @@ -850,14 +853,14 @@ def clip_boxes(boxes, shape): boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 -def clip_segments(boxes, shape): +def clip_segments(segments, shape): # Clip segments (xy1,xy2,...) 
to image shape (height, width) - if isinstance(boxes, torch.Tensor): # faster individually - boxes[:, 0].clamp_(0, shape[1]) # x - boxes[:, 1].clamp_(0, shape[0]) # y + if isinstance(segments, torch.Tensor): # faster individually + segments[:, 0].clamp_(0, shape[1]) # x + segments[:, 1].clamp_(0, shape[0]) # y else: # np.array (faster grouped) - boxes[:, 0] = boxes[:, 0].clip(0, shape[1]) # x - boxes[:, 1] = boxes[:, 1].clip(0, shape[0]) # y + segments[:, 0] = segments[:, 0].clip(0, shape[1]) # x + segments[:, 1] = segments[:, 1].clip(0, shape[0]) # y def non_max_suppression( From 241d798bb44a2900591786456a61fd73f3993b4f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 15:05:25 +0100 Subject: [PATCH 259/326] Created using Colaboratory --- tutorial.ipynb | 320 ++++++++++++++++++++++++------------------------- 1 file changed, 159 insertions(+), 161 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 96f05426b4a8..07a6625a1491 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -5,7 +5,6 @@ "colab": { "name": "YOLOv5 Tutorial", "provenance": [], - "collapsed_sections": [], "machine_shape": "hm", "toc_visible": true }, @@ -16,7 +15,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "9b8caa3522fc4cbab31e13b5dfc7808d": { + "13e0e8b77bf54b25b8893f0b4164315f": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", @@ -31,14 +30,14 @@ "_view_name": "HBoxView", "box_style": "", "children": [ - "IPY_MODEL_574140e4c4bc48c9a171541a02cd0211", - "IPY_MODEL_35e03ce5090346c9ae602891470fc555", - "IPY_MODEL_c942c208e72d46568b476bb0f2d75496" + "IPY_MODEL_48037f2f7fea4012b9b341f6aee75297", + "IPY_MODEL_3f3b925287274893baf5ed7bb0cf6635", + "IPY_MODEL_c44bdca7c9784b20ba2146250ee744d6" ], - "layout": "IPY_MODEL_65881db1db8a4e9c930fab9172d45143" + "layout": "IPY_MODEL_5b0ed23cd32c4c7d8d9467b7425684ad" } }, - "574140e4c4bc48c9a171541a02cd0211": { + "48037f2f7fea4012b9b341f6aee75297": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -53,13 +52,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_60b913d755b34d638478e30705a2dde1", + "layout": "IPY_MODEL_1e10b4db5d644cb78bd6e005bb34038a", "placeholder": "​", - "style": "IPY_MODEL_0856bea36ec148b68522ff9c9eb258d8", + "style": "IPY_MODEL_a58728093ecb4eafb826bee11a84c549", "value": "100%" } }, - "35e03ce5090346c9ae602891470fc555": { + "3f3b925287274893baf5ed7bb0cf6635": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", @@ -75,15 +74,15 @@ "bar_style": "success", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_76879f6f2aa54637a7a07faeea2bd684", + "layout": "IPY_MODEL_9ce169fe4b8543c0b26d745daa230f18", "max": 818322941, "min": 0, "orientation": "horizontal", - "style": "IPY_MODEL_0ace3934ec6f4d36a1b3a9e086390926", + "style": "IPY_MODEL_d5da01aca8fb400c96e76f44c9403581", "value": 818322941 } }, - "c942c208e72d46568b476bb0f2d75496": { + "c44bdca7c9784b20ba2146250ee744d6": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -98,13 +97,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_d6b7a2243e0c4beca714d99dceec23d6", + "layout": "IPY_MODEL_98cbaa572fdd4c42975f52015672b3a5", "placeholder": "​", - "style": "IPY_MODEL_5966ba6e6f114d8c9d8d1d6b1bd4f4c7", - 
"value": " 780M/780M [02:19<00:00, 6.24MB/s]" + "style": "IPY_MODEL_a636aa81f5cc453099c9e552f0986e63", + "value": " 780M/780M [01:27<00:00, 6.98MB/s]" } }, - "65881db1db8a4e9c930fab9172d45143": { + "5b0ed23cd32c4c7d8d9467b7425684ad": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -156,7 +155,7 @@ "width": null } }, - "60b913d755b34d638478e30705a2dde1": { + "1e10b4db5d644cb78bd6e005bb34038a": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -208,7 +207,7 @@ "width": null } }, - "0856bea36ec148b68522ff9c9eb258d8": { + "a58728093ecb4eafb826bee11a84c549": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -223,7 +222,7 @@ "description_width": "" } }, - "76879f6f2aa54637a7a07faeea2bd684": { + "9ce169fe4b8543c0b26d745daa230f18": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -275,7 +274,7 @@ "width": null } }, - "0ace3934ec6f4d36a1b3a9e086390926": { + "d5da01aca8fb400c96e76f44c9403581": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", @@ -291,7 +290,7 @@ "description_width": "" } }, - "d6b7a2243e0c4beca714d99dceec23d6": { + "98cbaa572fdd4c42975f52015672b3a5": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -343,7 +342,7 @@ "width": null } }, - "5966ba6e6f114d8c9d8d1d6b1bd4f4c7": { + "a636aa81f5cc453099c9e552f0986e63": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -403,7 +402,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "0f9ee467-cea4-48e8-9050-7a76ae1b6141" + "outputId": "bcb6db4a-fc21-4258-9b53-4a760a534656" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", @@ -414,20 +413,20 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-56-g30e674b Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n" + "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ - "Setup complete ✅ (8 CPUs, 51.0 GB RAM, 37.4/166.8 GB disk)\n" + "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n" ] } ] @@ -461,29 +460,29 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "60647b99-e8d4-402c-f444-331bf6746da4" + "outputId": "de684b46-7623-4836-ee44-49cdb320cbf3" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": null, + "execution_count": 2, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False\n", - "YOLOv5 🚀 v6.2-56-g30e674b 
Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1\n", + "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 27.8MB/s]\n", + "100% 14.1M/14.1M [00:00<00:00, 162MB/s]\n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 14.8ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 20.1ms\n", - "Speed: 0.6ms pre-process, 17.4ms inference, 21.6ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 17.2ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 13.3ms\n", + "Speed: 0.5ms pre-process, 15.2ms inference, 19.5ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" ] } @@ -517,27 +516,27 @@ "base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": [ - "9b8caa3522fc4cbab31e13b5dfc7808d", - "574140e4c4bc48c9a171541a02cd0211", - "35e03ce5090346c9ae602891470fc555", - "c942c208e72d46568b476bb0f2d75496", - "65881db1db8a4e9c930fab9172d45143", - "60b913d755b34d638478e30705a2dde1", - "0856bea36ec148b68522ff9c9eb258d8", - "76879f6f2aa54637a7a07faeea2bd684", - "0ace3934ec6f4d36a1b3a9e086390926", - "d6b7a2243e0c4beca714d99dceec23d6", - "5966ba6e6f114d8c9d8d1d6b1bd4f4c7" + "13e0e8b77bf54b25b8893f0b4164315f", + "48037f2f7fea4012b9b341f6aee75297", + "3f3b925287274893baf5ed7bb0cf6635", + "c44bdca7c9784b20ba2146250ee744d6", + "5b0ed23cd32c4c7d8d9467b7425684ad", + "1e10b4db5d644cb78bd6e005bb34038a", + "a58728093ecb4eafb826bee11a84c549", + "9ce169fe4b8543c0b26d745daa230f18", + "d5da01aca8fb400c96e76f44c9403581", + "98cbaa572fdd4c42975f52015672b3a5", + "a636aa81f5cc453099c9e552f0986e63" ] }, - "outputId": "102dabed-bc31-42fe-9133-d9ce28a2c01e" + "outputId": "b1e02a1f-981f-4739-e75d-10d0204cc32d" }, "source": [ "# Download COCO val\n", "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" ], - "execution_count": null, + "execution_count": 3, "outputs": [ { "output_type": "display_data", @@ -548,7 +547,7 @@ "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, - "model_id": "9b8caa3522fc4cbab31e13b5dfc7808d" + "model_id": "13e0e8b77bf54b25b8893f0b4164315f" } }, "metadata": {} @@ -562,45 +561,43 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "daf60b1b-b098-4657-c863-584f4c9cf078" + "outputId": "9c2f755f-f383-4a9e-cd19-f73a0c763a9c" }, "source": [ "# Validate YOLOv5s on COCO val\n", "!python val.py --weights yolov5s.pt --data coco.yaml --img 640 --half" ], - "execution_count": null, + "execution_count": 4, "outputs": [ { "output_type": 
"stream", "name": "stdout", "text": [ - "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-56-g30e674b Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n", + "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "Downloading https://ultralytics.com/assets/Arial.ttf to /root/.config/Ultralytics/Arial.ttf...\n", - "100% 755k/755k [00:00<00:00, 52.7MB/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 10509.20it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:02<00:00, 2019.92it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Instances P R mAP@.5 mAP@.5:.95: 100% 157/157 [00:50<00:00, 3.10it/s]\n", + " Class Images Instances P R mAP50 mAP50-95: 100% 157/157 [01:09<00:00, 2.25it/s]\n", " all 5000 36335 0.67 0.521 0.566 0.371\n", - "Speed: 0.1ms pre-process, 1.0ms inference, 1.5ms NMS per image at shape (32, 3, 640, 640)\n", + "Speed: 0.2ms pre-process, 2.7ms inference, 2.1ms NMS per image at shape (32, 3, 640, 640)\n", "\n", "Evaluating pycocotools mAP... 
saving runs/val/exp/yolov5s_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.81s)\n", + "Done (t=0.41s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=5.62s)\n", + "DONE (t=6.19s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=77.03s).\n", + "DONE (t=75.81s).\n", "Accumulating evaluation results...\n", - "DONE (t=14.63s).\n", + "DONE (t=15.26s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.374\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.572\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.402\n", @@ -612,7 +609,7 @@ " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.566\n", " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.378\n", " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.625\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.724\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.723\n", "Results saved to \u001b[1mruns/val/exp\u001b[0m\n" ] } @@ -664,7 +661,8 @@ " %pip install -q comet_ml\n", " import comet_ml; comet_ml.init()\n", "elif logger == 'ClearML':\n", - " %pip install -q clearml && clearml-init" + " %pip install -q clearml\n", + " import clearml; clearml.browser_login()" ], "metadata": { "id": "i3oKtE4g-aNn" @@ -679,13 +677,13 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "baa6d4be-3379-4aab-844a-d5a5396c0e49" + "outputId": "7d03d4d2-9a6e-47de-88f4-c673b55c73c5" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": null, + "execution_count": 5, "outputs": [ { "output_type": "stream", @@ -693,17 +691,17 @@ "text": [ "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-56-g30e674b Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", - "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet' to automatically track and visualize YOLOv5 🚀 runs with Comet\n", "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and 
remotely train YOLOv5 🚀 in ClearML\n", + "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\n", "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", - "100% 6.66M/6.66M [00:00<00:00, 41.1MB/s]\n", - "Dataset download success ✅ (0.8s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.66M/6.66M [00:00<00:00, 26.1MB/s]\n", + "Dataset download success ✅ (0.9s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -731,120 +729,120 @@ " 22 [-1, 10] 1 0 models.common.Concat [1] \n", " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", " 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n", - "Model summary: 270 layers, 7235389 parameters, 7235389 gradients, 16.6 GFLOPs\n", + "Model summary: 214 layers, 7235389 parameters, 7235389 gradients, 16.6 GFLOPs\n", "\n", "Transferred 349/349 items from yolov5s.pt\n", "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 9659.25it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...126 found, 2 missing, 0 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 1989.66it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 951.31it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00 Date: Fri, 18 Nov 2022 16:32:29 +0100 Subject: [PATCH 260/326] Created using Colaboratory --- segment/tutorial.ipynb | 1500 +++++++++++++++------------------------- 1 file changed, 572 insertions(+), 928 deletions(-) diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index 079bfe3057bc..c26878fb0dbf 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -1,956 +1,600 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "t6MPjfT5NrKQ" - }, - "source": [ - "
\n", - "\n", - " \n", - " \n", - "\n", - "\n", - "
\n", - " \"Run\n", - " \"Open\n", - " \"Open\n", - "
\n", - "\n", - "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", - "\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7mGmQbAO5pQb" - }, - "source": [ - "# Setup\n", - "\n", - "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "t6MPjfT5NrKQ" + }, + "source": [ + "
\n", + "\n", + " \n", + " \n", + "\n", + "\n", + "
\n", + " \"Run\n", + " \"Open\n", + " \"Open\n", + "
\n", + "\n", + "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", + "\n", + "
" + ] }, - "id": "wbvMlHd_QwMG", - "outputId": "0f9ee467-cea4-48e8-9050-7a76ae1b6141" - }, - "outputs": [ { - "name": "stderr", - "output_type": "stream", - "text": [ - "YOLOv5 🚀 v6.2-225-gf223cb2 Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n" - ] + "cell_type": "markdown", + "metadata": { + "id": "7mGmQbAO5pQb" + }, + "source": [ + "# Setup\n", + "\n", + "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Setup complete ✅ (4 CPUs, 14.7 GB RAM, 107.3/196.6 GB disk)\n" - ] - } - ], - "source": [ - "!git clone https://github.com/ultralytics/yolov5 # clone\n", - "%cd yolov5\n", - "%pip install -qr requirements.txt # install\n", - "\n", - "import torch\n", - "import utils\n", - "display = utils.notebook_init() # checks" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "4JnkELT0cIJg" - }, - "source": [ - "# 1. Predict\n", - "\n", - "`segment/predict.py` runs YOLOv5 instance segmentation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict`. Example inference sources are:\n", - "\n", - "```shell\n", - "python segment/predict.py --source 0 # webcam\n", - " img.jpg # image \n", - " vid.mp4 # video\n", - " screen # screenshot\n", - " path/ # directory\n", - " 'path/*.jpg' # glob\n", - " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", - " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" + "cell_type": "code", + "execution_count": 1, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "wbvMlHd_QwMG", + "outputId": "d1e33dfc-9ad4-436e-f1e5-01acee40c029" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "YOLOv5 🚀 v6.2-251-g241d798 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n" + ] + } + ], + "source": [ + "!git clone https://github.com/ultralytics/yolov5 # clone\n", + "%cd yolov5\n", + "%pip install -qr requirements.txt # install\n", + "\n", + "import torch\n", + "import utils\n", + "display = utils.notebook_init() # checks" + ] }, - "id": "zR9ZbuQCH7FX", - "outputId": "60647b99-e8d4-402c-f444-331bf6746da4" - }, - "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[34m\u001b[1msegment/predict: \u001b[0mweights=['yolov5s-seg.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/predict-seg, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1, retina_masks=False\n", - "YOLOv5 🚀 v6.2-225-gf223cb2 Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", - "\n", - "Fusing layers... 
\n", - "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", - "image 1/2 /home/paguerrie/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 5.6ms\n", - "image 2/2 /home/paguerrie/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 5.5ms\n", - "Speed: 0.4ms pre-process, 5.6ms inference, 1.1ms NMS per image at shape (1, 3, 640, 640)\n", - "Results saved to \u001b[1mruns/predict-seg/exp\u001b[0m\n" - ] - } - ], - "source": [ - "!python segment/predict.py --weights yolov5s-seg.pt --img 640 --conf 0.25 --source data/images\n", - "#display.Image(filename='runs/predict-seg/exp/zidane.jpg', width=600)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "hkAzDWJ7cWTr" - }, - "source": [ - "        \n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0eq1SMWl6Sfn" - }, - "source": [ - "# 2. Validate\n", - "Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 49, - "referenced_widgets": [ - "9b8caa3522fc4cbab31e13b5dfc7808d", - "574140e4c4bc48c9a171541a02cd0211", - "35e03ce5090346c9ae602891470fc555", - "c942c208e72d46568b476bb0f2d75496", - "65881db1db8a4e9c930fab9172d45143", - "60b913d755b34d638478e30705a2dde1", - "0856bea36ec148b68522ff9c9eb258d8", - "76879f6f2aa54637a7a07faeea2bd684", - "0ace3934ec6f4d36a1b3a9e086390926", - "d6b7a2243e0c4beca714d99dceec23d6", - "5966ba6e6f114d8c9d8d1d6b1bd4f4c7" - ] + "cell_type": "markdown", + "metadata": { + "id": "4JnkELT0cIJg" + }, + "source": [ + "# 1. Predict\n", + "\n", + "`segment/predict.py` runs YOLOv5 instance segmentation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict`. Example inference sources are:\n", + "\n", + "```shell\n", + "python segment/predict.py --source 0 # webcam\n", + " img.jpg # image \n", + " vid.mp4 # video\n", + " screen # screenshot\n", + " path/ # directory\n", + " 'path/*.jpg' # glob\n", + " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", + " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", + "```" + ] }, - "id": "WQPtK1QYVaD_", - "outputId": "102dabed-bc31-42fe-9133-d9ce28a2c01e" - }, - "outputs": [ { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "89f5f0a84ca642378724f1bf05f17e0d", - "version_major": 2, - "version_minor": 0 + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "zR9ZbuQCH7FX", + "outputId": "e206fcec-cf42-4754-8a42-39bc3603eba8" }, - "text/plain": [ - " 0%| | 0.00/6.79M [00:00

\n", - "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", - "

\n", - "\n", - "Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n", - "\n", - "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", - "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", - "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", - "- **Training Results** are saved to `runs/train-seg/` with incrementing run directories, i.e. `runs/train-seg/exp2`, `runs/train-seg/exp3` etc.\n", - "

\n", - "\n", - "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", - "\n", - "## Train on Custom Data with Roboflow 🌟 NEW\n", - "\n", - "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", - "\n", - "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/](https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/?ref=ultralytics)\n", - "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1JTz7kpmHsg-5qwVz2d2IH3AaenI1tv0N?usp=sharing)\n", - "
\n", - "\n", - "

Label images lightning fast (including with model-assisted labeling)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "i3oKtE4g-aNn" - }, - "outputs": [], - "source": [ - "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", - "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", - "\n", - "if logger == 'TensorBoard':\n", - " %load_ext tensorboard\n", - " %tensorboard --logdir runs/train-seg\n", - "elif logger == 'Comet':\n", - " %pip install -q comet_ml\n", - " import comet_ml; comet_ml.init()\n", - "elif logger == 'ClearML':\n", - " %pip install -q clearml && clearml-init" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" + "cell_type": "markdown", + "metadata": { + "id": "hkAzDWJ7cWTr" + }, + "source": [ + "        \n", + "" + ] }, - "id": "1NcFxRcFdJ_O", - "outputId": "baa6d4be-3379-4aab-844a-d5a5396c0e49" - }, - "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n", - "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-225-gf223cb2 Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", - "\n", - "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", - "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n", - "\n", - " from n params module arguments \n", - " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", - " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", - " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n", - " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", - " 4 -1 2 115712 models.common.C3 [128, 128, 2] \n", - " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n", - " 6 -1 3 625152 models.common.C3 [256, 256, 3] \n", - " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n", - " 8 -1 1 1182720 models.common.C3 [512, 512, 1] \n", - " 9 -1 1 656896 models.common.SPPF [512, 512, 5] \n", - " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n", - " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", - " 12 [-1, 6] 1 0 models.common.Concat [1] \n", - " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n", - " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n", - " 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", - " 16 [-1, 4] 1 0 models.common.Concat [1] \n", - " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n", - " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] 
\n", - " 19 [-1, 14] 1 0 models.common.Concat [1] \n", - " 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n", - " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n", - " 22 [-1, 10] 1 0 models.common.Concat [1] \n", - " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", - " 24 [17, 20, 23] 1 615133 models.yolo.Segment [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], 32, 128, [128, 256, 512]]\n", - "Model summary: 225 layers, 7621277 parameters, 7621277 gradients, 26.6 GFLOPs\n", - "\n", - "Transferred 367/367 items from yolov5s-seg.pt\n", - "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", - "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/home/paguerrie/datasets/coco128-seg/labels/train2017.cache' im\u001b[0m\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100%|██████████| 128/128 [00:00<00:00, 544.41\u001b[0m\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/home/paguerrie/datasets/coco128-seg/labels/train2017.cache' imag\u001b[0m\n", - "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB ram): 100%|██████████| 128/128 [00:00<00:00, 138.66it\u001b[0m\n", - "\n", - "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.27 anchors/target, 0.994 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n", - "Plotting labels to runs/train-seg/exp/labels.jpg... \n", - "Image sizes 640 train, 640 val\n", - "Using 4 dataloader workers\n", - "Logging results to \u001b[1mruns/train-seg/exp\u001b[0m\n", - "Starting training for 3 epochs...\n", - "\n", - " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n", - " 0/2 4.67G 0.04464 0.05134 0.06548 0.01895 219 \n", - " Class Images Instances Box(P R mAP50 m\n", - " all 128 929 0.727 0.661 0.725 0.496 0.688 0.629 0.673 0.413\n", - "\n", - " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n", - " 1/2 6.36G 0.04102 0.04702 0.06873 0.01734 263 \n", - " Class Images Instances Box(P R mAP50 m\n", - " all 128 929 0.752 0.676 0.743 0.51 0.704 0.64 0.682 0.425\n", - "\n", - " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n", - " 2/2 6.36G 0.0421 0.04463 0.05951 0.01746 245 \n", - " Class Images Instances Box(P R mAP50 m\n", - " all 128 929 0.776 0.674 0.757 0.514 0.72 0.632 0.684 0.429\n", - "\n", - "3 epochs completed in 0.006 hours.\n", - "Optimizer stripped from runs/train-seg/exp/weights/last.pt, 15.6MB\n", - "Optimizer stripped from runs/train-seg/exp/weights/best.pt, 15.6MB\n", - "\n", - "Validating runs/train-seg/exp/weights/best.pt...\n", - "Fusing layers... 
\n", - "Model summary: 165 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", - " Class Images Instances Box(P R mAP50 m\n", - " all 128 929 0.775 0.673 0.758 0.515 0.72 0.632 0.684 0.427\n", - " person 128 254 0.829 0.745 0.833 0.545 0.776 0.697 0.764 0.406\n", - " bicycle 128 6 0.614 0.333 0.539 0.331 0.614 0.333 0.531 0.308\n", - " car 128 46 0.774 0.413 0.571 0.266 0.693 0.37 0.493 0.204\n", - " motorcycle 128 5 0.817 0.901 0.895 0.678 0.817 0.901 0.895 0.47\n", - " airplane 128 6 1 0.951 0.995 0.71 0.882 0.833 0.839 0.515\n", - " bus 128 7 0.695 0.714 0.757 0.661 0.695 0.714 0.757 0.627\n", - " train 128 3 1 0.935 0.995 0.566 1 0.935 0.995 0.731\n", - " truck 128 12 0.741 0.417 0.463 0.283 0.741 0.417 0.4 0.27\n", - " boat 128 6 0.653 0.32 0.452 0.17 0.653 0.32 0.328 0.149\n", - " traffic light 128 14 0.627 0.36 0.527 0.234 0.503 0.289 0.409 0.293\n", - " stop sign 128 2 0.829 1 0.995 0.747 0.829 1 0.995 0.821\n", - " bench 128 9 0.822 0.667 0.76 0.414 0.685 0.556 0.678 0.228\n", - " bird 128 16 0.967 1 0.995 0.675 0.906 0.938 0.909 0.516\n", - " cat 128 4 0.778 0.89 0.945 0.728 0.778 0.89 0.945 0.69\n", - " dog 128 9 1 0.65 0.973 0.697 1 0.65 0.939 0.615\n", - " horse 128 2 0.727 1 0.995 0.672 0.727 1 0.995 0.2\n", - " elephant 128 17 1 0.912 0.946 0.704 0.871 0.794 0.822 0.565\n", - " bear 128 1 0.626 1 0.995 0.895 0.626 1 0.995 0.895\n", - " zebra 128 4 0.865 1 0.995 0.934 0.865 1 0.995 0.822\n", - " giraffe 128 9 0.975 1 0.995 0.672 0.866 0.889 0.876 0.473\n", - " backpack 128 6 1 0.573 0.707 0.38 0.891 0.5 0.524 0.249\n", - " umbrella 128 18 0.744 0.889 0.926 0.552 0.465 0.556 0.483 0.262\n", - " handbag 128 19 0.799 0.209 0.432 0.225 0.799 0.209 0.403 0.201\n", - " tie 128 7 0.968 0.857 0.857 0.53 0.968 0.857 0.857 0.519\n", - " suitcase 128 4 0.821 1 0.995 0.696 0.821 1 0.995 0.665\n", - " frisbee 128 5 0.777 0.8 0.761 0.613 0.777 0.8 0.761 0.558\n", - " skis 128 1 0.721 1 0.995 0.497 0.721 1 0.995 0.398\n" - ] + "cell_type": "markdown", + "metadata": { + "id": "0eq1SMWl6Sfn" + }, + "source": [ + "# 2. Validate\n", + "Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." 
+ ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - " snowboard 128 7 0.851 0.857 0.887 0.599 0.284 0.286 0.253 0.151\n", - " sports ball 128 6 0.961 0.667 0.687 0.429 0.721 0.5 0.481 0.476\n", - " kite 128 10 0.508 0.312 0.48 0.238 0.508 0.312 0.406 0.122\n", - " baseball bat 128 4 0.331 0.5 0.526 0.249 0.331 0.5 0.376 0.102\n", - " baseball glove 128 7 0.876 0.571 0.579 0.282 0.657 0.429 0.429 0.343\n", - " skateboard 128 5 1 0.697 0.824 0.471 0.707 0.497 0.552 0.299\n", - " tennis racket 128 7 0.524 0.714 0.646 0.426 0.524 0.714 0.646 0.452\n", - " bottle 128 18 0.657 0.389 0.531 0.359 0.657 0.389 0.569 0.362\n", - " wine glass 128 16 0.752 0.938 0.924 0.435 0.451 0.562 0.568 0.341\n", - " cup 128 36 0.859 0.676 0.848 0.503 0.823 0.648 0.793 0.496\n", - " fork 128 6 0.904 0.333 0.462 0.309 0.452 0.167 0.195 0.107\n", - " knife 128 16 0.749 0.5 0.665 0.413 0.655 0.438 0.523 0.314\n", - " spoon 128 22 0.787 0.409 0.577 0.275 0.787 0.409 0.528 0.236\n", - " bowl 128 28 0.793 0.679 0.744 0.577 0.751 0.643 0.688 0.366\n", - " banana 128 1 0.931 1 0.995 0.398 0.931 1 0.995 0.497\n", - " sandwich 128 2 1 0 0.828 0.713 1 0 0.498 0.449\n", - " orange 128 4 0.588 1 0.995 0.666 0.588 1 0.995 0.672\n", - " broccoli 128 11 0.563 0.455 0.356 0.258 0.563 0.455 0.362 0.259\n", - " carrot 128 24 0.683 0.75 0.753 0.489 0.758 0.833 0.835 0.451\n", - " hot dog 128 2 0.583 1 0.995 0.995 0.583 1 0.995 0.796\n", - " pizza 128 5 0.801 0.8 0.962 0.644 0.801 0.8 0.962 0.583\n", - " donut 128 14 0.704 1 0.889 0.759 0.704 1 0.889 0.683\n", - " cake 128 4 0.904 1 0.995 0.896 0.904 1 0.995 0.838\n", - " chair 128 35 0.672 0.543 0.629 0.333 0.708 0.571 0.583 0.284\n", - " couch 128 6 0.827 0.5 0.821 0.583 0.827 0.5 0.681 0.352\n", - " potted plant 128 14 0.809 0.908 0.884 0.584 0.809 0.908 0.884 0.474\n", - " bed 128 3 1 0.654 0.913 0.36 1 0.654 0.913 0.418\n", - " dining table 128 13 0.803 0.385 0.557 0.361 0.321 0.154 0.126 0.0487\n", - " toilet 128 2 0.802 1 0.995 0.921 0.802 1 0.995 0.698\n", - " tv 128 2 0.59 1 0.995 0.846 0.59 1 0.995 0.846\n", - " laptop 128 3 1 0 0.451 0.324 1 0 0.372 0.157\n", - " mouse 128 2 1 0 0 0 1 0 0 0\n", - " remote 128 8 0.831 0.5 0.625 0.495 0.831 0.5 0.629 0.436\n", - " cell phone 128 8 0.867 0.375 0.482 0.26 0.578 0.25 0.302 0.127\n", - " microwave 128 3 0.782 1 0.995 0.695 0.782 1 0.995 0.585\n", - " oven 128 5 0.389 0.4 0.432 0.299 0.584 0.6 0.642 0.411\n", - " sink 128 6 0.657 0.5 0.491 0.373 0.657 0.5 0.436 0.303\n", - " refrigerator 128 5 0.729 0.8 0.778 0.547 0.729 0.8 0.778 0.496\n", - " book 128 29 0.77 0.231 0.451 0.186 0.77 0.231 0.399 0.136\n", - " clock 128 9 0.798 0.889 0.956 0.747 0.798 0.889 0.926 0.68\n", - " vase 128 2 0.437 1 0.995 0.895 0.437 1 0.995 0.796\n", - " scissors 128 1 0 0 0.0226 0.0113 0 0 0 0\n", - " teddy bear 128 21 0.815 0.629 0.877 0.521 0.753 0.582 0.793 0.435\n", - " toothbrush 128 5 1 0.719 0.995 0.737 1 0.719 0.995 0.606\n", - "Results saved to \u001b[1mruns/train-seg/exp\u001b[0m\n" - ] - } - ], - "source": [ - "# Train YOLOv5s on COCO128 for 3 epochs\n", - "!python segment/train.py --img 640 --batch 16 --epochs 3 --data coco128-seg.yaml --weights yolov5s-seg.pt --cache" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "15glLzbQx5u0" - }, - "source": [ - "# 4. Visualize" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "nWOsI5wJR1o3" - }, - "source": [ - "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. 
Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n",
    "\n",
    "Getting started is easy:\n",
    "```shell\n",
    "pip install comet_ml # 1. install\n",
    "export COMET_API_KEY= # 2. paste API key\n",
    "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n",
    "```\n",
    "\n",
    "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n",
    "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n",
    "\n",
    "\"yolo-ui\""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "Lay2WsTjNJzP"
   },
   "source": [
    "## ClearML Logging and Automation 🌟 NEW\n",
    "\n",
    "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML (check cells above):\n",
    "\n",
    "- `pip install clearml`\n",
    "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n",
    "\n",
    "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison, etc., but ClearML also tracks uncommitted changes and installed packages, for example. Thanks to that, ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n",
    "\n",
    "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n",
    "\n",
    "\n",
    "\"ClearML"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "-WPvRbS5Swl6"
   },
   "source": [
    "## Local Logging\n",
    "\n",
    "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n",
    "\n",
    "This directory contains train and val statistics, mosaics, labels, predictions and augmented mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. 
\n", - "\n", - "\"Local\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Zelyeqbyt3GD" - }, - "source": [ - "# Environments\n", - "\n", - "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", - "\n", - "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", - "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", - "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", - "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "6Qu7Iesl0p54" - }, - "source": [ - "# Status\n", - "\n", - "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", - "\n", - "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "IEijrePND_2I" - }, - "source": [ - "# Appendix\n", - "\n", - "Additional content below." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "GMusP4OAxFu6" - }, - "outputs": [], - "source": [ - "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", - "import torch\n", - "\n", - "model = torch.hub.load('ultralytics/yolov5', 'yolov5s-seg') # yolov5n - yolov5x6 or custom\n", - "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", - "results = model(im) # inference\n", - "results.print() # or .show(), .save(), .crop(), .pandas(), etc." 
- ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "collapsed_sections": [], - "machine_shape": "hm", - "name": "YOLOv5 Tutorial", - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.12" - }, - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "0856bea36ec148b68522ff9c9eb258d8": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "WQPtK1QYVaD_", + "outputId": "f7eba0ae-49d1-405b-a1cf-169212fadc2c" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels-segments.zip ...\n", + "Downloading http://images.cocodataset.org/zips/val2017.zip ...\n", + "######################################################################## 100.0%\n", + "######################################################################## 100.0%\n" + ] + } + ], + "source": [ + "# Download COCO val\n", + "!bash data/scripts/get_coco.sh --val --segments # download (780M - 5000 images)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "X58w8JLpMnjH", + "outputId": "73533135-6995-4f2d-adb0-3acb5ef9b300" + }, + "outputs": [ + { + "metadata": { + "tags": null + }, + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1msegment/val: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s-seg.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val-seg, name=exp, exist_ok=False, half=True, dnn=False\n", + "YOLOv5 🚀 v6.2-251-g241d798 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "Fusing layers... 
\n", + "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1420.92it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", + " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:54<00:00, 1.37it/s]\n", + " all 5000 36335 0.673 0.517 0.566 0.373 0.672 0.49 0.532 0.319\n", + "Speed: 0.9ms pre-process, 3.9ms inference, 3.0ms NMS per image at shape (32, 3, 640, 640)\n", + "Results saved to \u001b[1mruns/val-seg/exp\u001b[0m\n" + ] + } + ], + "source": [ + "# Validate YOLOv5s-seg on COCO val\n", + "!python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 --half" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZY2VXXXu74w5" + }, + "source": [ + "# 3. Train\n", + "\n", + "

\n", + "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", + "

\n", + "\n", + "Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n", + "\n", + "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", + "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", + "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", + "- **Training Results** are saved to `runs/train-seg/` with incrementing run directories, i.e. `runs/train-seg/exp2`, `runs/train-seg/exp3` etc.\n", + "

\n", + "\n", + "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", + "\n", + "## Train on Custom Data with Roboflow 🌟 NEW\n", + "\n", + "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", + "\n", + "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/](https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/?ref=ultralytics)\n", + "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1JTz7kpmHsg-5qwVz2d2IH3AaenI1tv0N?usp=sharing)\n", + "
\n", + "\n", + "

Label images lightning fast (including with model-assisted labeling)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "i3oKtE4g-aNn" + }, + "outputs": [], + "source": [ + "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", + "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", + "\n", + "if logger == 'TensorBoard':\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train-seg\n", + "elif logger == 'Comet':\n", + " %pip install -q comet_ml\n", + " import comet_ml; comet_ml.init()\n", + "elif logger == 'ClearML':\n", + " import clearml; clearml.browser_login()" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "1NcFxRcFdJ_O", + "outputId": "8e349df5-9910-4a91-a845-748def15d3d7" + }, + "outputs": [ + { + "metadata": { + "tags": null + }, + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n", + "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", + "YOLOv5 🚀 v6.2-251-g241d798 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", + "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n", + "\n", + "Dataset not found ⚠️, missing paths ['/content/datasets/coco128-seg/images/train2017']\n", + "Downloading https://ultralytics.com/assets/coco128-seg.zip to coco128-seg.zip...\n", + "100% 6.79M/6.79M [00:01<00:00, 4.42MB/s]\n", + "Dataset download success ✅ (2.8s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "\n", + " from n params module arguments \n", + " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", + " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", + " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n", + " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", + " 4 -1 2 115712 models.common.C3 [128, 128, 2] \n", + " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n", + " 6 -1 3 625152 models.common.C3 [256, 256, 3] \n", + " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n", + " 8 -1 1 1182720 models.common.C3 [512, 512, 1] \n", + " 9 -1 1 656896 models.common.SPPF [512, 512, 5] \n", + " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n", + " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 12 [-1, 6] 1 0 models.common.Concat [1] \n", + " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n", + " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] 
\n", + " 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 16 [-1, 4] 1 0 models.common.Concat [1] \n", + " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n", + " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n", + " 19 [-1, 14] 1 0 models.common.Concat [1] \n", + " 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n", + " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n", + " 22 [-1, 10] 1 0 models.common.Concat [1] \n", + " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", + " 24 [17, 20, 23] 1 615133 models.yolo.Segment [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], 32, 128, [128, 256, 512]]\n", + "Model summary: 225 layers, 7621277 parameters, 7621277 gradients, 26.6 GFLOPs\n", + "\n", + "Transferred 367/367 items from yolov5s-seg.pt\n", + "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", + "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n", + "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128-seg/labels/train2017' images and labels...126 found, 2 missing, 0 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 1383.68it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128-seg/labels/train2017.cache\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 241.77it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128-seg/labels/train2017.cache' images and labels... 126 found, 2 missing, 0 empty, 0 corrupt: 100% 128/128 [00:00 # 2. paste API key\n", + "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", + "```\n", + "\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", + "\n", + "\"yolo-ui\"" + ] }, - "574140e4c4bc48c9a171541a02cd0211": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_60b913d755b34d638478e30705a2dde1", - "placeholder": "​", - "style": "IPY_MODEL_0856bea36ec148b68522ff9c9eb258d8", - "value": "100%" - } + { + "cell_type": "markdown", + "metadata": { + "id": "Lay2WsTjNJzP" + }, + "source": [ + "## ClearML Logging and Automation 🌟 NEW\n", + "\n", + "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. 
To enable ClearML (check cells above):\n",
+        "\n",
+        "- `pip install clearml`\n",
+        "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n",
+        "\n",
+        "You'll get all the expected features of an experiment manager: live updates, model upload, experiment comparison and more. ClearML also tracks uncommitted changes and installed packages, so ClearML Tasks (which is what we call experiments) are reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n",
+        "\n",
+        "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID, as sketched above. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n",
+        "\n",
+        "\n",
+        "\"ClearML"
+      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "-WPvRbS5Swl6"
      },
      "source": [
        "## Local Logging\n",
+        "\n",
+        "Training results are automatically logged with [TensorBoard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train-seg`, with a new experiment directory created for each new training as `runs/train-seg/exp2`, `runs/train-seg/exp3`, etc.\n",
+        "\n",
+        "This directory contains train and val statistics, mosaics, labels, predictions and augmented mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. 
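Per-epoch metrics are also written to a `results.csv` file in the same directory. A minimal sketch of reading it (assumes the default `runs/train-seg/exp` experiment path and that pandas is installed):\n",
+        "\n",
+        "```python\n",
+        "import pandas as pd\n",
+        "\n",
+        "df = pd.read_csv('runs/train-seg/exp/results.csv')  # one row of metrics per epoch\n",
+        "print(df.columns.tolist())  # loss, precision, recall and mAP columns\n",
+        "```\n",
+        "\n",
+        "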
\n", + "\n", + "\"Local\n" + ] }, - "60b913d755b34d638478e30705a2dde1": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } + { + "cell_type": "markdown", + "metadata": { + "id": "Zelyeqbyt3GD" + }, + "source": [ + "# Environments\n", + "\n", + "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", + "\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", + "- **Docker Image**. 
See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n"
+      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "6Qu7Iesl0p54"
      },
      "source": [
        "# Status\n",
+        "\n",
+        "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n",
+        "\n",
+        "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n"
+      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "IEijrePND_2I"
      },
      "source": [
        "# Appendix\n",
+        "\n",
+        "Additional 
content below." + ] }, - "9b8caa3522fc4cbab31e13b5dfc7808d": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HBoxModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_574140e4c4bc48c9a171541a02cd0211", - "IPY_MODEL_35e03ce5090346c9ae602891470fc555", - "IPY_MODEL_c942c208e72d46568b476bb0f2d75496" - ], - "layout": "IPY_MODEL_65881db1db8a4e9c930fab9172d45143" - } + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GMusP4OAxFu6" + }, + "outputs": [], + "source": [ + "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", + "import torch\n", + "\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s-seg') # yolov5n - yolov5x6 or custom\n", + "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", + "results = model(im) # inference\n", + "results.print() # or .show(), .save(), .crop(), .pandas(), etc." + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "machine_shape": "hm", + "name": "YOLOv5 Tutorial", + "provenance": [], + "toc_visible": true }, - "c942c208e72d46568b476bb0f2d75496": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_d6b7a2243e0c4beca714d99dceec23d6", - "placeholder": "​", - "style": "IPY_MODEL_5966ba6e6f114d8c9d8d1d6b1bd4f4c7", - "value": " 780M/780M [02:19<00:00, 6.24MB/s]" - } + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" }, - "d6b7a2243e0c4beca714d99dceec23d6": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", 
+ "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.12" } - } - } - }, - "nbformat": 4, - "nbformat_minor": 1 -} + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file From 74b3886edd55bc9b681b8a956275abb9e6e1e2cd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 21:18:57 +0100 Subject: [PATCH 261/326] Simplify dataloader tqdm descriptions (#10210) * Simplify dataloader tqdm descriptions @AyushExel this should help our tqdm dataloader messages fit better within a single line in our Colab notebooks and also help avoid confusion about missing/empty labels, now combined into 'backgrounds'. Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dataloaders.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/dataloaders.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 0418293a6e21..39db3c0dfd21 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -31,8 +31,8 @@ from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, cutout, letterbox, mixup, random_perspective) from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, - colorstr, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, xywh2xyxy, - xywhn2xyxy, xyxy2xywhn) + cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, xywh2xyxy, xywhn2xyxy, + xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -493,7 +493,7 @@ def __init__(self, # Display cache nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total if exists and LOCAL_RANK in {-1, 0}: - d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupt" + d = f"Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt" tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=BAR_FORMAT) # display cache results if cache['msgs']: LOGGER.info('\n'.join(cache['msgs'])) # display warnings @@ -607,7 +607,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages - desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." + desc = f"{prefix}Scanning {path.parent / path.stem}..." 
with Pool(NUM_THREADS) as pool: pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), desc=desc, @@ -622,7 +622,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): x[im_file] = [lb, shape, segments] if msg: msgs.append(msg) - pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupt" + pbar.desc = f"{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt" pbar.close() if msgs: From 0322bb31962d68caefa0c0c5880d80d27e8ab8ec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 21:39:13 +0100 Subject: [PATCH 262/326] New global `TQDM_BAR_FORMAT` (#10211) * New global TQDM_BAR_FORMAT * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- classify/train.py | 6 +++--- classify/val.py | 5 +++-- segment/train.py | 10 +++++----- segment/val.py | 8 ++++---- train.py | 11 ++++++----- utils/autoanchor.py | 4 ++-- utils/dataloaders.py | 15 +++++++-------- utils/general.py | 1 + val.py | 8 ++++---- 9 files changed, 35 insertions(+), 33 deletions(-) diff --git a/classify/train.py b/classify/train.py index 178ebcdfff53..4422ca26b0ae 100644 --- a/classify/train.py +++ b/classify/train.py @@ -40,8 +40,8 @@ from models.experimental import attempt_load from models.yolo import ClassificationModel, DetectionModel from utils.dataloaders import create_classification_dataloader -from utils.general import (DATASETS_DIR, LOGGER, WorkingDirectory, check_git_status, check_requirements, colorstr, - download, increment_path, init_seeds, print_args, yaml_save) +from utils.general import (DATASETS_DIR, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_status, + check_requirements, colorstr, download, increment_path, init_seeds, print_args, yaml_save) from utils.loggers import GenericLogger from utils.plots import imshow_cls from utils.torch_utils import (ModelEMA, model_info, reshape_classifier_output, select_device, smart_DDP, @@ -174,7 +174,7 @@ def train(opt, device): trainloader.sampler.set_epoch(epoch) pbar = enumerate(trainloader) if RANK in {-1, 0}: - pbar = tqdm(enumerate(trainloader), total=len(trainloader), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') + pbar = tqdm(enumerate(trainloader), total=len(trainloader), bar_format=TQDM_BAR_FORMAT) for i, (images, labels) in pbar: # progress bar images, labels = images.to(device, non_blocking=True), labels.to(device) diff --git a/classify/val.py b/classify/val.py index c0b507785fb0..8657036fb2a2 100644 --- a/classify/val.py +++ b/classify/val.py @@ -36,7 +36,8 @@ from models.common import DetectMultiBackend from utils.dataloaders import create_classification_dataloader -from utils.general import LOGGER, Profile, check_img_size, check_requirements, colorstr, increment_path, print_args +from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_img_size, check_requirements, colorstr, + increment_path, print_args) from utils.torch_utils import select_device, smart_inference_mode @@ -100,7 +101,7 @@ def run( n = len(dataloader) # number of batches action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing' desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}" - bar = tqdm(dataloader, desc, n, not training, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}', position=0) + bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0) with torch.cuda.amp.autocast(enabled=device.type != 'cpu'): 
for images, labels in bar: with dt[0]: diff --git a/segment/train.py b/segment/train.py index f067918e7c3c..2a0793d1aa3e 100644 --- a/segment/train.py +++ b/segment/train.py @@ -46,10 +46,10 @@ from utils.autobatch import check_train_batch_size from utils.callbacks import Callbacks from utils.downloads import attempt_download, is_url -from utils.general import (LOGGER, check_amp, check_dataset, check_file, check_git_status, check_img_size, - check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, - init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, one_cycle, - print_args, print_mutation, strip_optimizer, yaml_save) +from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, + check_img_size, check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, + increment_path, init_seeds, intersect_dicts, labels_to_class_weights, + labels_to_image_weights, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) from utils.loggers import GenericLogger from utils.plots import plot_evolve, plot_labels from utils.segment.dataloaders import create_dataloader @@ -277,7 +277,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio LOGGER.info(('\n' + '%11s' * 8) % ('Epoch', 'GPU_mem', 'box_loss', 'seg_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size')) if RANK in {-1, 0}: - pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar optimizer.zero_grad() for i, (imgs, targets, paths, _, masks) in pbar: # batch ------------------------------------------------------ # callbacks.run('on_train_batch_start') diff --git a/segment/val.py b/segment/val.py index a875b3b79907..9bb8f9e4cf54 100644 --- a/segment/val.py +++ b/segment/val.py @@ -42,9 +42,9 @@ from models.common import DetectMultiBackend from models.yolo import SegmentationModel from utils.callbacks import Callbacks -from utils.general import (LOGGER, NUM_THREADS, Profile, check_dataset, check_img_size, check_requirements, check_yaml, - coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, - scale_boxes, xywh2xyxy, xyxy2xywh) +from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, + check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, + non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, box_iou from utils.plots import output_to_target, plot_val_study from utils.segment.dataloaders import create_dataloader @@ -237,7 +237,7 @@ def run( loss = torch.zeros(4, device=device) jdict, stats = [], [] # callbacks.run('on_val_start') - pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar): # callbacks.run('on_val_batch_start') with dt[0]: diff --git a/train.py b/train.py index 1fe6cf4d9ebd..bbbd6d07db00 100644 --- a/train.py +++ b/train.py @@ -47,10 +47,11 @@ from utils.callbacks import Callbacks from utils.dataloaders import create_dataloader from utils.downloads import attempt_download, is_url -from utils.general import (LOGGER, check_amp, check_dataset, check_file, check_git_status, check_img_size, - check_requirements, check_suffix, check_yaml, colorstr, 
get_latest_run, increment_path, - init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, - one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) +from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, + check_img_size, check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, + increment_path, init_seeds, intersect_dicts, labels_to_class_weights, + labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer, + yaml_save) from utils.loggers import Loggers from utils.loggers.comet.comet_utils import check_comet_resume from utils.loss import ComputeLoss @@ -275,7 +276,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio pbar = enumerate(train_loader) LOGGER.info(('\n' + '%11s' * 7) % ('Epoch', 'GPU_mem', 'box_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size')) if RANK in {-1, 0}: - pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar optimizer.zero_grad() for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- callbacks.run('on_train_batch_start') diff --git a/utils/autoanchor.py b/utils/autoanchor.py index cfc4c276e3aa..bb5cf6e6965e 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -11,7 +11,7 @@ from tqdm import tqdm from utils import TryExcept -from utils.general import LOGGER, colorstr +from utils.general import LOGGER, TQDM_BAR_FORMAT, colorstr PREFIX = colorstr('AutoAnchor: ') @@ -153,7 +153,7 @@ def print_results(k, verbose=True): # Evolve f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma - pbar = tqdm(range(gen), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + pbar = tqdm(range(gen), bar_format=TQDM_BAR_FORMAT) # progress bar for _ in pbar: v = np.ones(sh) while (v == 1).all(): # mutate until a change occurs (prevent duplicates) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 39db3c0dfd21..e107d1a2bccf 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -29,17 +29,16 @@ from tqdm import tqdm from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, - cutout, letterbox, mixup, random_perspective) -from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, - cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, xywh2xyxy, xywhn2xyxy, - xyxy2xywhn) + letterbox, mixup, random_perspective) +from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, check_dataset, check_requirements, + check_yaml, clean_str, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, + xywh2xyxy, xywhn2xyxy, xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first # Parameters HELP_URL = 'See https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes -BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}' # tqdm bar format LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) PIN_MEMORY = str(os.getenv('PIN_MEMORY', 
True)).lower() == 'true' # global pin_memory for dataloaders @@ -494,7 +493,7 @@ def __init__(self, nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total if exists and LOCAL_RANK in {-1, 0}: d = f"Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt" - tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=BAR_FORMAT) # display cache results + tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT) # display cache results if cache['msgs']: LOGGER.info('\n'.join(cache['msgs'])) # display warnings assert nf > 0 or not augment, f'{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}' @@ -576,7 +575,7 @@ def __init__(self, self.im_hw0, self.im_hw = [None] * n, [None] * n fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image results = ThreadPool(NUM_THREADS).imap(fcn, range(n)) - pbar = tqdm(enumerate(results), total=n, bar_format=BAR_FORMAT, disable=LOCAL_RANK > 0) + pbar = tqdm(enumerate(results), total=n, bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0) for i, x in pbar: if cache_images == 'disk': b += self.npy_files[i].stat().st_size @@ -612,7 +611,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), desc=desc, total=len(self.im_files), - bar_format=BAR_FORMAT) + bar_format=TQDM_BAR_FORMAT) for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: nm += nm_f nf += nf_f diff --git a/utils/general.py b/utils/general.py index c543a237d25b..58181f00568d 100644 --- a/utils/general.py +++ b/utils/general.py @@ -50,6 +50,7 @@ DATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode +TQDM_BAR_FORMAT = '{l_bar}{bar:10}| {n_fmt}/{total_fmt} {elapsed}' # tqdm bar format FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf torch.set_printoptions(linewidth=320, precision=5, profile='long') diff --git a/val.py b/val.py index 127acf810029..ef282e37bdc1 100644 --- a/val.py +++ b/val.py @@ -38,9 +38,9 @@ from models.common import DetectMultiBackend from utils.callbacks import Callbacks from utils.dataloaders import create_dataloader -from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_yaml, - coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, - scale_boxes, xywh2xyxy, xyxy2xywh) +from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, check_requirements, + check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, + print_args, scale_boxes, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, ap_per_class, box_iou from utils.plots import output_to_target, plot_images, plot_val_study from utils.torch_utils import select_device, smart_inference_mode @@ -193,7 +193,7 @@ def run( loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] callbacks.run('on_val_start') - pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar for batch_i, (im, targets, paths, shapes) in enumerate(pbar): callbacks.run('on_val_batch_start') with dt[0]: From 
6f377f9d8a7f24a0766d2cfdef6d1e18873d33f9 Mon Sep 17 00:00:00 2001 From: Paul Guerrie <97041392+paulguerrie@users.noreply.github.com> Date: Fri, 18 Nov 2022 14:05:45 -0700 Subject: [PATCH 263/326] Feature/classification tutorial refactor (#10039) * Added a tutorial notebook for classification. * Split a cell so that there is less room for error when a user pastes their own code snippet. Also added an active learning section at the end. * Added a section to the classification tutorial notebook about the various methods of input for `classify/predict.py`. * Updated link to colab * WIP commit to show some of the errors when trying to match the main tutorial. * Refactored the classification tutorial to be closer to the main tutorial. * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: Ayush Chaurasia Co-authored-by: Glenn Jocher --- classify/tutorial.ipynb | 1843 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 1843 insertions(+) create mode 100644 classify/tutorial.ipynb diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb new file mode 100644 index 000000000000..8ed8b5db8a35 --- /dev/null +++ b/classify/tutorial.ipynb @@ -0,0 +1,1843 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "t6MPjfT5NrKQ" + }, + "source": [ + "
\n", + "\n", + " \n", + " \n", + "\n", + "\n", + "
\n", + " \"Run\n", + " \"Open\n", + " \"Open\n", + "
\n", + "\n", + "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7mGmQbAO5pQb" + }, + "source": [ + "# Setup\n", + "\n", + "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "wbvMlHd_QwMG", + "outputId": "0f9ee467-cea4-48e8-9050-7a76ae1b6141" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Setup complete ✅ (4 CPUs, 14.7 GB RAM, 152.0/196.6 GB disk)\n" + ] + } + ], + "source": [ + "!git clone https://github.com/ultralytics/yolov5 # clone\n", + "%cd yolov5\n", + "%pip install -qr requirements.txt # install\n", + "\n", + "import torch\n", + "import utils\n", + "display = utils.notebook_init() # checks" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4JnkELT0cIJg" + }, + "source": [ + "# 1. Predict\n", + "\n", + "`classify/predict.py` runs YOLOv5 Classifcation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-cls`. Example inference sources are:\n", + "\n", + "```shell\n", + "python classify/predict.py --source 0 # webcam\n", + " img.jpg # image \n", + " vid.mp4 # video\n", + " screen # screenshot\n", + " path/ # directory\n", + " 'path/*.jpg' # glob\n", + " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", + " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "zR9ZbuQCH7FX", + "outputId": "60647b99-e8d4-402c-f444-331bf6746da4" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mclassify/predict: \u001b[0mweights=['yolov5s-cls.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], device=, view_img=False, save_txt=True, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1\n", + "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", + "\n", + "Fusing layers... 
\n", + "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", + "image 1/2 /home/paguerrie/yolov5/data/images/bus.jpg: 640x640 minibus 0.01, recreational vehicle 0.01, ambulance 0.01, tram 0.01, trolleybus 0.01, 2.6ms\n", + "image 2/2 /home/paguerrie/yolov5/data/images/zidane.jpg: 640x640 suit 0.05, bow tie 0.01, ping-pong ball 0.01, microphone 0.01, bassoon 0.01, 2.8ms\n", + "Speed: 1.2ms pre-process, 2.7ms inference, 0.1ms NMS per image at shape (1, 3, 640, 640)\n", + "Results saved to \u001b[1mruns/predict-cls/exp\u001b[0m\n", + "2 labels saved to runs/predict-cls/exp/labels\n" + ] + } + ], + "source": [ + "!python classify/predict.py --weights yolov5s-cls.pt --img 640 --source data/images\n", + "# display.Image(filename='runs/predict-cls/exp/zidane.jpg', width=600)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hkAzDWJ7cWTr" + }, + "source": [ + "        \n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0eq1SMWl6Sfn" + }, + "source": [ + "# 2. Validate\n", + "Validate a model's accuracy on the [Imagenet](https://image-net.org/) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 49, + "referenced_widgets": [ + "9b8caa3522fc4cbab31e13b5dfc7808d", + "574140e4c4bc48c9a171541a02cd0211", + "35e03ce5090346c9ae602891470fc555", + "c942c208e72d46568b476bb0f2d75496", + "65881db1db8a4e9c930fab9172d45143", + "60b913d755b34d638478e30705a2dde1", + "0856bea36ec148b68522ff9c9eb258d8", + "76879f6f2aa54637a7a07faeea2bd684", + "0ace3934ec6f4d36a1b3a9e086390926", + "d6b7a2243e0c4beca714d99dceec23d6", + "5966ba6e6f114d8c9d8d1d6b1bd4f4c7" + ] + }, + "id": "WQPtK1QYVaD_", + "outputId": "102dabed-bc31-42fe-9133-d9ce28a2c01e" + }, + "outputs": [], + "source": [ + "# Download Imagenet val\n", + "!bash data/scripts/get_imagenet.sh --val" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "X58w8JLpMnjH", + "outputId": "daf60b1b-b098-4657-c863-584f4c9cf078" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mclassify/val: \u001b[0mdata=../datasets/imagenet, weights=['yolov5s-cls.pt'], batch_size=128, imgsz=320, device=, workers=8, verbose=True, project=runs/val-cls, name=exp, exist_ok=False, half=True, dnn=False\n", + "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", + "\n", + "Fusing layers... 
\n", + "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", + "validating: 100%|██████████| 391/391 [02:36<00:00, 2.49it/s] \n", + " Class Images top1_acc top5_acc\n", + " all 50000 0.734 0.914\n", + " tench 50 0.92 0.98\n", + " goldfish 50 0.86 0.98\n", + " great white shark 50 0.76 0.94\n", + " tiger shark 50 0.84 0.96\n", + " hammerhead shark 50 0.88 0.98\n", + " electric ray 50 0.76 0.88\n", + " stingray 50 0.74 0.94\n", + " cock 50 0.74 0.94\n", + " hen 50 0.86 0.96\n", + " ostrich 50 0.98 1\n", + " brambling 50 0.9 0.98\n", + " goldfinch 50 0.92 1\n", + " house finch 50 0.92 1\n", + " junco 50 0.98 1\n", + " indigo bunting 50 0.86 0.94\n", + " American robin 50 0.94 1\n", + " bulbul 50 0.88 0.92\n", + " jay 50 0.92 0.98\n", + " magpie 50 0.9 0.98\n", + " chickadee 50 0.96 1\n", + " American dipper 50 0.86 0.92\n", + " kite 50 0.8 0.94\n", + " bald eagle 50 0.9 0.98\n", + " vulture 50 0.96 1\n", + " great grey owl 50 0.96 0.98\n", + " fire salamander 50 0.96 0.98\n", + " smooth newt 50 0.66 0.98\n", + " newt 50 0.74 0.84\n", + " spotted salamander 50 0.9 0.98\n", + " axolotl 50 0.9 0.98\n", + " American bullfrog 50 0.8 0.92\n", + " tree frog 50 0.8 0.94\n", + " tailed frog 50 0.5 0.82\n", + " loggerhead sea turtle 50 0.7 0.92\n", + " leatherback sea turtle 50 0.58 0.8\n", + " mud turtle 50 0.58 0.84\n", + " terrapin 50 0.52 0.98\n", + " box turtle 50 0.88 1\n", + " banded gecko 50 0.78 0.9\n", + " green iguana 50 0.78 0.92\n", + " Carolina anole 50 0.62 0.98\n", + "desert grassland whiptail lizard 50 0.88 0.96\n", + " agama 50 0.78 0.96\n", + " frilled-necked lizard 50 0.82 0.94\n", + " alligator lizard 50 0.64 0.84\n", + " Gila monster 50 0.76 0.86\n", + " European green lizard 50 0.5 0.96\n", + " chameleon 50 0.78 0.9\n", + " Komodo dragon 50 0.9 1\n", + " Nile crocodile 50 0.66 0.92\n", + " American alligator 50 0.78 0.98\n", + " triceratops 50 0.96 0.98\n", + " worm snake 50 0.76 0.9\n", + " ring-necked snake 50 0.84 0.96\n", + " eastern hog-nosed snake 50 0.62 0.86\n", + " smooth green snake 50 0.64 0.96\n", + " kingsnake 50 0.78 0.94\n", + " garter snake 50 0.86 0.98\n", + " water snake 50 0.78 0.92\n", + " vine snake 50 0.72 0.86\n", + " night snake 50 0.34 0.86\n", + " boa constrictor 50 0.8 0.96\n", + " African rock python 50 0.52 0.82\n", + " Indian cobra 50 0.8 0.94\n", + " green mamba 50 0.56 0.92\n", + " sea snake 50 0.76 0.94\n", + " Saharan horned viper 50 0.48 0.88\n", + "eastern diamondback rattlesnake 50 0.72 0.92\n", + " sidewinder 50 0.38 0.92\n", + " trilobite 50 0.98 0.98\n", + " harvestman 50 0.86 0.94\n", + " scorpion 50 0.88 0.94\n", + " yellow garden spider 50 0.88 0.96\n", + " barn spider 50 0.38 0.96\n", + " European garden spider 50 0.6 0.98\n", + " southern black widow 50 0.84 0.98\n", + " tarantula 50 0.94 0.98\n", + " wolf spider 50 0.7 0.92\n", + " tick 50 0.76 0.82\n", + " centipede 50 0.74 0.86\n", + " black grouse 50 0.88 0.98\n", + " ptarmigan 50 0.84 0.98\n", + " ruffed grouse 50 0.9 1\n", + " prairie grouse 50 0.9 0.96\n", + " peacock 50 0.9 0.9\n", + " quail 50 0.88 0.94\n", + " partridge 50 0.66 0.94\n", + " grey parrot 50 0.94 0.98\n", + " macaw 50 0.92 0.98\n", + "sulphur-crested cockatoo 50 0.94 0.98\n", + " lorikeet 50 0.98 1\n", + " coucal 50 0.9 0.92\n", + " bee eater 50 0.96 0.98\n", + " hornbill 50 0.86 0.98\n", + " hummingbird 50 0.9 0.98\n", + " jacamar 50 0.94 0.94\n", + " toucan 50 0.84 0.94\n", + " duck 50 0.78 0.94\n", + " red-breasted merganser 50 0.94 0.98\n", + " goose 50 0.76 0.98\n", + " black swan 50 
0.94 1\n", + " tusker 50 0.58 0.92\n", + " echidna 50 1 1\n", + " platypus 50 0.72 0.84\n", + " wallaby 50 0.86 0.92\n", + " koala 50 0.84 0.98\n", + " wombat 50 0.82 0.86\n", + " jellyfish 50 0.94 0.96\n", + " sea anemone 50 0.66 0.98\n", + " brain coral 50 0.9 0.96\n", + " flatworm 50 0.76 1\n", + " nematode 50 0.9 0.92\n", + " conch 50 0.74 0.92\n", + " snail 50 0.78 0.86\n", + " slug 50 0.78 0.9\n", + " sea slug 50 0.94 0.98\n", + " chiton 50 0.86 0.96\n", + " chambered nautilus 50 0.86 0.94\n", + " Dungeness crab 50 0.86 0.96\n", + " rock crab 50 0.66 0.88\n", + " fiddler crab 50 0.64 0.88\n", + " red king crab 50 0.78 0.92\n", + " American lobster 50 0.78 0.96\n", + " spiny lobster 50 0.78 0.88\n", + " crayfish 50 0.56 0.84\n", + " hermit crab 50 0.82 0.96\n", + " isopod 50 0.62 0.74\n", + " white stork 50 0.88 0.94\n", + " black stork 50 0.86 0.96\n", + " spoonbill 50 0.96 1\n", + " flamingo 50 0.94 1\n", + " little blue heron 50 0.92 0.98\n", + " great egret 50 0.9 0.98\n", + " bittern 50 0.9 0.92\n", + " crane (bird) 50 0.64 0.94\n", + " limpkin 50 0.96 0.98\n", + " common gallinule 50 0.96 0.96\n", + " American coot 50 0.94 1\n", + " bustard 50 0.96 0.98\n", + " ruddy turnstone 50 0.96 1\n", + " dunlin 50 0.86 0.94\n", + " common redshank 50 0.92 0.96\n", + " dowitcher 50 0.9 1\n", + " oystercatcher 50 0.9 0.96\n", + " pelican 50 0.96 1\n", + " king penguin 50 0.88 0.92\n", + " albatross 50 0.9 0.98\n", + " grey whale 50 0.86 0.94\n", + " killer whale 50 0.9 0.98\n", + " dugong 50 0.88 0.94\n", + " sea lion 50 0.78 0.98\n", + " Chihuahua 50 0.56 0.82\n", + " Japanese Chin 50 0.7 0.98\n", + " Maltese 50 0.86 0.94\n", + " Pekingese 50 0.84 0.94\n", + " Shih Tzu 50 0.68 0.94\n", + " King Charles Spaniel 50 0.92 0.98\n", + " Papillon 50 0.92 0.94\n", + " toy terrier 50 0.48 0.96\n", + " Rhodesian Ridgeback 50 0.76 0.94\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + " Afghan Hound 50 0.9 0.98\n", + " Basset Hound 50 0.78 0.9\n", + " Beagle 50 0.82 0.98\n", + " Bloodhound 50 0.5 0.78\n", + " Bluetick Coonhound 50 0.84 0.94\n", + " Black and Tan Coonhound 50 0.46 0.8\n", + "Treeing Walker Coonhound 50 0.58 0.98\n", + " English foxhound 50 0.24 0.8\n", + " Redbone Coonhound 50 0.66 0.92\n", + " borzoi 50 0.94 1\n", + " Irish Wolfhound 50 0.64 0.9\n", + " Italian Greyhound 50 0.8 0.98\n", + " Whippet 50 0.82 0.98\n", + " Ibizan Hound 50 0.64 0.92\n", + " Norwegian Elkhound 50 0.88 1\n", + " Otterhound 50 0.58 0.9\n", + " Saluki 50 0.72 0.92\n", + " Scottish Deerhound 50 0.86 1\n", + " Weimaraner 50 0.88 0.96\n", + "Staffordshire Bull Terrier 50 0.62 0.92\n", + "American Staffordshire Terrier 50 0.66 0.92\n", + " Bedlington Terrier 50 0.82 0.96\n", + " Border Terrier 50 0.9 0.98\n", + " Kerry Blue Terrier 50 0.82 1\n", + " Irish Terrier 50 0.74 0.94\n", + " Norfolk Terrier 50 0.74 0.92\n", + " Norwich Terrier 50 0.68 0.98\n", + " Yorkshire Terrier 50 0.66 0.88\n", + " Wire Fox Terrier 50 0.66 0.96\n", + " Lakeland Terrier 50 0.82 0.94\n", + " Sealyham Terrier 50 0.74 0.9\n", + " Airedale Terrier 50 0.82 0.9\n", + " Cairn Terrier 50 0.82 0.94\n", + " Australian Terrier 50 0.48 0.84\n", + " Dandie Dinmont Terrier 50 0.84 0.9\n", + " Boston Terrier 50 0.88 1\n", + " Miniature Schnauzer 50 0.7 0.92\n", + " Giant Schnauzer 50 0.82 1\n", + " Standard Schnauzer 50 0.72 0.98\n", + " Scottish Terrier 50 0.78 0.94\n", + " Tibetan Terrier 50 0.64 0.98\n", + "Australian Silky Terrier 50 0.72 0.96\n", + "Soft-coated Wheaten Terrier 50 0.86 0.98\n", + "West Highland White 
Terrier 50 0.94 0.98\n", + " Lhasa Apso 50 0.66 0.96\n", + " Flat-Coated Retriever 50 0.78 1\n", + " Curly-coated Retriever 50 0.84 0.96\n", + " Golden Retriever 50 0.88 0.96\n", + " Labrador Retriever 50 0.82 0.94\n", + "Chesapeake Bay Retriever 50 0.86 0.98\n", + "German Shorthaired Pointer 50 0.84 0.96\n", + " Vizsla 50 0.7 0.94\n", + " English Setter 50 0.8 1\n", + " Irish Setter 50 0.78 0.9\n", + " Gordon Setter 50 0.84 0.92\n", + " Brittany 50 0.86 0.98\n", + " Clumber Spaniel 50 0.9 0.96\n", + "English Springer Spaniel 50 0.96 1\n", + " Welsh Springer Spaniel 50 0.92 1\n", + " Cocker Spaniels 50 0.7 0.96\n", + " Sussex Spaniel 50 0.7 0.88\n", + " Irish Water Spaniel 50 0.86 0.94\n", + " Kuvasz 50 0.7 0.92\n", + " Schipperke 50 0.94 0.98\n", + " Groenendael 50 0.78 0.92\n", + " Malinois 50 0.92 0.98\n", + " Briard 50 0.6 0.84\n", + " Australian Kelpie 50 0.74 0.96\n", + " Komondor 50 0.9 0.96\n", + " Old English Sheepdog 50 0.94 0.98\n", + " Shetland Sheepdog 50 0.72 0.94\n", + " collie 50 0.6 0.96\n", + " Border Collie 50 0.82 0.96\n", + " Bouvier des Flandres 50 0.78 0.96\n", + " Rottweiler 50 0.94 0.98\n", + " German Shepherd Dog 50 0.76 0.98\n", + " Dobermann 50 0.74 1\n", + " Miniature Pinscher 50 0.76 0.96\n", + "Greater Swiss Mountain Dog 50 0.66 0.94\n", + " Bernese Mountain Dog 50 0.94 1\n", + " Appenzeller Sennenhund 50 0.3 1\n", + " Entlebucher Sennenhund 50 0.72 0.98\n", + " Boxer 50 0.7 0.92\n", + " Bullmastiff 50 0.8 0.98\n", + " Tibetan Mastiff 50 0.92 0.98\n", + " French Bulldog 50 0.86 0.98\n", + " Great Dane 50 0.6 0.92\n", + " St. Bernard 50 0.94 1\n", + " husky 50 0.5 0.94\n", + " Alaskan Malamute 50 0.76 0.96\n", + " Siberian Husky 50 0.56 0.98\n", + " Dalmatian 50 0.94 0.98\n", + " Affenpinscher 50 0.76 0.92\n", + " Basenji 50 0.9 1\n", + " pug 50 0.96 0.98\n", + " Leonberger 50 0.98 1\n", + " Newfoundland 50 0.82 0.96\n", + " Pyrenean Mountain Dog 50 0.76 0.94\n", + " Samoyed 50 0.9 0.98\n", + " Pomeranian 50 0.96 1\n", + " Chow Chow 50 0.88 0.96\n", + " Keeshond 50 0.94 1\n", + " Griffon Bruxellois 50 0.92 0.98\n", + " Pembroke Welsh Corgi 50 0.9 0.98\n", + " Cardigan Welsh Corgi 50 0.7 0.94\n", + " Toy Poodle 50 0.52 0.96\n", + " Miniature Poodle 50 0.56 0.92\n", + " Standard Poodle 50 0.78 0.96\n", + " Mexican hairless dog 50 0.86 0.98\n", + " grey wolf 50 0.74 0.92\n", + " Alaskan tundra wolf 50 0.86 0.98\n", + " red wolf 50 0.54 0.92\n", + " coyote 50 0.62 0.82\n", + " dingo 50 0.76 0.94\n", + " dhole 50 0.9 0.96\n", + " African wild dog 50 1 1\n", + " hyena 50 0.9 0.94\n", + " red fox 50 0.62 0.92\n", + " kit fox 50 0.7 0.98\n", + " Arctic fox 50 0.92 0.98\n", + " grey fox 50 0.66 0.96\n", + " tabby cat 50 0.58 0.92\n", + " tiger cat 50 0.2 0.94\n", + " Persian cat 50 0.92 1\n", + " Siamese cat 50 0.94 0.98\n", + " Egyptian Mau 50 0.52 0.84\n", + " cougar 50 0.94 0.96\n", + " lynx 50 0.74 0.9\n", + " leopard 50 0.86 1\n", + " snow leopard 50 0.9 0.98\n", + " jaguar 50 0.72 0.92\n", + " lion 50 0.9 0.98\n", + " tiger 50 0.96 0.98\n", + " cheetah 50 0.94 0.98\n", + " brown bear 50 0.9 0.98\n", + " American black bear 50 0.9 0.98\n", + " polar bear 50 0.86 0.94\n", + " sloth bear 50 0.72 0.92\n", + " mongoose 50 0.7 0.86\n", + " meerkat 50 0.82 0.98\n", + " tiger beetle 50 0.9 0.94\n", + " ladybug 50 0.78 0.98\n", + " ground beetle 50 0.62 0.94\n", + " longhorn beetle 50 0.58 0.9\n", + " leaf beetle 50 0.66 0.98\n", + " dung beetle 50 0.88 0.98\n", + " rhinoceros beetle 50 0.88 1\n", + " weevil 50 0.92 1\n", + " fly 50 0.78 0.94\n", + " bee 50 0.8 0.96\n", + 
" ant 50 0.68 0.84\n", + " grasshopper 50 0.48 0.9\n", + " cricket 50 0.66 0.94\n", + " stick insect 50 0.7 0.94\n", + " cockroach 50 0.72 0.84\n", + " mantis 50 0.72 0.9\n", + " cicada 50 0.9 0.96\n", + " leafhopper 50 0.9 0.96\n", + " lacewing 50 0.8 0.94\n", + " dragonfly 50 0.76 0.98\n", + " damselfly 50 0.82 1\n", + " red admiral 50 0.96 0.96\n", + " ringlet 50 0.88 1\n", + " monarch butterfly 50 0.9 0.96\n", + " small white 50 0.88 1\n", + " sulphur butterfly 50 0.92 1\n", + "gossamer-winged butterfly 50 0.9 1\n", + " starfish 50 0.82 0.94\n", + " sea urchin 50 0.84 0.98\n", + " sea cucumber 50 0.76 0.92\n", + " cottontail rabbit 50 0.7 0.98\n", + " hare 50 0.9 1\n", + " Angora rabbit 50 0.92 0.98\n", + " hamster 50 1 1\n", + " porcupine 50 0.9 0.98\n", + " fox squirrel 50 0.82 0.96\n", + " marmot 50 0.94 0.96\n", + " beaver 50 0.78 0.96\n", + " guinea pig 50 0.78 0.92\n", + " common sorrel 50 0.98 0.98\n", + " zebra 50 0.96 0.98\n", + " pig 50 0.54 0.82\n", + " wild boar 50 0.86 0.96\n", + " warthog 50 0.96 0.96\n", + " hippopotamus 50 0.9 1\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + " ox 50 0.52 0.94\n", + " water buffalo 50 0.86 0.94\n", + " bison 50 0.9 0.98\n", + " ram 50 0.62 0.98\n", + " bighorn sheep 50 0.72 1\n", + " Alpine ibex 50 0.96 0.98\n", + " hartebeest 50 0.94 1\n", + " impala 50 0.86 0.98\n", + " gazelle 50 0.74 0.96\n", + " dromedary 50 0.94 1\n", + " llama 50 0.86 0.94\n", + " weasel 50 0.42 0.96\n", + " mink 50 0.78 0.92\n", + " European polecat 50 0.54 0.88\n", + " black-footed ferret 50 0.74 0.96\n", + " otter 50 0.68 0.9\n", + " skunk 50 0.94 0.96\n", + " badger 50 0.88 0.92\n", + " armadillo 50 0.88 0.96\n", + " three-toed sloth 50 0.96 1\n", + " orangutan 50 0.82 0.9\n", + " gorilla 50 0.78 0.94\n", + " chimpanzee 50 0.86 0.94\n", + " gibbon 50 0.74 0.9\n", + " siamang 50 0.68 0.94\n", + " guenon 50 0.82 0.96\n", + " patas monkey 50 0.66 0.86\n", + " baboon 50 0.88 0.96\n", + " macaque 50 0.72 0.84\n", + " langur 50 0.56 0.78\n", + " black-and-white colobus 50 0.84 0.92\n", + " proboscis monkey 50 0.98 1\n", + " marmoset 50 0.7 0.92\n", + " white-headed capuchin 50 0.82 0.94\n", + " howler monkey 50 0.9 0.96\n", + " titi 50 0.54 0.9\n", + "Geoffroy's spider monkey 50 0.36 0.86\n", + " common squirrel monkey 50 0.76 0.92\n", + " ring-tailed lemur 50 0.7 0.94\n", + " indri 50 0.86 0.98\n", + " Asian elephant 50 0.54 0.96\n", + " African bush elephant 50 0.62 0.96\n", + " red panda 50 0.94 0.94\n", + " giant panda 50 0.92 0.98\n", + " snoek 50 0.76 0.9\n", + " eel 50 0.58 0.86\n", + " coho salmon 50 0.8 0.98\n", + " rock beauty 50 0.8 0.96\n", + " clownfish 50 0.8 0.98\n", + " sturgeon 50 0.76 0.96\n", + " garfish 50 0.7 0.82\n", + " lionfish 50 0.94 0.98\n", + " pufferfish 50 0.86 0.98\n", + " abacus 50 0.8 0.88\n", + " abaya 50 0.72 0.94\n", + " academic gown 50 0.44 0.94\n", + " accordion 50 0.78 0.96\n", + " acoustic guitar 50 0.54 0.78\n", + " aircraft carrier 50 0.7 0.98\n", + " airliner 50 0.92 1\n", + " airship 50 0.8 0.88\n", + " altar 50 0.6 0.94\n", + " ambulance 50 0.84 0.98\n", + " amphibious vehicle 50 0.68 0.9\n", + " analog clock 50 0.5 0.88\n", + " apiary 50 0.9 1\n", + " apron 50 0.68 0.86\n", + " waste container 50 0.6 0.86\n", + " assault rifle 50 0.36 0.9\n", + " backpack 50 0.36 0.72\n", + " bakery 50 0.38 0.64\n", + " balance beam 50 0.84 0.98\n", + " balloon 50 0.88 0.96\n", + " ballpoint pen 50 0.52 0.96\n", + " Band-Aid 50 0.68 0.96\n", + " banjo 50 0.9 1\n", + " baluster 50 0.74 0.94\n", + " barbell 
50 0.58 0.9\n", + " barber chair 50 0.72 0.9\n", + " barbershop 50 0.64 0.9\n", + " barn 50 0.96 0.96\n", + " barometer 50 0.86 0.96\n", + " barrel 50 0.64 0.86\n", + " wheelbarrow 50 0.64 0.92\n", + " baseball 50 0.76 0.96\n", + " basketball 50 0.88 0.98\n", + " bassinet 50 0.8 0.94\n", + " bassoon 50 0.84 0.98\n", + " swimming cap 50 0.7 0.88\n", + " bath towel 50 0.56 0.84\n", + " bathtub 50 0.34 0.86\n", + " station wagon 50 0.68 0.9\n", + " lighthouse 50 0.74 0.96\n", + " beaker 50 0.46 0.7\n", + " military cap 50 0.88 0.98\n", + " beer bottle 50 0.72 0.9\n", + " beer glass 50 0.72 0.9\n", + " bell-cot 50 0.6 0.96\n", + " bib 50 0.58 0.86\n", + " tandem bicycle 50 0.76 0.96\n", + " bikini 50 0.52 0.88\n", + " ring binder 50 0.7 0.86\n", + " binoculars 50 0.54 0.78\n", + " birdhouse 50 0.86 0.96\n", + " boathouse 50 0.78 0.96\n", + " bobsleigh 50 0.94 0.96\n", + " bolo tie 50 0.86 0.88\n", + " poke bonnet 50 0.68 0.88\n", + " bookcase 50 0.68 0.92\n", + " bookstore 50 0.58 0.88\n", + " bottle cap 50 0.62 0.8\n", + " bow 50 0.74 0.84\n", + " bow tie 50 0.68 0.92\n", + " brass 50 0.92 0.98\n", + " bra 50 0.52 0.76\n", + " breakwater 50 0.64 0.94\n", + " breastplate 50 0.36 0.9\n", + " broom 50 0.58 0.84\n", + " bucket 50 0.58 0.88\n", + " buckle 50 0.5 0.76\n", + " bulletproof vest 50 0.52 0.76\n", + " high-speed train 50 0.94 0.98\n", + " butcher shop 50 0.76 0.94\n", + " taxicab 50 0.7 0.92\n", + " cauldron 50 0.5 0.72\n", + " candle 50 0.5 0.76\n", + " cannon 50 0.88 0.96\n", + " canoe 50 0.94 1\n", + " can opener 50 0.72 0.88\n", + " cardigan 50 0.66 0.88\n", + " car mirror 50 0.94 0.98\n", + " carousel 50 0.96 0.96\n", + " tool kit 50 0.68 0.84\n", + " carton 50 0.44 0.78\n", + " car wheel 50 0.4 0.78\n", + "automated teller machine 50 0.82 0.94\n", + " cassette 50 0.62 0.84\n", + " cassette player 50 0.3 0.92\n", + " castle 50 0.74 0.9\n", + " catamaran 50 0.74 0.98\n", + " CD player 50 0.52 0.8\n", + " cello 50 0.84 1\n", + " mobile phone 50 0.72 0.86\n", + " chain 50 0.34 0.78\n", + " chain-link fence 50 0.7 0.86\n", + " chain mail 50 0.68 0.86\n", + " chainsaw 50 0.88 0.96\n", + " chest 50 0.7 0.88\n", + " chiffonier 50 0.32 0.64\n", + " chime 50 0.64 0.84\n", + " china cabinet 50 0.78 0.94\n", + " Christmas stocking 50 0.92 0.98\n", + " church 50 0.6 0.86\n", + " movie theater 50 0.68 0.9\n", + " cleaver 50 0.36 0.68\n", + " cliff dwelling 50 0.86 1\n", + " cloak 50 0.28 0.7\n", + " clogs 50 0.6 0.88\n", + " cocktail shaker 50 0.62 0.76\n", + " coffee mug 50 0.48 0.78\n", + " coffeemaker 50 0.62 0.92\n", + " coil 50 0.64 0.86\n", + " combination lock 50 0.62 0.92\n", + " computer keyboard 50 0.72 0.92\n", + " confectionery store 50 0.56 0.84\n", + " container ship 50 0.82 0.98\n", + " convertible 50 0.78 1\n", + " corkscrew 50 0.84 0.98\n", + " cornet 50 0.56 0.98\n", + " cowboy boot 50 0.66 0.78\n", + " cowboy hat 50 0.66 0.88\n", + " cradle 50 0.34 0.8\n", + " crane (machine) 50 0.8 0.92\n", + " crash helmet 50 0.92 0.96\n", + " crate 50 0.6 0.86\n", + " infant bed 50 0.8 0.96\n", + " Crock Pot 50 0.78 0.88\n", + " croquet ball 50 0.9 1\n", + " crutch 50 0.42 0.7\n", + " cuirass 50 0.54 0.92\n", + " dam 50 0.78 0.92\n", + " desk 50 0.68 0.88\n", + " desktop computer 50 0.54 0.9\n", + " rotary dial telephone 50 0.92 0.96\n", + " diaper 50 0.68 0.84\n", + " digital clock 50 0.6 0.8\n", + " digital watch 50 0.56 0.82\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + " dining table 50 0.78 0.88\n", + " dishcloth 50 0.98 1\n", + " dishwasher 50 0.52 
0.74\n", + " disc brake 50 0.96 1\n", + " dock 50 0.56 0.96\n", + " dog sled 50 0.9 0.98\n", + " dome 50 0.74 0.96\n", + " doormat 50 0.6 0.82\n", + " drilling rig 50 0.82 0.94\n", + " drum 50 0.4 0.72\n", + " drumstick 50 0.56 0.82\n", + " dumbbell 50 0.6 0.92\n", + " Dutch oven 50 0.66 0.88\n", + " electric fan 50 0.82 0.84\n", + " electric guitar 50 0.66 0.92\n", + " electric locomotive 50 0.92 0.98\n", + " entertainment center 50 0.92 1\n", + " envelope 50 0.58 0.88\n", + " espresso machine 50 0.72 0.94\n", + " face powder 50 0.76 0.92\n", + " feather boa 50 0.8 0.88\n", + " filing cabinet 50 0.84 0.98\n", + " fireboat 50 0.96 0.96\n", + " fire engine 50 0.82 0.92\n", + " fire screen sheet 50 0.52 0.78\n", + " flagpole 50 0.76 0.92\n", + " flute 50 0.4 0.76\n", + " folding chair 50 0.68 0.9\n", + " football helmet 50 0.9 0.96\n", + " forklift 50 0.8 0.94\n", + " fountain 50 0.88 0.92\n", + " fountain pen 50 0.76 0.92\n", + " four-poster bed 50 0.82 0.92\n", + " freight car 50 0.98 0.98\n", + " French horn 50 0.76 0.92\n", + " frying pan 50 0.48 0.82\n", + " fur coat 50 0.86 0.96\n", + " garbage truck 50 0.9 0.98\n", + " gas mask 50 0.82 0.92\n", + " gas pump 50 0.82 0.98\n", + " goblet 50 0.64 0.9\n", + " go-kart 50 0.9 1\n", + " golf ball 50 0.86 0.96\n", + " golf cart 50 0.76 0.9\n", + " gondola 50 0.94 0.98\n", + " gong 50 0.74 0.92\n", + " gown 50 0.72 0.94\n", + " grand piano 50 0.74 0.96\n", + " greenhouse 50 0.84 1\n", + " grille 50 0.72 0.88\n", + " grocery store 50 0.68 0.9\n", + " guillotine 50 0.84 0.94\n", + " barrette 50 0.48 0.68\n", + " hair spray 50 0.4 0.76\n", + " half-track 50 0.76 0.96\n", + " hammer 50 0.54 0.78\n", + " hamper 50 0.72 0.9\n", + " hair dryer 50 0.7 0.8\n", + " hand-held computer 50 0.52 0.88\n", + " handkerchief 50 0.8 0.96\n", + " hard disk drive 50 0.78 0.86\n", + " harmonica 50 0.68 0.96\n", + " harp 50 0.9 0.96\n", + " harvester 50 0.86 1\n", + " hatchet 50 0.6 0.84\n", + " holster 50 0.7 0.84\n", + " home theater 50 0.72 0.96\n", + " honeycomb 50 0.74 0.86\n", + " hook 50 0.28 0.62\n", + " hoop skirt 50 0.68 0.8\n", + " horizontal bar 50 0.76 0.98\n", + " horse-drawn vehicle 50 0.9 0.9\n", + " hourglass 50 0.92 0.98\n", + " iPod 50 0.9 0.94\n", + " clothes iron 50 0.72 0.9\n", + " jack-o'-lantern 50 0.94 0.98\n", + " jeans 50 0.7 0.82\n", + " jeep 50 0.76 0.9\n", + " T-shirt 50 0.72 0.94\n", + " jigsaw puzzle 50 0.92 0.96\n", + " pulled rickshaw 50 0.88 0.96\n", + " joystick 50 0.74 0.98\n", + " kimono 50 0.78 0.94\n", + " knee pad 50 0.7 0.86\n", + " knot 50 0.8 0.86\n", + " lab coat 50 0.82 0.98\n", + " ladle 50 0.26 0.64\n", + " lampshade 50 0.62 0.8\n", + " laptop computer 50 0.2 0.88\n", + " lawn mower 50 0.8 0.96\n", + " lens cap 50 0.5 0.8\n", + " paper knife 50 0.3 0.58\n", + " library 50 0.62 0.92\n", + " lifeboat 50 0.94 0.98\n", + " lighter 50 0.56 0.8\n", + " limousine 50 0.74 0.92\n", + " ocean liner 50 0.88 0.96\n", + " lipstick 50 0.7 0.88\n", + " slip-on shoe 50 0.82 0.94\n", + " lotion 50 0.56 0.9\n", + " speaker 50 0.58 0.64\n", + " loupe 50 0.32 0.54\n", + " sawmill 50 0.74 0.9\n", + " magnetic compass 50 0.48 0.78\n", + " mail bag 50 0.64 0.94\n", + " mailbox 50 0.82 0.92\n", + " tights 50 0.28 0.9\n", + " tank suit 50 0.3 0.88\n", + " manhole cover 50 0.94 0.98\n", + " maraca 50 0.72 0.86\n", + " marimba 50 0.84 0.94\n", + " mask 50 0.48 0.78\n", + " match 50 0.74 0.92\n", + " maypole 50 0.96 1\n", + " maze 50 0.82 1\n", + " measuring cup 50 0.66 0.82\n", + " medicine chest 50 0.6 0.9\n", + " megalith 50 0.84 0.92\n", + " 
microphone 50 0.56 0.74\n", + " microwave oven 50 0.56 0.8\n", + " military uniform 50 0.62 0.86\n", + " milk can 50 0.7 0.82\n", + " minibus 50 0.68 1\n", + " miniskirt 50 0.58 0.84\n", + " minivan 50 0.48 0.8\n", + " missile 50 0.34 0.82\n", + " mitten 50 0.76 0.88\n", + " mixing bowl 50 0.82 0.98\n", + " mobile home 50 0.58 0.8\n", + " Model T 50 0.92 0.96\n", + " modem 50 0.7 0.9\n", + " monastery 50 0.52 0.86\n", + " monitor 50 0.34 0.86\n", + " moped 50 0.56 0.94\n", + " mortar 50 0.72 0.88\n", + " square academic cap 50 0.48 0.82\n", + " mosque 50 0.98 1\n", + " mosquito net 50 0.96 0.98\n", + " scooter 50 0.88 0.98\n", + " mountain bike 50 0.74 0.96\n", + " tent 50 0.88 0.96\n", + " computer mouse 50 0.38 0.82\n", + " mousetrap 50 0.82 0.9\n", + " moving van 50 0.48 0.8\n", + " muzzle 50 0.5 0.74\n", + " nail 50 0.68 0.76\n", + " neck brace 50 0.62 0.72\n", + " necklace 50 0.92 1\n", + " nipple 50 0.8 0.92\n", + " notebook computer 50 0.34 0.88\n", + " obelisk 50 0.82 0.94\n", + " oboe 50 0.62 0.84\n", + " ocarina 50 0.82 0.88\n", + " odometer 50 0.98 1\n", + " oil filter 50 0.6 0.82\n", + " organ 50 0.84 0.94\n", + " oscilloscope 50 0.94 0.96\n", + " overskirt 50 0.2 0.62\n", + " bullock cart 50 0.76 0.94\n", + " oxygen mask 50 0.48 0.8\n", + " packet 50 0.54 0.74\n", + " paddle 50 0.7 0.94\n", + " paddle wheel 50 0.92 0.98\n", + " padlock 50 0.64 0.78\n", + " paintbrush 50 0.66 0.78\n", + " pajamas 50 0.68 0.94\n", + " palace 50 0.66 0.94\n", + " pan flute 50 0.84 0.86\n", + " paper towel 50 0.68 0.86\n", + " parachute 50 0.92 0.96\n", + " parallel bars 50 0.68 0.96\n", + " park bench 50 0.82 0.94\n", + " parking meter 50 0.86 0.98\n", + " passenger car 50 0.48 0.86\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + " patio 50 0.6 0.84\n", + " payphone 50 0.78 0.94\n", + " pedestal 50 0.66 0.86\n", + " pencil case 50 0.74 0.98\n", + " pencil sharpener 50 0.6 0.76\n", + " perfume 50 0.66 0.96\n", + " Petri dish 50 0.64 0.82\n", + " photocopier 50 0.94 1\n", + " plectrum 50 0.72 0.92\n", + " Pickelhaube 50 0.78 0.88\n", + " picket fence 50 0.86 0.94\n", + " pickup truck 50 0.72 0.94\n", + " pier 50 0.54 0.92\n", + " piggy bank 50 0.8 0.94\n", + " pill bottle 50 0.72 0.9\n", + " pillow 50 0.76 0.88\n", + " ping-pong ball 50 0.78 0.88\n", + " pinwheel 50 0.8 0.94\n", + " pirate ship 50 0.76 0.92\n", + " pitcher 50 0.48 0.86\n", + " hand plane 50 0.9 0.92\n", + " planetarium 50 0.9 0.98\n", + " plastic bag 50 0.42 0.66\n", + " plate rack 50 0.52 0.82\n", + " plow 50 0.8 0.94\n", + " plunger 50 0.42 0.72\n", + " Polaroid camera 50 0.84 0.94\n", + " pole 50 0.4 0.76\n", + " police van 50 0.84 0.94\n", + " poncho 50 0.64 0.88\n", + " billiard table 50 0.84 0.92\n", + " soda bottle 50 0.58 0.9\n", + " pot 50 0.86 0.94\n", + " potter's wheel 50 0.92 0.94\n", + " power drill 50 0.38 0.7\n", + " prayer rug 50 0.7 0.88\n", + " printer 50 0.52 0.86\n", + " prison 50 0.66 0.9\n", + " projectile 50 0.34 0.96\n", + " projector 50 0.6 0.82\n", + " hockey puck 50 0.9 0.98\n", + " punching bag 50 0.62 0.72\n", + " purse 50 0.48 0.88\n", + " quill 50 0.78 0.86\n", + " quilt 50 0.6 0.9\n", + " race car 50 0.72 0.92\n", + " racket 50 0.78 0.94\n", + " radiator 50 0.7 0.84\n", + " radio 50 0.68 0.9\n", + " radio telescope 50 0.88 0.94\n", + " rain barrel 50 0.8 0.96\n", + " recreational vehicle 50 0.84 0.96\n", + " reel 50 0.72 0.8\n", + " reflex camera 50 0.76 0.96\n", + " refrigerator 50 0.76 0.92\n", + " remote control 50 0.72 0.94\n", + " restaurant 50 0.52 0.62\n", + " 
revolver 50 0.8 0.98\n", + " rifle 50 0.46 0.76\n", + " rocking chair 50 0.72 0.9\n", + " rotisserie 50 0.88 0.96\n", + " eraser 50 0.62 0.76\n", + " rugby ball 50 0.84 0.94\n", + " ruler 50 0.72 0.86\n", + " running shoe 50 0.84 0.94\n", + " safe 50 0.9 0.94\n", + " safety pin 50 0.48 0.8\n", + " salt shaker 50 0.62 0.8\n", + " sandal 50 0.7 0.82\n", + " sarong 50 0.62 0.8\n", + " saxophone 50 0.66 0.9\n", + " scabbard 50 0.78 0.92\n", + " weighing scale 50 0.62 0.84\n", + " school bus 50 0.92 1\n", + " schooner 50 0.8 1\n", + " scoreboard 50 0.86 0.98\n", + " CRT screen 50 0.16 0.8\n", + " screw 50 0.96 0.98\n", + " screwdriver 50 0.4 0.58\n", + " seat belt 50 0.9 0.92\n", + " sewing machine 50 0.74 0.94\n", + " shield 50 0.64 0.78\n", + " shoe store 50 0.84 0.98\n", + " shoji 50 0.76 0.92\n", + " shopping basket 50 0.52 0.84\n", + " shopping cart 50 0.76 0.9\n", + " shovel 50 0.7 0.84\n", + " shower cap 50 0.74 0.88\n", + " shower curtain 50 0.72 0.9\n", + " ski 50 0.68 0.94\n", + " ski mask 50 0.66 0.9\n", + " sleeping bag 50 0.66 0.8\n", + " slide rule 50 0.7 0.86\n", + " sliding door 50 0.54 0.76\n", + " slot machine 50 0.92 0.96\n", + " snorkel 50 0.86 1\n", + " snowmobile 50 0.86 0.96\n", + " snowplow 50 0.9 1\n", + " soap dispenser 50 0.52 0.9\n", + " soccer ball 50 0.84 0.98\n", + " sock 50 0.66 0.78\n", + " solar thermal collector 50 0.72 0.9\n", + " sombrero 50 0.7 0.84\n", + " soup bowl 50 0.6 0.94\n", + " space bar 50 0.32 0.84\n", + " space heater 50 0.64 0.74\n", + " space shuttle 50 0.86 0.98\n", + " spatula 50 0.28 0.6\n", + " motorboat 50 0.94 1\n", + " spider web 50 0.76 0.96\n", + " spindle 50 0.92 1\n", + " sports car 50 0.5 0.96\n", + " spotlight 50 0.34 0.66\n", + " stage 50 0.76 0.92\n", + " steam locomotive 50 0.96 1\n", + " through arch bridge 50 0.82 0.96\n", + " steel drum 50 0.8 0.94\n", + " stethoscope 50 0.52 0.84\n", + " scarf 50 0.54 0.92\n", + " stone wall 50 0.8 0.92\n", + " stopwatch 50 0.54 0.9\n", + " stove 50 0.46 0.78\n", + " strainer 50 0.58 0.84\n", + " tram 50 0.9 0.96\n", + " stretcher 50 0.46 0.74\n", + " couch 50 0.72 0.94\n", + " stupa 50 0.84 0.9\n", + " submarine 50 0.78 0.9\n", + " suit 50 0.62 0.88\n", + " sundial 50 0.46 0.78\n", + " sunglass 50 0.18 0.6\n", + " sunglasses 50 0.32 0.64\n", + " sunscreen 50 0.32 0.7\n", + " suspension bridge 50 0.64 0.94\n", + " mop 50 0.8 0.96\n", + " sweatshirt 50 0.26 0.68\n", + " swimsuit 50 0.6 0.84\n", + " swing 50 0.78 0.88\n", + " switch 50 0.62 0.8\n", + " syringe 50 0.68 0.8\n", + " table lamp 50 0.54 0.88\n", + " tank 50 0.78 0.94\n", + " tape player 50 0.38 0.88\n", + " teapot 50 0.82 1\n", + " teddy bear 50 0.82 0.92\n", + " television 50 0.6 0.9\n", + " tennis ball 50 0.7 0.94\n", + " thatched roof 50 0.86 0.94\n", + " front curtain 50 0.76 0.94\n", + " thimble 50 0.68 0.82\n", + " threshing machine 50 0.64 0.9\n", + " throne 50 0.68 0.82\n", + " tile roof 50 0.84 0.96\n", + " toaster 50 0.64 0.82\n", + " tobacco shop 50 0.44 0.74\n", + " toilet seat 50 0.64 0.88\n", + " torch 50 0.62 0.86\n", + " totem pole 50 0.9 1\n", + " tow truck 50 0.64 0.92\n", + " toy store 50 0.64 0.9\n", + " tractor 50 0.86 0.98\n", + " semi-trailer truck 50 0.76 0.96\n", + " tray 50 0.54 0.76\n", + " trench coat 50 0.6 0.78\n", + " tricycle 50 0.78 0.96\n", + " trimaran 50 0.78 0.98\n", + " tripod 50 0.66 0.86\n", + " triumphal arch 50 0.92 0.98\n", + " trolleybus 50 0.98 1\n", + " trombone 50 0.66 0.94\n", + " tub 50 0.3 0.86\n", + " turnstile 50 0.8 0.9\n", + " typewriter keyboard 50 0.74 0.98\n", + " umbrella 
50 0.6 0.78\n", + " unicycle 50 0.78 0.96\n", + " upright piano 50 0.84 0.94\n", + " vacuum cleaner 50 0.84 0.92\n", + " vase 50 0.56 0.74\n", + " vault 50 0.78 0.9\n", + " velvet 50 0.22 0.5\n", + " vending machine 50 0.94 1\n", + " vestment 50 0.62 0.86\n", + " viaduct 50 0.78 0.88\n", + " violin 50 0.64 0.88\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + " volleyball 50 0.96 1\n", + " waffle iron 50 0.72 0.84\n", + " wall clock 50 0.58 0.86\n", + " wallet 50 0.58 0.94\n", + " wardrobe 50 0.7 0.9\n", + " military aircraft 50 0.9 0.98\n", + " sink 50 0.74 0.94\n", + " washing machine 50 0.82 0.94\n", + " water bottle 50 0.54 0.68\n", + " water jug 50 0.3 0.78\n", + " water tower 50 0.94 0.96\n", + " whiskey jug 50 0.64 0.76\n", + " whistle 50 0.7 0.82\n", + " wig 50 0.86 0.88\n", + " window screen 50 0.7 0.82\n", + " window shade 50 0.54 0.9\n", + " Windsor tie 50 0.32 0.64\n", + " wine bottle 50 0.46 0.76\n", + " wing 50 0.52 0.96\n", + " wok 50 0.54 0.92\n", + " wooden spoon 50 0.62 0.86\n", + " wool 50 0.42 0.84\n", + " split-rail fence 50 0.7 0.92\n", + " shipwreck 50 0.86 0.98\n", + " yawl 50 0.76 0.92\n", + " yurt 50 0.86 0.96\n", + " website 50 0.98 1\n", + " comic book 50 0.72 0.88\n", + " crossword 50 0.8 0.88\n", + " traffic sign 50 0.72 0.9\n", + " traffic light 50 0.8 0.96\n", + " dust jacket 50 0.78 0.94\n", + " menu 50 0.8 0.96\n", + " plate 50 0.44 0.86\n", + " guacamole 50 0.76 0.96\n", + " consomme 50 0.52 0.92\n", + " hot pot 50 0.78 1\n", + " trifle 50 0.9 1\n", + " ice cream 50 0.68 0.94\n", + " ice pop 50 0.68 0.8\n", + " baguette 50 0.62 0.88\n", + " bagel 50 0.64 0.86\n", + " pretzel 50 0.68 0.9\n", + " cheeseburger 50 0.92 0.96\n", + " hot dog 50 0.74 0.96\n", + " mashed potato 50 0.72 0.88\n", + " cabbage 50 0.88 0.98\n", + " broccoli 50 0.88 0.96\n", + " cauliflower 50 0.84 0.98\n", + " zucchini 50 0.68 0.98\n", + " spaghetti squash 50 0.82 0.96\n", + " acorn squash 50 0.8 1\n", + " butternut squash 50 0.72 0.94\n", + " cucumber 50 0.66 0.94\n", + " artichoke 50 0.86 0.96\n", + " bell pepper 50 0.86 0.94\n", + " cardoon 50 0.92 0.94\n", + " mushroom 50 0.38 0.96\n", + " Granny Smith 50 0.9 0.98\n", + " strawberry 50 0.64 0.88\n", + " orange 50 0.74 0.94\n", + " lemon 50 0.78 0.98\n", + " fig 50 0.84 0.94\n", + " pineapple 50 0.9 1\n", + " banana 50 0.88 0.98\n", + " jackfruit 50 0.96 0.98\n", + " custard apple 50 0.86 0.96\n", + " pomegranate 50 0.8 0.96\n", + " hay 50 0.84 0.96\n", + " carbonara 50 0.88 0.96\n", + " chocolate syrup 50 0.58 0.94\n", + " dough 50 0.36 0.68\n", + " meatloaf 50 0.64 0.88\n", + " pizza 50 0.78 0.9\n", + " pot pie 50 0.66 0.92\n", + " burrito 50 0.88 0.98\n", + " red wine 50 0.66 0.84\n", + " espresso 50 0.66 0.9\n", + " cup 50 0.42 0.78\n", + " eggnog 50 0.36 0.64\n", + " alp 50 0.54 0.94\n", + " bubble 50 0.86 0.96\n", + " cliff 50 0.66 1\n", + " coral reef 50 0.74 0.94\n", + " geyser 50 0.92 1\n", + " lakeshore 50 0.52 0.86\n", + " promontory 50 0.58 0.92\n", + " shoal 50 0.66 0.98\n", + " seashore 50 0.44 0.86\n", + " valley 50 0.72 0.98\n", + " volcano 50 0.72 0.94\n", + " baseball player 50 0.74 0.96\n", + " bridegroom 50 0.78 0.92\n", + " scuba diver 50 0.82 1\n", + " rapeseed 50 0.98 0.98\n", + " daisy 50 0.96 0.98\n", + " yellow lady's slipper 50 1 1\n", + " corn 50 0.42 0.86\n", + " acorn 50 0.96 0.98\n", + " rose hip 50 0.9 0.96\n", + " horse chestnut seed 50 1 1\n", + " coral fungus 50 0.98 0.98\n", + " agaric 50 0.84 0.94\n", + " gyromitra 50 0.98 0.98\n", + " stinkhorn mushroom 50 0.84 
0.92\n", + " earth star 50 1 1\n", + " hen-of-the-woods 50 0.9 0.96\n", + " bolete 50 0.8 0.94\n", + " ear 50 0.54 0.94\n", + " toilet paper 50 0.44 0.68\n", + "Speed: 0.1ms pre-process, 0.2ms inference, 0.0ms post-process per image at shape (1, 3, 320, 320)\n", + "Results saved to \u001b[1mruns/val-cls/exp\u001b[0m\n" + ] + } + ], + "source": [ + "# Validate YOLOv5s on Imagenet val\n", + "!python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 320 --half" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZY2VXXXu74w5" + }, + "source": [ + "# 3. Train\n", + "\n", + "

\n", + "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", + "

\n", + "\n", + "Train a YOLOv5s Classification model on the [Imagenette](https://image-net.org/) dataset with `--data imagenet`, starting from pretrained `--pretrained yolov5s-cls.pt`.\n", + "\n", + "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", + "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", + "- **Training Results** are saved to `runs/train-cls/` with incrementing run directories, i.e. `runs/train-cls/exp2`, `runs/train-cls/exp3` etc.\n", + "

\n", + "\n", + "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", + "\n", + "## Train on Custom Data with Roboflow 🌟 NEW\n", + "\n", + "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", + "\n", + "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-classification-custom-data/](https://blog.roboflow.com/train-yolov5-classification-custom-data/?ref=ultralytics)\n", + "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1KZiKUAjtARHAfZCXbJRv14-pOnIsBLPV?usp=sharing)\n", + "
\n", + "\n", + "

Label images lightning fast (including with model-assisted labeling)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "i3oKtE4g-aNn" + }, + "outputs": [], + "source": [ + "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", + "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", + "\n", + "if logger == 'TensorBoard':\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train\n", + "elif logger == 'Comet':\n", + " %pip install -q comet_ml\n", + " import comet_ml; comet_ml.init()\n", + "elif logger == 'ClearML':\n", + " %pip install -q clearml && clearml-init" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "1NcFxRcFdJ_O", + "outputId": "baa6d4be-3379-4aab-844a-d5a5396c0e49" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mclassify/train: \u001b[0mmodel=yolov5s-cls.pt, data=imagenette160, epochs=3, batch_size=16, imgsz=160, nosave=False, cache=ram, device=, workers=8, project=runs/train-cls, name=exp, exist_ok=False, pretrained=True, optimizer=Adam, lr0=0.001, decay=5e-05, label_smoothing=0.1, cutoff=None, dropout=None, verbose=False, seed=0, local_rank=-1\n", + "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 7 commits. Use `git pull ultralytics master` or `git clone https://github.com/ultralytics/yolov5` to update.\n", + "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", + "\n", + "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-cls', view at http://localhost:6006/\n", + "\u001b[34m\u001b[1malbumentations: \u001b[0m⚠️ not found, install with `pip install albumentations` (recommended)\n", + "Model summary: 149 layers, 4185290 parameters, 4185290 gradients, 10.5 GFLOPs\n", + "\u001b[34m\u001b[1moptimizer:\u001b[0m Adam(lr=0.001) with parameter groups 32 weight(decay=0.0), 33 weight(decay=5e-05), 33 bias\n", + "Image sizes 160 train, 160 test\n", + "Using 3 dataloader workers\n", + "Logging results to \u001b[1mruns/train-cls/exp\u001b[0m\n", + "Starting yolov5s-cls.pt training on imagenette160 dataset with 10 classes for 3 epochs...\n", + "\n", + " Epoch GPU_mem train_loss val_loss top1_acc top5_acc\n", + " 1/3 0.369G 1.05 0.935 0.837 0.985: 100%|█████\n", + " 2/3 0.369G 0.767 0.873 0.859 0.982: 100%|█████\n", + " 3/3 0.369G 0.626 0.713 0.927 0.992: 100%|█████\n", + "\n", + "Training complete (0.025 hours)\n", + "Results saved to \u001b[1mruns/train-cls/exp\u001b[0m\n", + "Predict: python classify/predict.py --weights runs/train-cls/exp/weights/best.pt --source im.jpg\n", + "Validate: python classify/val.py --weights runs/train-cls/exp/weights/best.pt --data /home/paguerrie/datasets/imagenette160\n", + "Export: python export.py --weights runs/train-cls/exp/weights/best.pt --include onnx\n", + "PyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', 'runs/train-cls/exp/weights/best.pt')\n", + "Visualize: https://netron.app\n", + "\n" + ] + } + ], + "source": [ + "# Train YOLOv5s Classification on Imagenette160 for 3 epochs\n", + "!python classify/train.py --img 160 --batch 16 --epochs 3 --data imagenette160 --model yolov5s-cls.pt --cache" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "15glLzbQx5u0" + }, + "source": [ + "# 4. 
Visualize" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nWOsI5wJR1o3" + }, + "source": [ + "## Comet Logging and Visualization 🌟 NEW\n", + "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "\n", + "Getting started is easy:\n", + "```shell\n", + "pip install comet_ml # 1. install\n", + "export COMET_API_KEY= # 2. paste API key\n", + "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", + "```\n", + "\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", + "\n", + "\"yolo-ui\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Lay2WsTjNJzP" + }, + "source": [ + "## ClearML Logging and Automation 🌟 NEW\n", + "\n", + "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML (check cells above):\n", + "\n", + "- `pip install clearml`\n", + "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n", + "\n", + "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", + "\n", + "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", + "\n", + "\n", + "\"ClearML" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-WPvRbS5Swl6" + }, + "source": [ + "## Local Logging\n", + "\n", + "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", + "\n", + "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. 
\n", + "\n", + "\"Local\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Zelyeqbyt3GD" + }, + "source": [ + "# Environments\n", + "\n", + "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", + "\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", + "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6Qu7Iesl0p54" + }, + "source": [ + "# Status\n", + "\n", + "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", + "\n", + "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IEijrePND_2I" + }, + "source": [ + "# Appendix\n", + "\n", + "Additional content below." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GMusP4OAxFu6" + }, + "outputs": [], + "source": [ + "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", + "import torch\n", + "\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # yolov5n - yolov5x6 or custom\n", + "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", + "results = model(im) # inference\n", + "results.print() # or .show(), .save(), .crop(), .pandas(), etc." 
+ ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "machine_shape": "hm", + "name": "YOLOv5 Tutorial", + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.12" + }, + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "0856bea36ec148b68522ff9c9eb258d8": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "0ace3934ec6f4d36a1b3a9e086390926": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "35e03ce5090346c9ae602891470fc555": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_76879f6f2aa54637a7a07faeea2bd684", + "max": 818322941, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_0ace3934ec6f4d36a1b3a9e086390926", + "value": 818322941 + } + }, + "574140e4c4bc48c9a171541a02cd0211": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_60b913d755b34d638478e30705a2dde1", + "placeholder": "​", + "style": "IPY_MODEL_0856bea36ec148b68522ff9c9eb258d8", + "value": "100%" + } + }, + "5966ba6e6f114d8c9d8d1d6b1bd4f4c7": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "60b913d755b34d638478e30705a2dde1": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + 
"model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "65881db1db8a4e9c930fab9172d45143": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "76879f6f2aa54637a7a07faeea2bd684": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + 
"top": null, + "visibility": null, + "width": null + } + }, + "9b8caa3522fc4cbab31e13b5dfc7808d": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_574140e4c4bc48c9a171541a02cd0211", + "IPY_MODEL_35e03ce5090346c9ae602891470fc555", + "IPY_MODEL_c942c208e72d46568b476bb0f2d75496" + ], + "layout": "IPY_MODEL_65881db1db8a4e9c930fab9172d45143" + } + }, + "c942c208e72d46568b476bb0f2d75496": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_d6b7a2243e0c4beca714d99dceec23d6", + "placeholder": "​", + "style": "IPY_MODEL_5966ba6e6f114d8c9d8d1d6b1bd4f4c7", + "value": " 780M/780M [02:19<00:00, 6.24MB/s]" + } + }, + "d6b7a2243e0c4beca714d99dceec23d6": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + } + } + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} From 005161514f0db7203195dae99caa94a617ac09f9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 22:10:35 +0100 Subject: [PATCH 264/326] Remove Colab notebook High-Memory notices (#10212) * Remove Colab notebook High-Memory notices Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/tutorial.ipynb | 5 ++--- segment/tutorial.ipynb | 7 +++---- tutorial.ipynb | 3 +-- 3 files changed, 6 
insertions(+), 9 deletions(-) diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index 8ed8b5db8a35..f235b754d7b4 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -14,7 +14,7 @@ "\n", "
\n", " \"Run\n", - " \"Open\n", + " \"Open\n", " \"Open\n", "
\n", "\n", @@ -1469,8 +1469,7 @@ "accelerator": "GPU", "colab": { "collapsed_sections": [], - "machine_shape": "hm", - "name": "YOLOv5 Tutorial", + "name": "YOLOv5 Classification Tutorial", "provenance": [], "toc_visible": true }, diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index c26878fb0dbf..f3f978d43d93 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -14,7 +14,7 @@ "\n", "
\n", " \"Run\n", - " \"Open\n", + " \"Open\n", " \"Open\n", "
\n", "\n", @@ -572,8 +572,7 @@ "metadata": { "accelerator": "GPU", "colab": { - "machine_shape": "hm", - "name": "YOLOv5 Tutorial", + "name": "YOLOv5 Segmentation Tutorial", "provenance": [], "toc_visible": true }, @@ -597,4 +596,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/tutorial.ipynb b/tutorial.ipynb index 07a6625a1491..eb5b675db2be 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -5,7 +5,6 @@ "colab": { "name": "YOLOv5 Tutorial", "provenance": [], - "machine_shape": "hm", "toc_visible": true }, "kernelspec": { @@ -973,4 +972,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From 2ecaa96c847c2b117bf1057d6caec54520fd592a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 22:17:52 +0100 Subject: [PATCH 265/326] Created using Colaboratory --- tutorial.ipynb | 134 ++++++++++++++++++++++++------------------------- 1 file changed, 67 insertions(+), 67 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index eb5b675db2be..9d5aa9c85c51 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -14,7 +14,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "13e0e8b77bf54b25b8893f0b4164315f": { + "300b4d5355ef4967bd5246afeef6eef5": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", @@ -29,14 +29,14 @@ "_view_name": "HBoxView", "box_style": "", "children": [ - "IPY_MODEL_48037f2f7fea4012b9b341f6aee75297", - "IPY_MODEL_3f3b925287274893baf5ed7bb0cf6635", - "IPY_MODEL_c44bdca7c9784b20ba2146250ee744d6" + "IPY_MODEL_84e6829bb88845a8a4f42700b8496925", + "IPY_MODEL_c038e52d41bf4d5b9602930c3d074087", + "IPY_MODEL_2667604641764341b0bc8c6afea438fd" ], - "layout": "IPY_MODEL_5b0ed23cd32c4c7d8d9467b7425684ad" + "layout": "IPY_MODEL_98b3a4806ed14102b0d75e6c571d6134" } }, - "48037f2f7fea4012b9b341f6aee75297": { + "84e6829bb88845a8a4f42700b8496925": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -51,13 +51,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_1e10b4db5d644cb78bd6e005bb34038a", + "layout": "IPY_MODEL_c66a77395e42424d904699edcbb67291", "placeholder": "​", - "style": "IPY_MODEL_a58728093ecb4eafb826bee11a84c549", + "style": "IPY_MODEL_c4bbc15bf853439399dbcf1d40a5a407", "value": "100%" } }, - "3f3b925287274893baf5ed7bb0cf6635": { + "c038e52d41bf4d5b9602930c3d074087": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", @@ -73,15 +73,15 @@ "bar_style": "success", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_9ce169fe4b8543c0b26d745daa230f18", + "layout": "IPY_MODEL_0aaabfac395b43afbdd6d752c502bbf6", "max": 818322941, "min": 0, "orientation": "horizontal", - "style": "IPY_MODEL_d5da01aca8fb400c96e76f44c9403581", + "style": "IPY_MODEL_3786d970492b4aa38f886f2572fd958c", "value": 818322941 } }, - "c44bdca7c9784b20ba2146250ee744d6": { + "2667604641764341b0bc8c6afea438fd": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -96,13 +96,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_98cbaa572fdd4c42975f52015672b3a5", + "layout": "IPY_MODEL_b86d0f2d7be74cebbcaa884b53123eeb", "placeholder": "​", - "style": "IPY_MODEL_a636aa81f5cc453099c9e552f0986e63", - "value": " 780M/780M [01:27<00:00, 6.98MB/s]" + "style": 
"IPY_MODEL_fa7b1497925a457f89286a71f073f416", + "value": " 780M/780M [00:57<00:00, 10.1MB/s]" } }, - "5b0ed23cd32c4c7d8d9467b7425684ad": { + "98b3a4806ed14102b0d75e6c571d6134": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -154,7 +154,7 @@ "width": null } }, - "1e10b4db5d644cb78bd6e005bb34038a": { + "c66a77395e42424d904699edcbb67291": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -206,7 +206,7 @@ "width": null } }, - "a58728093ecb4eafb826bee11a84c549": { + "c4bbc15bf853439399dbcf1d40a5a407": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -221,7 +221,7 @@ "description_width": "" } }, - "9ce169fe4b8543c0b26d745daa230f18": { + "0aaabfac395b43afbdd6d752c502bbf6": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -273,7 +273,7 @@ "width": null } }, - "d5da01aca8fb400c96e76f44c9403581": { + "3786d970492b4aa38f886f2572fd958c": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", @@ -289,7 +289,7 @@ "description_width": "" } }, - "98cbaa572fdd4c42975f52015672b3a5": { + "b86d0f2d7be74cebbcaa884b53123eeb": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -341,7 +341,7 @@ "width": null } }, - "a636aa81f5cc453099c9e552f0986e63": { + "fa7b1497925a457f89286a71f073f416": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -401,7 +401,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "bcb6db4a-fc21-4258-9b53-4a760a534656" + "outputId": "32e3bc15-6d02-4352-f0a3-912059d134a5" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", @@ -418,7 +418,7 @@ "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" ] }, { @@ -459,7 +459,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "de684b46-7623-4836-ee44-49cdb320cbf3" + "outputId": "8e81d6e9-0360-4212-cd61-9a5a58d3f703" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", @@ -472,16 +472,16 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1\n", - "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 162MB/s]\n", + "100% 14.1M/14.1M [00:00<00:00, 19.5MB/s]\n", "\n", "Fusing layers... 
\n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 17.2ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 13.3ms\n", - "Speed: 0.5ms pre-process, 15.2ms inference, 19.5ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 17.5ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 18.0ms\n", + "Speed: 0.5ms pre-process, 17.8ms inference, 17.6ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" ] } @@ -515,20 +515,20 @@ "base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": [ - "13e0e8b77bf54b25b8893f0b4164315f", - "48037f2f7fea4012b9b341f6aee75297", - "3f3b925287274893baf5ed7bb0cf6635", - "c44bdca7c9784b20ba2146250ee744d6", - "5b0ed23cd32c4c7d8d9467b7425684ad", - "1e10b4db5d644cb78bd6e005bb34038a", - "a58728093ecb4eafb826bee11a84c549", - "9ce169fe4b8543c0b26d745daa230f18", - "d5da01aca8fb400c96e76f44c9403581", - "98cbaa572fdd4c42975f52015672b3a5", - "a636aa81f5cc453099c9e552f0986e63" + "300b4d5355ef4967bd5246afeef6eef5", + "84e6829bb88845a8a4f42700b8496925", + "c038e52d41bf4d5b9602930c3d074087", + "2667604641764341b0bc8c6afea438fd", + "98b3a4806ed14102b0d75e6c571d6134", + "c66a77395e42424d904699edcbb67291", + "c4bbc15bf853439399dbcf1d40a5a407", + "0aaabfac395b43afbdd6d752c502bbf6", + "3786d970492b4aa38f886f2572fd958c", + "b86d0f2d7be74cebbcaa884b53123eeb", + "fa7b1497925a457f89286a71f073f416" ] }, - "outputId": "b1e02a1f-981f-4739-e75d-10d0204cc32d" + "outputId": "61ffec5e-90ea-44f6-c0ea-b006e6e7072f" }, "source": [ "# Download COCO val\n", @@ -546,7 +546,7 @@ "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, - "model_id": "13e0e8b77bf54b25b8893f0b4164315f" + "model_id": "300b4d5355ef4967bd5246afeef6eef5" } }, "metadata": {} @@ -560,7 +560,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "9c2f755f-f383-4a9e-cd19-f73a0c763a9c" + "outputId": "aa5d5cea-14c1-4a19-bfdf-95b7164962cf" }, "source": [ "# Validate YOLOv5s on COCO val\n", @@ -573,30 +573,30 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:02<00:00, 2019.92it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 
4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:02<00:00, 2066.57it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Instances P R mAP50 mAP50-95: 100% 157/157 [01:09<00:00, 2.25it/s]\n", + " Class Images Instances P R mAP50 mAP50-95: 100% 157/157 [01:09<00:00, 2.26it/s]\n", " all 5000 36335 0.67 0.521 0.566 0.371\n", - "Speed: 0.2ms pre-process, 2.7ms inference, 2.1ms NMS per image at shape (32, 3, 640, 640)\n", + "Speed: 0.1ms pre-process, 2.7ms inference, 1.9ms NMS per image at shape (32, 3, 640, 640)\n", "\n", "Evaluating pycocotools mAP... saving runs/val/exp/yolov5s_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.41s)\n", + "Done (t=0.82s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=6.19s)\n", + "DONE (t=5.49s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=75.81s).\n", + "DONE (t=74.26s).\n", "Accumulating evaluation results...\n", - "DONE (t=15.26s).\n", + "DONE (t=13.46s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.374\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.572\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.402\n", @@ -676,7 +676,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "7d03d4d2-9a6e-47de-88f4-c673b55c73c5" + "outputId": "f0fcdc77-5326-41e1-bacc-be5432eefa2a" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", @@ -690,7 +690,7 @@ "text": [ "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML\n", @@ -699,8 +699,8 @@ "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", - "100% 6.66M/6.66M [00:00<00:00, 26.1MB/s]\n", - "Dataset download success ✅ (0.9s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.66M/6.66M [00:00<00:00, 39.8MB/s]\n", + "Dataset download success ✅ (0.8s), saved to 
\u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -734,11 +734,11 @@ "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...126 found, 2 missing, 0 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 1989.66it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 2084.63it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 246.25it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128/labels/train2017.cache' images and labels... 126 found, 2 missing, 0 empty, 0 corrupt: 100% 128/128 [00:00 Date: Fri, 18 Nov 2022 22:41:46 +0100 Subject: [PATCH 266/326] Created using Colaboratory --- segment/tutorial.ipynb | 70 +++++++++++++++++++----------------------- 1 file changed, 32 insertions(+), 38 deletions(-) diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index f3f978d43d93..4192c69da628 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -42,14 +42,14 @@ "base_uri": "https://localhost:8080/" }, "id": "wbvMlHd_QwMG", - "outputId": "d1e33dfc-9ad4-436e-f1e5-01acee40c029" + "outputId": "664f49fa-554a-4dca-8d0e-5c9dd60f6d28" }, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-251-g241d798 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" ] }, { @@ -100,7 +100,7 @@ "base_uri": "https://localhost:8080/" }, "id": "zR9ZbuQCH7FX", - "outputId": "e206fcec-cf42-4754-8a42-39bc3603eba8" + "outputId": "6392c9ff-0863-4665-faf9-b3af9881c305" }, "outputs": [ { @@ -108,16 +108,16 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1msegment/predict: \u001b[0mweights=['yolov5s-seg.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/predict-seg, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1, retina_masks=False\n", - "YOLOv5 🚀 v6.2-251-g241d798 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-seg.pt to yolov5s-seg.pt...\n", - "100% 14.9M/14.9M [00:03<00:00, 3.93MB/s]\n", + "100% 14.9M/14.9M [00:01<00:00, 9.09MB/s]\n", "\n", "Fusing layers... 
\n", "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 17.2ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.7ms\n", - "Speed: 0.4ms pre-process, 15.5ms inference, 22.2ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 18.0ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.5ms\n", + "Speed: 0.5ms pre-process, 15.7ms inference, 18.5ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/predict-seg/exp\u001b[0m\n" ] } @@ -155,7 +155,7 @@ "base_uri": "https://localhost:8080/" }, "id": "WQPtK1QYVaD_", - "outputId": "f7eba0ae-49d1-405b-a1cf-169212fadc2c" + "outputId": "4707734e-00c7-43da-d642-32c3c3fe3090" }, "outputs": [ { @@ -182,26 +182,23 @@ "base_uri": "https://localhost:8080/" }, "id": "X58w8JLpMnjH", - "outputId": "73533135-6995-4f2d-adb0-3acb5ef9b300" + "outputId": "f96b700d-c779-4a34-930b-e85be4e58974" }, "outputs": [ { - "metadata": { - "tags": null - }, - "name": "stdout", "output_type": "stream", + "name": "stdout", "text": [ "\u001b[34m\u001b[1msegment/val: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s-seg.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val-seg, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-251-g241d798 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Fusing layers... \n", "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1420.92it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 
4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1409.04it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:54<00:00, 1.37it/s]\n", + " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:53<00:00, 1.38it/s]\n", " all 5000 36335 0.673 0.517 0.566 0.373 0.672 0.49 0.532 0.319\n", - "Speed: 0.9ms pre-process, 3.9ms inference, 3.0ms NMS per image at shape (32, 3, 640, 640)\n", + "Speed: 0.8ms pre-process, 4.0ms inference, 2.8ms NMS per image at shape (32, 3, 640, 640)\n", "Results saved to \u001b[1mruns/val-seg/exp\u001b[0m\n" ] } @@ -273,27 +270,24 @@ "base_uri": "https://localhost:8080/" }, "id": "1NcFxRcFdJ_O", - "outputId": "8e349df5-9910-4a91-a845-748def15d3d7" + "outputId": "2cdb19cc-69af-4c90-f8de-af02dfedba91" }, "outputs": [ { - "metadata": { - "tags": null - }, - "name": "stdout", "output_type": "stream", + "name": "stdout", "text": [ "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-251-g241d798 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n", "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128-seg/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128-seg.zip to coco128-seg.zip...\n", - "100% 6.79M/6.79M [00:01<00:00, 4.42MB/s]\n", - "Dataset download success ✅ (2.8s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.79M/6.79M [00:01<00:00, 5.87MB/s]\n", + "Dataset download success ✅ (2.1s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -327,11 +321,11 @@ "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128-seg/labels/train2017' 
images and labels...126 found, 2 missing, 0 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 1383.68it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1439.54it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128-seg/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 241.77it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128-seg/labels/train2017.cache' images and labels... 126 found, 2 missing, 0 empty, 0 corrupt: 100% 128/128 [00:00 Date: Fri, 18 Nov 2022 23:12:09 +0100 Subject: [PATCH 267/326] Created using Colaboratory --- classify/tutorial.ipynb | 3254 +++++++++++++++++---------------------- 1 file changed, 1445 insertions(+), 1809 deletions(-) diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index f235b754d7b4..e035a7bda40d 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -1,1842 +1,1478 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "t6MPjfT5NrKQ" - }, - "source": [ - "
\n", - "\n", - " \n", - " \n", - "\n", - "\n", - "
\n", - " \"Run\n", - " \"Open\n", - " \"Open\n", - "
\n", - "\n", - "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", - "\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7mGmQbAO5pQb" - }, - "source": [ - "# Setup\n", - "\n", - "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "t6MPjfT5NrKQ" + }, + "source": [ + "
\n", + "\n", + " \n", + " \n", + "\n", + "\n", + "
\n", + " \"Run\n", + " \"Open\n", + " \"Open\n", + "
\n", + "\n", + "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", + "\n", + "
" + ] }, - "id": "wbvMlHd_QwMG", - "outputId": "0f9ee467-cea4-48e8-9050-7a76ae1b6141" - }, - "outputs": [ { - "name": "stderr", - "output_type": "stream", - "text": [ - "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n" - ] + "cell_type": "markdown", + "metadata": { + "id": "7mGmQbAO5pQb" + }, + "source": [ + "# Setup\n", + "\n", + "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Setup complete ✅ (4 CPUs, 14.7 GB RAM, 152.0/196.6 GB disk)\n" - ] - } - ], - "source": [ - "!git clone https://github.com/ultralytics/yolov5 # clone\n", - "%cd yolov5\n", - "%pip install -qr requirements.txt # install\n", - "\n", - "import torch\n", - "import utils\n", - "display = utils.notebook_init() # checks" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "4JnkELT0cIJg" - }, - "source": [ - "# 1. Predict\n", - "\n", - "`classify/predict.py` runs YOLOv5 Classifcation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-cls`. Example inference sources are:\n", - "\n", - "```shell\n", - "python classify/predict.py --source 0 # webcam\n", - " img.jpg # image \n", - " vid.mp4 # video\n", - " screen # screenshot\n", - " path/ # directory\n", - " 'path/*.jpg' # glob\n", - " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", - " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" + "cell_type": "code", + "execution_count": 1, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "wbvMlHd_QwMG", + "outputId": "43b2e1b5-78d9-4e1d-8530-ee9779bba160" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n" + ] + } + ], + "source": [ + "!git clone https://github.com/ultralytics/yolov5 # clone\n", + "%cd yolov5\n", + "%pip install -qr requirements.txt # install\n", + "\n", + "import torch\n", + "import utils\n", + "display = utils.notebook_init() # checks" + ] }, - "id": "zR9ZbuQCH7FX", - "outputId": "60647b99-e8d4-402c-f444-331bf6746da4" - }, - "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[34m\u001b[1mclassify/predict: \u001b[0mweights=['yolov5s-cls.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], device=, view_img=False, save_txt=True, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1\n", - "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", - "\n", - "Fusing layers... 
\n", - "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", - "image 1/2 /home/paguerrie/yolov5/data/images/bus.jpg: 640x640 minibus 0.01, recreational vehicle 0.01, ambulance 0.01, tram 0.01, trolleybus 0.01, 2.6ms\n", - "image 2/2 /home/paguerrie/yolov5/data/images/zidane.jpg: 640x640 suit 0.05, bow tie 0.01, ping-pong ball 0.01, microphone 0.01, bassoon 0.01, 2.8ms\n", - "Speed: 1.2ms pre-process, 2.7ms inference, 0.1ms NMS per image at shape (1, 3, 640, 640)\n", - "Results saved to \u001b[1mruns/predict-cls/exp\u001b[0m\n", - "2 labels saved to runs/predict-cls/exp/labels\n" - ] - } - ], - "source": [ - "!python classify/predict.py --weights yolov5s-cls.pt --img 640 --source data/images\n", - "# display.Image(filename='runs/predict-cls/exp/zidane.jpg', width=600)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "hkAzDWJ7cWTr" - }, - "source": [ - "        \n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0eq1SMWl6Sfn" - }, - "source": [ - "# 2. Validate\n", - "Validate a model's accuracy on the [Imagenet](https://image-net.org/) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 49, - "referenced_widgets": [ - "9b8caa3522fc4cbab31e13b5dfc7808d", - "574140e4c4bc48c9a171541a02cd0211", - "35e03ce5090346c9ae602891470fc555", - "c942c208e72d46568b476bb0f2d75496", - "65881db1db8a4e9c930fab9172d45143", - "60b913d755b34d638478e30705a2dde1", - "0856bea36ec148b68522ff9c9eb258d8", - "76879f6f2aa54637a7a07faeea2bd684", - "0ace3934ec6f4d36a1b3a9e086390926", - "d6b7a2243e0c4beca714d99dceec23d6", - "5966ba6e6f114d8c9d8d1d6b1bd4f4c7" - ] + "cell_type": "markdown", + "metadata": { + "id": "4JnkELT0cIJg" + }, + "source": [ + "# 1. Predict\n", + "\n", + "`classify/predict.py` runs YOLOv5 Classifcation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-cls`. 
Example inference sources are:\n", + "\n", + "```shell\n", + "python classify/predict.py --source 0 # webcam\n", + " img.jpg # image \n", + " vid.mp4 # video\n", + " screen # screenshot\n", + " path/ # directory\n", + " 'path/*.jpg' # glob\n", + " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", + " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", + "```" + ] }, - "id": "WQPtK1QYVaD_", - "outputId": "102dabed-bc31-42fe-9133-d9ce28a2c01e" - }, - "outputs": [], - "source": [ - "# Download Imagenet val\n", - "!bash data/scripts/get_imagenet.sh --val" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "zR9ZbuQCH7FX", + "outputId": "1b610787-7cf7-4c33-aac2-aa50fbb84a94" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[34m\u001b[1mclassify/predict: \u001b[0mweights=['yolov5s-cls.pt'], source=data/images, data=data/coco128.yaml, imgsz=[224, 224], device=, view_img=False, save_txt=True, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1\n", + "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt to yolov5s-cls.pt...\n", + "100% 10.5M/10.5M [00:03<00:00, 2.94MB/s]\n", + "\n", + "Fusing layers... \n", + "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 224x224 minibus 0.39, police van 0.24, amphibious vehicle 0.05, recreational vehicle 0.04, trolleybus 0.03, 3.9ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 224x224 suit 0.38, bow tie 0.19, bridegroom 0.18, rugby ball 0.04, stage 0.02, 4.1ms\n", + "Speed: 0.3ms pre-process, 4.0ms inference, 1.5ms NMS per image at shape (1, 3, 224, 224)\n", + "Results saved to \u001b[1mruns/predict-cls/exp\u001b[0m\n", + "2 labels saved to runs/predict-cls/exp/labels\n" + ] + } + ], + "source": [ + "!python classify/predict.py --weights yolov5s-cls.pt --img 224 --source data/images\n", + "# display.Image(filename='runs/predict-cls/exp/zidane.jpg', width=600)" + ] }, - "id": "X58w8JLpMnjH", - "outputId": "daf60b1b-b098-4657-c863-584f4c9cf078" - }, - "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[34m\u001b[1mclassify/val: \u001b[0mdata=../datasets/imagenet, weights=['yolov5s-cls.pt'], batch_size=128, imgsz=320, device=, workers=8, verbose=True, project=runs/val-cls, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", - "\n", - "Fusing layers... 
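\n",
 "# A hedged sketch of the command behind this output: the flags mirror the arguments\n",
 "# echoed above (data=../datasets/imagenet, imgsz=320, half=True, verbose=True), and\n",
 "# the remaining defaults are assumed:\n",
 "#   python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 320 --half --verbose\n",
 "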
\n", - "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", - "validating: 100%|██████████| 391/391 [02:36<00:00, 2.49it/s] \n", - " Class Images top1_acc top5_acc\n", - " all 50000 0.734 0.914\n", - " tench 50 0.92 0.98\n", - " goldfish 50 0.86 0.98\n", - " great white shark 50 0.76 0.94\n", - " tiger shark 50 0.84 0.96\n", - " hammerhead shark 50 0.88 0.98\n", - " electric ray 50 0.76 0.88\n", - " stingray 50 0.74 0.94\n", - " cock 50 0.74 0.94\n", - " hen 50 0.86 0.96\n", - " ostrich 50 0.98 1\n", - " brambling 50 0.9 0.98\n", - " goldfinch 50 0.92 1\n", - " house finch 50 0.92 1\n", - " junco 50 0.98 1\n", - " indigo bunting 50 0.86 0.94\n", - " American robin 50 0.94 1\n", - " bulbul 50 0.88 0.92\n", - " jay 50 0.92 0.98\n", - " magpie 50 0.9 0.98\n", - " chickadee 50 0.96 1\n", - " American dipper 50 0.86 0.92\n", - " kite 50 0.8 0.94\n", - " bald eagle 50 0.9 0.98\n", - " vulture 50 0.96 1\n", - " great grey owl 50 0.96 0.98\n", - " fire salamander 50 0.96 0.98\n", - " smooth newt 50 0.66 0.98\n", - " newt 50 0.74 0.84\n", - " spotted salamander 50 0.9 0.98\n", - " axolotl 50 0.9 0.98\n", - " American bullfrog 50 0.8 0.92\n", - " tree frog 50 0.8 0.94\n", - " tailed frog 50 0.5 0.82\n", - " loggerhead sea turtle 50 0.7 0.92\n", - " leatherback sea turtle 50 0.58 0.8\n", - " mud turtle 50 0.58 0.84\n", - " terrapin 50 0.52 0.98\n", - " box turtle 50 0.88 1\n", - " banded gecko 50 0.78 0.9\n", - " green iguana 50 0.78 0.92\n", - " Carolina anole 50 0.62 0.98\n", - "desert grassland whiptail lizard 50 0.88 0.96\n", - " agama 50 0.78 0.96\n", - " frilled-necked lizard 50 0.82 0.94\n", - " alligator lizard 50 0.64 0.84\n", - " Gila monster 50 0.76 0.86\n", - " European green lizard 50 0.5 0.96\n", - " chameleon 50 0.78 0.9\n", - " Komodo dragon 50 0.9 1\n", - " Nile crocodile 50 0.66 0.92\n", - " American alligator 50 0.78 0.98\n", - " triceratops 50 0.96 0.98\n", - " worm snake 50 0.76 0.9\n", - " ring-necked snake 50 0.84 0.96\n", - " eastern hog-nosed snake 50 0.62 0.86\n", - " smooth green snake 50 0.64 0.96\n", - " kingsnake 50 0.78 0.94\n", - " garter snake 50 0.86 0.98\n", - " water snake 50 0.78 0.92\n", - " vine snake 50 0.72 0.86\n", - " night snake 50 0.34 0.86\n", - " boa constrictor 50 0.8 0.96\n", - " African rock python 50 0.52 0.82\n", - " Indian cobra 50 0.8 0.94\n", - " green mamba 50 0.56 0.92\n", - " sea snake 50 0.76 0.94\n", - " Saharan horned viper 50 0.48 0.88\n", - "eastern diamondback rattlesnake 50 0.72 0.92\n", - " sidewinder 50 0.38 0.92\n", - " trilobite 50 0.98 0.98\n", - " harvestman 50 0.86 0.94\n", - " scorpion 50 0.88 0.94\n", - " yellow garden spider 50 0.88 0.96\n", - " barn spider 50 0.38 0.96\n", - " European garden spider 50 0.6 0.98\n", - " southern black widow 50 0.84 0.98\n", - " tarantula 50 0.94 0.98\n", - " wolf spider 50 0.7 0.92\n", - " tick 50 0.76 0.82\n", - " centipede 50 0.74 0.86\n", - " black grouse 50 0.88 0.98\n", - " ptarmigan 50 0.84 0.98\n", - " ruffed grouse 50 0.9 1\n", - " prairie grouse 50 0.9 0.96\n", - " peacock 50 0.9 0.9\n", - " quail 50 0.88 0.94\n", - " partridge 50 0.66 0.94\n", - " grey parrot 50 0.94 0.98\n", - " macaw 50 0.92 0.98\n", - "sulphur-crested cockatoo 50 0.94 0.98\n", - " lorikeet 50 0.98 1\n", - " coucal 50 0.9 0.92\n", - " bee eater 50 0.96 0.98\n", - " hornbill 50 0.86 0.98\n", - " hummingbird 50 0.9 0.98\n", - " jacamar 50 0.94 0.94\n", - " toucan 50 0.84 0.94\n", - " duck 50 0.78 0.94\n", - " red-breasted merganser 50 0.94 0.98\n", - " goose 50 0.76 0.98\n", - " black swan 50 
0.94 1\n", - " tusker 50 0.58 0.92\n", - " echidna 50 1 1\n", - " platypus 50 0.72 0.84\n", - " wallaby 50 0.86 0.92\n", - " koala 50 0.84 0.98\n", - " wombat 50 0.82 0.86\n", - " jellyfish 50 0.94 0.96\n", - " sea anemone 50 0.66 0.98\n", - " brain coral 50 0.9 0.96\n", - " flatworm 50 0.76 1\n", - " nematode 50 0.9 0.92\n", - " conch 50 0.74 0.92\n", - " snail 50 0.78 0.86\n", - " slug 50 0.78 0.9\n", - " sea slug 50 0.94 0.98\n", - " chiton 50 0.86 0.96\n", - " chambered nautilus 50 0.86 0.94\n", - " Dungeness crab 50 0.86 0.96\n", - " rock crab 50 0.66 0.88\n", - " fiddler crab 50 0.64 0.88\n", - " red king crab 50 0.78 0.92\n", - " American lobster 50 0.78 0.96\n", - " spiny lobster 50 0.78 0.88\n", - " crayfish 50 0.56 0.84\n", - " hermit crab 50 0.82 0.96\n", - " isopod 50 0.62 0.74\n", - " white stork 50 0.88 0.94\n", - " black stork 50 0.86 0.96\n", - " spoonbill 50 0.96 1\n", - " flamingo 50 0.94 1\n", - " little blue heron 50 0.92 0.98\n", - " great egret 50 0.9 0.98\n", - " bittern 50 0.9 0.92\n", - " crane (bird) 50 0.64 0.94\n", - " limpkin 50 0.96 0.98\n", - " common gallinule 50 0.96 0.96\n", - " American coot 50 0.94 1\n", - " bustard 50 0.96 0.98\n", - " ruddy turnstone 50 0.96 1\n", - " dunlin 50 0.86 0.94\n", - " common redshank 50 0.92 0.96\n", - " dowitcher 50 0.9 1\n", - " oystercatcher 50 0.9 0.96\n", - " pelican 50 0.96 1\n", - " king penguin 50 0.88 0.92\n", - " albatross 50 0.9 0.98\n", - " grey whale 50 0.86 0.94\n", - " killer whale 50 0.9 0.98\n", - " dugong 50 0.88 0.94\n", - " sea lion 50 0.78 0.98\n", - " Chihuahua 50 0.56 0.82\n", - " Japanese Chin 50 0.7 0.98\n", - " Maltese 50 0.86 0.94\n", - " Pekingese 50 0.84 0.94\n", - " Shih Tzu 50 0.68 0.94\n", - " King Charles Spaniel 50 0.92 0.98\n", - " Papillon 50 0.92 0.94\n", - " toy terrier 50 0.48 0.96\n", - " Rhodesian Ridgeback 50 0.76 0.94\n" - ] + "cell_type": "markdown", + "metadata": { + "id": "hkAzDWJ7cWTr" + }, + "source": [ + "        \n", + "" + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - " Afghan Hound 50 0.9 0.98\n", - " Basset Hound 50 0.78 0.9\n", - " Beagle 50 0.82 0.98\n", - " Bloodhound 50 0.5 0.78\n", - " Bluetick Coonhound 50 0.84 0.94\n", - " Black and Tan Coonhound 50 0.46 0.8\n", - "Treeing Walker Coonhound 50 0.58 0.98\n", - " English foxhound 50 0.24 0.8\n", - " Redbone Coonhound 50 0.66 0.92\n", - " borzoi 50 0.94 1\n", - " Irish Wolfhound 50 0.64 0.9\n", - " Italian Greyhound 50 0.8 0.98\n", - " Whippet 50 0.82 0.98\n", - " Ibizan Hound 50 0.64 0.92\n", - " Norwegian Elkhound 50 0.88 1\n", - " Otterhound 50 0.58 0.9\n", - " Saluki 50 0.72 0.92\n", - " Scottish Deerhound 50 0.86 1\n", - " Weimaraner 50 0.88 0.96\n", - "Staffordshire Bull Terrier 50 0.62 0.92\n", - "American Staffordshire Terrier 50 0.66 0.92\n", - " Bedlington Terrier 50 0.82 0.96\n", - " Border Terrier 50 0.9 0.98\n", - " Kerry Blue Terrier 50 0.82 1\n", - " Irish Terrier 50 0.74 0.94\n", - " Norfolk Terrier 50 0.74 0.92\n", - " Norwich Terrier 50 0.68 0.98\n", - " Yorkshire Terrier 50 0.66 0.88\n", - " Wire Fox Terrier 50 0.66 0.96\n", - " Lakeland Terrier 50 0.82 0.94\n", - " Sealyham Terrier 50 0.74 0.9\n", - " Airedale Terrier 50 0.82 0.9\n", - " Cairn Terrier 50 0.82 0.94\n", - " Australian Terrier 50 0.48 0.84\n", - " Dandie Dinmont Terrier 50 0.84 0.9\n", - " Boston Terrier 50 0.88 1\n", - " Miniature Schnauzer 50 0.7 0.92\n", - " Giant Schnauzer 50 0.82 1\n", - " Standard Schnauzer 50 0.72 0.98\n", - " Scottish Terrier 50 0.78 0.94\n", - " Tibetan Terrier 50 0.64 0.98\n", - 
"Australian Silky Terrier 50 0.72 0.96\n", - "Soft-coated Wheaten Terrier 50 0.86 0.98\n", - "West Highland White Terrier 50 0.94 0.98\n", - " Lhasa Apso 50 0.66 0.96\n", - " Flat-Coated Retriever 50 0.78 1\n", - " Curly-coated Retriever 50 0.84 0.96\n", - " Golden Retriever 50 0.88 0.96\n", - " Labrador Retriever 50 0.82 0.94\n", - "Chesapeake Bay Retriever 50 0.86 0.98\n", - "German Shorthaired Pointer 50 0.84 0.96\n", - " Vizsla 50 0.7 0.94\n", - " English Setter 50 0.8 1\n", - " Irish Setter 50 0.78 0.9\n", - " Gordon Setter 50 0.84 0.92\n", - " Brittany 50 0.86 0.98\n", - " Clumber Spaniel 50 0.9 0.96\n", - "English Springer Spaniel 50 0.96 1\n", - " Welsh Springer Spaniel 50 0.92 1\n", - " Cocker Spaniels 50 0.7 0.96\n", - " Sussex Spaniel 50 0.7 0.88\n", - " Irish Water Spaniel 50 0.86 0.94\n", - " Kuvasz 50 0.7 0.92\n", - " Schipperke 50 0.94 0.98\n", - " Groenendael 50 0.78 0.92\n", - " Malinois 50 0.92 0.98\n", - " Briard 50 0.6 0.84\n", - " Australian Kelpie 50 0.74 0.96\n", - " Komondor 50 0.9 0.96\n", - " Old English Sheepdog 50 0.94 0.98\n", - " Shetland Sheepdog 50 0.72 0.94\n", - " collie 50 0.6 0.96\n", - " Border Collie 50 0.82 0.96\n", - " Bouvier des Flandres 50 0.78 0.96\n", - " Rottweiler 50 0.94 0.98\n", - " German Shepherd Dog 50 0.76 0.98\n", - " Dobermann 50 0.74 1\n", - " Miniature Pinscher 50 0.76 0.96\n", - "Greater Swiss Mountain Dog 50 0.66 0.94\n", - " Bernese Mountain Dog 50 0.94 1\n", - " Appenzeller Sennenhund 50 0.3 1\n", - " Entlebucher Sennenhund 50 0.72 0.98\n", - " Boxer 50 0.7 0.92\n", - " Bullmastiff 50 0.8 0.98\n", - " Tibetan Mastiff 50 0.92 0.98\n", - " French Bulldog 50 0.86 0.98\n", - " Great Dane 50 0.6 0.92\n", - " St. Bernard 50 0.94 1\n", - " husky 50 0.5 0.94\n", - " Alaskan Malamute 50 0.76 0.96\n", - " Siberian Husky 50 0.56 0.98\n", - " Dalmatian 50 0.94 0.98\n", - " Affenpinscher 50 0.76 0.92\n", - " Basenji 50 0.9 1\n", - " pug 50 0.96 0.98\n", - " Leonberger 50 0.98 1\n", - " Newfoundland 50 0.82 0.96\n", - " Pyrenean Mountain Dog 50 0.76 0.94\n", - " Samoyed 50 0.9 0.98\n", - " Pomeranian 50 0.96 1\n", - " Chow Chow 50 0.88 0.96\n", - " Keeshond 50 0.94 1\n", - " Griffon Bruxellois 50 0.92 0.98\n", - " Pembroke Welsh Corgi 50 0.9 0.98\n", - " Cardigan Welsh Corgi 50 0.7 0.94\n", - " Toy Poodle 50 0.52 0.96\n", - " Miniature Poodle 50 0.56 0.92\n", - " Standard Poodle 50 0.78 0.96\n", - " Mexican hairless dog 50 0.86 0.98\n", - " grey wolf 50 0.74 0.92\n", - " Alaskan tundra wolf 50 0.86 0.98\n", - " red wolf 50 0.54 0.92\n", - " coyote 50 0.62 0.82\n", - " dingo 50 0.76 0.94\n", - " dhole 50 0.9 0.96\n", - " African wild dog 50 1 1\n", - " hyena 50 0.9 0.94\n", - " red fox 50 0.62 0.92\n", - " kit fox 50 0.7 0.98\n", - " Arctic fox 50 0.92 0.98\n", - " grey fox 50 0.66 0.96\n", - " tabby cat 50 0.58 0.92\n", - " tiger cat 50 0.2 0.94\n", - " Persian cat 50 0.92 1\n", - " Siamese cat 50 0.94 0.98\n", - " Egyptian Mau 50 0.52 0.84\n", - " cougar 50 0.94 0.96\n", - " lynx 50 0.74 0.9\n", - " leopard 50 0.86 1\n", - " snow leopard 50 0.9 0.98\n", - " jaguar 50 0.72 0.92\n", - " lion 50 0.9 0.98\n", - " tiger 50 0.96 0.98\n", - " cheetah 50 0.94 0.98\n", - " brown bear 50 0.9 0.98\n", - " American black bear 50 0.9 0.98\n", - " polar bear 50 0.86 0.94\n", - " sloth bear 50 0.72 0.92\n", - " mongoose 50 0.7 0.86\n", - " meerkat 50 0.82 0.98\n", - " tiger beetle 50 0.9 0.94\n", - " ladybug 50 0.78 0.98\n", - " ground beetle 50 0.62 0.94\n", - " longhorn beetle 50 0.58 0.9\n", - " leaf beetle 50 0.66 0.98\n", - " dung beetle 50 0.88 
0.98\n", - " rhinoceros beetle 50 0.88 1\n", - " weevil 50 0.92 1\n", - " fly 50 0.78 0.94\n", - " bee 50 0.8 0.96\n", - " ant 50 0.68 0.84\n", - " grasshopper 50 0.48 0.9\n", - " cricket 50 0.66 0.94\n", - " stick insect 50 0.7 0.94\n", - " cockroach 50 0.72 0.84\n", - " mantis 50 0.72 0.9\n", - " cicada 50 0.9 0.96\n", - " leafhopper 50 0.9 0.96\n", - " lacewing 50 0.8 0.94\n", - " dragonfly 50 0.76 0.98\n", - " damselfly 50 0.82 1\n", - " red admiral 50 0.96 0.96\n", - " ringlet 50 0.88 1\n", - " monarch butterfly 50 0.9 0.96\n", - " small white 50 0.88 1\n", - " sulphur butterfly 50 0.92 1\n", - "gossamer-winged butterfly 50 0.9 1\n", - " starfish 50 0.82 0.94\n", - " sea urchin 50 0.84 0.98\n", - " sea cucumber 50 0.76 0.92\n", - " cottontail rabbit 50 0.7 0.98\n", - " hare 50 0.9 1\n", - " Angora rabbit 50 0.92 0.98\n", - " hamster 50 1 1\n", - " porcupine 50 0.9 0.98\n", - " fox squirrel 50 0.82 0.96\n", - " marmot 50 0.94 0.96\n", - " beaver 50 0.78 0.96\n", - " guinea pig 50 0.78 0.92\n", - " common sorrel 50 0.98 0.98\n", - " zebra 50 0.96 0.98\n", - " pig 50 0.54 0.82\n", - " wild boar 50 0.86 0.96\n", - " warthog 50 0.96 0.96\n", - " hippopotamus 50 0.9 1\n" - ] + "cell_type": "markdown", + "metadata": { + "id": "0eq1SMWl6Sfn" + }, + "source": [ + "# 2. Validate\n", + "Validate a model's accuracy on the [Imagenet](https://image-net.org/) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - " ox 50 0.52 0.94\n", - " water buffalo 50 0.86 0.94\n", - " bison 50 0.9 0.98\n", - " ram 50 0.62 0.98\n", - " bighorn sheep 50 0.72 1\n", - " Alpine ibex 50 0.96 0.98\n", - " hartebeest 50 0.94 1\n", - " impala 50 0.86 0.98\n", - " gazelle 50 0.74 0.96\n", - " dromedary 50 0.94 1\n", - " llama 50 0.86 0.94\n", - " weasel 50 0.42 0.96\n", - " mink 50 0.78 0.92\n", - " European polecat 50 0.54 0.88\n", - " black-footed ferret 50 0.74 0.96\n", - " otter 50 0.68 0.9\n", - " skunk 50 0.94 0.96\n", - " badger 50 0.88 0.92\n", - " armadillo 50 0.88 0.96\n", - " three-toed sloth 50 0.96 1\n", - " orangutan 50 0.82 0.9\n", - " gorilla 50 0.78 0.94\n", - " chimpanzee 50 0.86 0.94\n", - " gibbon 50 0.74 0.9\n", - " siamang 50 0.68 0.94\n", - " guenon 50 0.82 0.96\n", - " patas monkey 50 0.66 0.86\n", - " baboon 50 0.88 0.96\n", - " macaque 50 0.72 0.84\n", - " langur 50 0.56 0.78\n", - " black-and-white colobus 50 0.84 0.92\n", - " proboscis monkey 50 0.98 1\n", - " marmoset 50 0.7 0.92\n", - " white-headed capuchin 50 0.82 0.94\n", - " howler monkey 50 0.9 0.96\n", - " titi 50 0.54 0.9\n", - "Geoffroy's spider monkey 50 0.36 0.86\n", - " common squirrel monkey 50 0.76 0.92\n", - " ring-tailed lemur 50 0.7 0.94\n", - " indri 50 0.86 0.98\n", - " Asian elephant 50 0.54 0.96\n", - " African bush elephant 50 0.62 0.96\n", - " red panda 50 0.94 0.94\n", - " giant panda 50 0.92 0.98\n", - " snoek 50 0.76 0.9\n", - " eel 50 0.58 0.86\n", - " coho salmon 50 0.8 0.98\n", - " rock beauty 50 0.8 0.96\n", - " clownfish 50 0.8 0.98\n", - " sturgeon 50 0.76 0.96\n", - " garfish 50 0.7 0.82\n", - " lionfish 50 0.94 0.98\n", - " pufferfish 50 0.86 0.98\n", - " abacus 50 0.8 0.88\n", - " abaya 50 0.72 0.94\n", - " academic gown 50 0.44 0.94\n", - " accordion 50 0.78 0.96\n", - " acoustic guitar 50 0.54 0.78\n", - " aircraft carrier 50 0.7 0.98\n", - " airliner 50 0.92 1\n", - " airship 50 0.8 
0.88\n", - " altar 50 0.6 0.94\n", - " ambulance 50 0.84 0.98\n", - " amphibious vehicle 50 0.68 0.9\n", - " analog clock 50 0.5 0.88\n", - " apiary 50 0.9 1\n", - " apron 50 0.68 0.86\n", - " waste container 50 0.6 0.86\n", - " assault rifle 50 0.36 0.9\n", - " backpack 50 0.36 0.72\n", - " bakery 50 0.38 0.64\n", - " balance beam 50 0.84 0.98\n", - " balloon 50 0.88 0.96\n", - " ballpoint pen 50 0.52 0.96\n", - " Band-Aid 50 0.68 0.96\n", - " banjo 50 0.9 1\n", - " baluster 50 0.74 0.94\n", - " barbell 50 0.58 0.9\n", - " barber chair 50 0.72 0.9\n", - " barbershop 50 0.64 0.9\n", - " barn 50 0.96 0.96\n", - " barometer 50 0.86 0.96\n", - " barrel 50 0.64 0.86\n", - " wheelbarrow 50 0.64 0.92\n", - " baseball 50 0.76 0.96\n", - " basketball 50 0.88 0.98\n", - " bassinet 50 0.8 0.94\n", - " bassoon 50 0.84 0.98\n", - " swimming cap 50 0.7 0.88\n", - " bath towel 50 0.56 0.84\n", - " bathtub 50 0.34 0.86\n", - " station wagon 50 0.68 0.9\n", - " lighthouse 50 0.74 0.96\n", - " beaker 50 0.46 0.7\n", - " military cap 50 0.88 0.98\n", - " beer bottle 50 0.72 0.9\n", - " beer glass 50 0.72 0.9\n", - " bell-cot 50 0.6 0.96\n", - " bib 50 0.58 0.86\n", - " tandem bicycle 50 0.76 0.96\n", - " bikini 50 0.52 0.88\n", - " ring binder 50 0.7 0.86\n", - " binoculars 50 0.54 0.78\n", - " birdhouse 50 0.86 0.96\n", - " boathouse 50 0.78 0.96\n", - " bobsleigh 50 0.94 0.96\n", - " bolo tie 50 0.86 0.88\n", - " poke bonnet 50 0.68 0.88\n", - " bookcase 50 0.68 0.92\n", - " bookstore 50 0.58 0.88\n", - " bottle cap 50 0.62 0.8\n", - " bow 50 0.74 0.84\n", - " bow tie 50 0.68 0.92\n", - " brass 50 0.92 0.98\n", - " bra 50 0.52 0.76\n", - " breakwater 50 0.64 0.94\n", - " breastplate 50 0.36 0.9\n", - " broom 50 0.58 0.84\n", - " bucket 50 0.58 0.88\n", - " buckle 50 0.5 0.76\n", - " bulletproof vest 50 0.52 0.76\n", - " high-speed train 50 0.94 0.98\n", - " butcher shop 50 0.76 0.94\n", - " taxicab 50 0.7 0.92\n", - " cauldron 50 0.5 0.72\n", - " candle 50 0.5 0.76\n", - " cannon 50 0.88 0.96\n", - " canoe 50 0.94 1\n", - " can opener 50 0.72 0.88\n", - " cardigan 50 0.66 0.88\n", - " car mirror 50 0.94 0.98\n", - " carousel 50 0.96 0.96\n", - " tool kit 50 0.68 0.84\n", - " carton 50 0.44 0.78\n", - " car wheel 50 0.4 0.78\n", - "automated teller machine 50 0.82 0.94\n", - " cassette 50 0.62 0.84\n", - " cassette player 50 0.3 0.92\n", - " castle 50 0.74 0.9\n", - " catamaran 50 0.74 0.98\n", - " CD player 50 0.52 0.8\n", - " cello 50 0.84 1\n", - " mobile phone 50 0.72 0.86\n", - " chain 50 0.34 0.78\n", - " chain-link fence 50 0.7 0.86\n", - " chain mail 50 0.68 0.86\n", - " chainsaw 50 0.88 0.96\n", - " chest 50 0.7 0.88\n", - " chiffonier 50 0.32 0.64\n", - " chime 50 0.64 0.84\n", - " china cabinet 50 0.78 0.94\n", - " Christmas stocking 50 0.92 0.98\n", - " church 50 0.6 0.86\n", - " movie theater 50 0.68 0.9\n", - " cleaver 50 0.36 0.68\n", - " cliff dwelling 50 0.86 1\n", - " cloak 50 0.28 0.7\n", - " clogs 50 0.6 0.88\n", - " cocktail shaker 50 0.62 0.76\n", - " coffee mug 50 0.48 0.78\n", - " coffeemaker 50 0.62 0.92\n", - " coil 50 0.64 0.86\n", - " combination lock 50 0.62 0.92\n", - " computer keyboard 50 0.72 0.92\n", - " confectionery store 50 0.56 0.84\n", - " container ship 50 0.82 0.98\n", - " convertible 50 0.78 1\n", - " corkscrew 50 0.84 0.98\n", - " cornet 50 0.56 0.98\n", - " cowboy boot 50 0.66 0.78\n", - " cowboy hat 50 0.66 0.88\n", - " cradle 50 0.34 0.8\n", - " crane (machine) 50 0.8 0.92\n", - " crash helmet 50 0.92 0.96\n", - " crate 50 0.6 0.86\n", - " infant bed 50 0.8 
0.96\n", - " Crock Pot 50 0.78 0.88\n", - " croquet ball 50 0.9 1\n", - " crutch 50 0.42 0.7\n", - " cuirass 50 0.54 0.92\n", - " dam 50 0.78 0.92\n", - " desk 50 0.68 0.88\n", - " desktop computer 50 0.54 0.9\n", - " rotary dial telephone 50 0.92 0.96\n", - " diaper 50 0.68 0.84\n", - " digital clock 50 0.6 0.8\n", - " digital watch 50 0.56 0.82\n" - ] + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "WQPtK1QYVaD_", + "outputId": "92de5f34-cf41-49e7-b679-41db94e995ac" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "--2022-11-18 21:48:38-- https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar\n", + "Resolving image-net.org (image-net.org)... 171.64.68.16\n", + "Connecting to image-net.org (image-net.org)|171.64.68.16|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 6744924160 (6.3G) [application/x-tar]\n", + "Saving to: ‘ILSVRC2012_img_val.tar’\n", + "\n", + "ILSVRC2012_img_val. 100%[===================>] 6.28G 7.15MB/s in 11m 13s \n", + "\n", + "2022-11-18 21:59:52 (9.55 MB/s) - ‘ILSVRC2012_img_val.tar’ saved [6744924160/6744924160]\n", + "\n" + ] + } + ], + "source": [ + "# Download Imagenet val (6.3G, 50000 images)\n", + "!bash data/scripts/get_imagenet.sh --val" + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - " dining table 50 0.78 0.88\n", - " dishcloth 50 0.98 1\n", - " dishwasher 50 0.52 0.74\n", - " disc brake 50 0.96 1\n", - " dock 50 0.56 0.96\n", - " dog sled 50 0.9 0.98\n", - " dome 50 0.74 0.96\n", - " doormat 50 0.6 0.82\n", - " drilling rig 50 0.82 0.94\n", - " drum 50 0.4 0.72\n", - " drumstick 50 0.56 0.82\n", - " dumbbell 50 0.6 0.92\n", - " Dutch oven 50 0.66 0.88\n", - " electric fan 50 0.82 0.84\n", - " electric guitar 50 0.66 0.92\n", - " electric locomotive 50 0.92 0.98\n", - " entertainment center 50 0.92 1\n", - " envelope 50 0.58 0.88\n", - " espresso machine 50 0.72 0.94\n", - " face powder 50 0.76 0.92\n", - " feather boa 50 0.8 0.88\n", - " filing cabinet 50 0.84 0.98\n", - " fireboat 50 0.96 0.96\n", - " fire engine 50 0.82 0.92\n", - " fire screen sheet 50 0.52 0.78\n", - " flagpole 50 0.76 0.92\n", - " flute 50 0.4 0.76\n", - " folding chair 50 0.68 0.9\n", - " football helmet 50 0.9 0.96\n", - " forklift 50 0.8 0.94\n", - " fountain 50 0.88 0.92\n", - " fountain pen 50 0.76 0.92\n", - " four-poster bed 50 0.82 0.92\n", - " freight car 50 0.98 0.98\n", - " French horn 50 0.76 0.92\n", - " frying pan 50 0.48 0.82\n", - " fur coat 50 0.86 0.96\n", - " garbage truck 50 0.9 0.98\n", - " gas mask 50 0.82 0.92\n", - " gas pump 50 0.82 0.98\n", - " goblet 50 0.64 0.9\n", - " go-kart 50 0.9 1\n", - " golf ball 50 0.86 0.96\n", - " golf cart 50 0.76 0.9\n", - " gondola 50 0.94 0.98\n", - " gong 50 0.74 0.92\n", - " gown 50 0.72 0.94\n", - " grand piano 50 0.74 0.96\n", - " greenhouse 50 0.84 1\n", - " grille 50 0.72 0.88\n", - " grocery store 50 0.68 0.9\n", - " guillotine 50 0.84 0.94\n", - " barrette 50 0.48 0.68\n", - " hair spray 50 0.4 0.76\n", - " half-track 50 0.76 0.96\n", - " hammer 50 0.54 0.78\n", - " hamper 50 0.72 0.9\n", - " hair dryer 50 0.7 0.8\n", - " hand-held computer 50 0.52 0.88\n", - " handkerchief 50 0.8 0.96\n", - " hard disk drive 50 0.78 0.86\n", - " harmonica 50 0.68 0.96\n", - " harp 50 0.9 0.96\n", - " harvester 50 0.86 1\n", - " hatchet 50 0.6 0.84\n", - " holster 50 0.7 0.84\n", - " home theater 50 0.72 0.96\n", - " honeycomb 50 0.74 0.86\n", - " hook 
50 0.28 0.62\n", - " hoop skirt 50 0.68 0.8\n", - " horizontal bar 50 0.76 0.98\n", - " horse-drawn vehicle 50 0.9 0.9\n", - " hourglass 50 0.92 0.98\n", - " iPod 50 0.9 0.94\n", - " clothes iron 50 0.72 0.9\n", - " jack-o'-lantern 50 0.94 0.98\n", - " jeans 50 0.7 0.82\n", - " jeep 50 0.76 0.9\n", - " T-shirt 50 0.72 0.94\n", - " jigsaw puzzle 50 0.92 0.96\n", - " pulled rickshaw 50 0.88 0.96\n", - " joystick 50 0.74 0.98\n", - " kimono 50 0.78 0.94\n", - " knee pad 50 0.7 0.86\n", - " knot 50 0.8 0.86\n", - " lab coat 50 0.82 0.98\n", - " ladle 50 0.26 0.64\n", - " lampshade 50 0.62 0.8\n", - " laptop computer 50 0.2 0.88\n", - " lawn mower 50 0.8 0.96\n", - " lens cap 50 0.5 0.8\n", - " paper knife 50 0.3 0.58\n", - " library 50 0.62 0.92\n", - " lifeboat 50 0.94 0.98\n", - " lighter 50 0.56 0.8\n", - " limousine 50 0.74 0.92\n", - " ocean liner 50 0.88 0.96\n", - " lipstick 50 0.7 0.88\n", - " slip-on shoe 50 0.82 0.94\n", - " lotion 50 0.56 0.9\n", - " speaker 50 0.58 0.64\n", - " loupe 50 0.32 0.54\n", - " sawmill 50 0.74 0.9\n", - " magnetic compass 50 0.48 0.78\n", - " mail bag 50 0.64 0.94\n", - " mailbox 50 0.82 0.92\n", - " tights 50 0.28 0.9\n", - " tank suit 50 0.3 0.88\n", - " manhole cover 50 0.94 0.98\n", - " maraca 50 0.72 0.86\n", - " marimba 50 0.84 0.94\n", - " mask 50 0.48 0.78\n", - " match 50 0.74 0.92\n", - " maypole 50 0.96 1\n", - " maze 50 0.82 1\n", - " measuring cup 50 0.66 0.82\n", - " medicine chest 50 0.6 0.9\n", - " megalith 50 0.84 0.92\n", - " microphone 50 0.56 0.74\n", - " microwave oven 50 0.56 0.8\n", - " military uniform 50 0.62 0.86\n", - " milk can 50 0.7 0.82\n", - " minibus 50 0.68 1\n", - " miniskirt 50 0.58 0.84\n", - " minivan 50 0.48 0.8\n", - " missile 50 0.34 0.82\n", - " mitten 50 0.76 0.88\n", - " mixing bowl 50 0.82 0.98\n", - " mobile home 50 0.58 0.8\n", - " Model T 50 0.92 0.96\n", - " modem 50 0.7 0.9\n", - " monastery 50 0.52 0.86\n", - " monitor 50 0.34 0.86\n", - " moped 50 0.56 0.94\n", - " mortar 50 0.72 0.88\n", - " square academic cap 50 0.48 0.82\n", - " mosque 50 0.98 1\n", - " mosquito net 50 0.96 0.98\n", - " scooter 50 0.88 0.98\n", - " mountain bike 50 0.74 0.96\n", - " tent 50 0.88 0.96\n", - " computer mouse 50 0.38 0.82\n", - " mousetrap 50 0.82 0.9\n", - " moving van 50 0.48 0.8\n", - " muzzle 50 0.5 0.74\n", - " nail 50 0.68 0.76\n", - " neck brace 50 0.62 0.72\n", - " necklace 50 0.92 1\n", - " nipple 50 0.8 0.92\n", - " notebook computer 50 0.34 0.88\n", - " obelisk 50 0.82 0.94\n", - " oboe 50 0.62 0.84\n", - " ocarina 50 0.82 0.88\n", - " odometer 50 0.98 1\n", - " oil filter 50 0.6 0.82\n", - " organ 50 0.84 0.94\n", - " oscilloscope 50 0.94 0.96\n", - " overskirt 50 0.2 0.62\n", - " bullock cart 50 0.76 0.94\n", - " oxygen mask 50 0.48 0.8\n", - " packet 50 0.54 0.74\n", - " paddle 50 0.7 0.94\n", - " paddle wheel 50 0.92 0.98\n", - " padlock 50 0.64 0.78\n", - " paintbrush 50 0.66 0.78\n", - " pajamas 50 0.68 0.94\n", - " palace 50 0.66 0.94\n", - " pan flute 50 0.84 0.86\n", - " paper towel 50 0.68 0.86\n", - " parachute 50 0.92 0.96\n", - " parallel bars 50 0.68 0.96\n", - " park bench 50 0.82 0.94\n", - " parking meter 50 0.86 0.98\n", - " passenger car 50 0.48 0.86\n" - ] + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "X58w8JLpMnjH", + "outputId": "9961ad87-d639-4489-b578-0a0578fefaab" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[34m\u001b[1mclassify/val: 
\u001b[0mdata=../datasets/imagenet, weights=['yolov5s-cls.pt'], batch_size=128, imgsz=224, device=, workers=8, verbose=True, project=runs/val-cls, name=exp, exist_ok=False, half=True, dnn=False\n", + "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "Fusing layers... \n", + "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", + "validating: 100% 391/391 [04:48<00:00, 1.35it/s]\n", + " Class Images top1_acc top5_acc\n", + " all 50000 0.715 0.902\n", + " tench 50 0.94 0.98\n", + " goldfish 50 0.88 0.92\n", + " great white shark 50 0.78 0.96\n", + " tiger shark 50 0.68 0.96\n", + " hammerhead shark 50 0.82 0.92\n", + " electric ray 50 0.76 0.9\n", + " stingray 50 0.7 0.9\n", + " cock 50 0.78 0.92\n", + " hen 50 0.84 0.96\n", + " ostrich 50 0.98 1\n", + " brambling 50 0.9 0.96\n", + " goldfinch 50 0.92 0.98\n", + " house finch 50 0.88 0.96\n", + " junco 50 0.94 0.98\n", + " indigo bunting 50 0.86 0.88\n", + " American robin 50 0.9 0.96\n", + " bulbul 50 0.84 0.96\n", + " jay 50 0.9 0.96\n", + " magpie 50 0.84 0.96\n", + " chickadee 50 0.9 1\n", + " American dipper 50 0.82 0.92\n", + " kite 50 0.76 0.94\n", + " bald eagle 50 0.92 1\n", + " vulture 50 0.96 1\n", + " great grey owl 50 0.94 0.98\n", + " fire salamander 50 0.96 0.98\n", + " smooth newt 50 0.58 0.94\n", + " newt 50 0.74 0.9\n", + " spotted salamander 50 0.86 0.94\n", + " axolotl 50 0.86 0.96\n", + " American bullfrog 50 0.78 0.92\n", + " tree frog 50 0.84 0.96\n", + " tailed frog 50 0.48 0.8\n", + " loggerhead sea turtle 50 0.68 0.94\n", + " leatherback sea turtle 50 0.5 0.8\n", + " mud turtle 50 0.64 0.84\n", + " terrapin 50 0.52 0.98\n", + " box turtle 50 0.84 0.98\n", + " banded gecko 50 0.7 0.88\n", + " green iguana 50 0.76 0.94\n", + " Carolina anole 50 0.58 0.96\n", + "desert grassland whiptail lizard 50 0.82 0.94\n", + " agama 50 0.74 0.92\n", + " frilled-necked lizard 50 0.84 0.86\n", + " alligator lizard 50 0.58 0.78\n", + " Gila monster 50 0.72 0.8\n", + " European green lizard 50 0.42 0.9\n", + " chameleon 50 0.76 0.84\n", + " Komodo dragon 50 0.86 0.96\n", + " Nile crocodile 50 0.7 0.84\n", + " American alligator 50 0.76 0.96\n", + " triceratops 50 0.9 0.94\n", + " worm snake 50 0.76 0.88\n", + " ring-necked snake 50 0.8 0.92\n", + " eastern hog-nosed snake 50 0.58 0.88\n", + " smooth green snake 50 0.6 0.94\n", + " kingsnake 50 0.82 0.9\n", + " garter snake 50 0.88 0.94\n", + " water snake 50 0.7 0.94\n", + " vine snake 50 0.66 0.76\n", + " night snake 50 0.34 0.82\n", + " boa constrictor 50 0.8 0.96\n", + " African rock python 50 0.48 0.76\n", + " Indian cobra 50 0.82 0.94\n", + " green mamba 50 0.54 0.86\n", + " sea snake 50 0.62 0.9\n", + " Saharan horned viper 50 0.56 0.86\n", + "eastern diamondback rattlesnake 50 0.6 0.86\n", + " sidewinder 50 0.28 0.86\n", + " trilobite 50 0.98 0.98\n", + " harvestman 50 0.86 0.94\n", + " scorpion 50 0.86 0.94\n", + " yellow garden spider 50 0.92 0.96\n", + " barn spider 50 0.38 0.98\n", + " European garden spider 50 0.62 0.98\n", + " southern black widow 50 0.88 0.94\n", + " tarantula 50 0.94 1\n", + " wolf spider 50 0.82 0.92\n", + " tick 50 0.74 0.84\n", + " centipede 50 0.68 0.82\n", + " black grouse 50 0.88 0.98\n", + " ptarmigan 50 0.78 0.94\n", + " ruffed grouse 50 0.88 1\n", + " prairie grouse 50 0.92 1\n", + " peacock 50 0.88 0.9\n", + " quail 50 0.9 0.94\n", + " partridge 50 0.74 0.96\n", + " grey parrot 50 0.9 0.96\n", + " macaw 50 0.88 0.98\n", + "sulphur-crested cockatoo 50 0.86 0.92\n", + " 
lorikeet 50 0.96 1\n", + " coucal 50 0.82 0.88\n", + " bee eater 50 0.96 0.98\n", + " hornbill 50 0.9 0.96\n", + " hummingbird 50 0.88 0.96\n", + " jacamar 50 0.92 0.94\n", + " toucan 50 0.84 0.94\n", + " duck 50 0.76 0.94\n", + " red-breasted merganser 50 0.86 0.96\n", + " goose 50 0.74 0.96\n", + " black swan 50 0.94 0.98\n", + " tusker 50 0.54 0.92\n", + " echidna 50 0.98 1\n", + " platypus 50 0.72 0.84\n", + " wallaby 50 0.78 0.88\n", + " koala 50 0.84 0.92\n", + " wombat 50 0.78 0.84\n", + " jellyfish 50 0.88 0.96\n", + " sea anemone 50 0.72 0.9\n", + " brain coral 50 0.88 0.96\n", + " flatworm 50 0.8 0.98\n", + " nematode 50 0.86 0.9\n", + " conch 50 0.74 0.88\n", + " snail 50 0.78 0.88\n", + " slug 50 0.74 0.82\n", + " sea slug 50 0.88 0.98\n", + " chiton 50 0.88 0.98\n", + " chambered nautilus 50 0.88 0.92\n", + " Dungeness crab 50 0.78 0.94\n", + " rock crab 50 0.68 0.86\n", + " fiddler crab 50 0.64 0.86\n", + " red king crab 50 0.76 0.96\n", + " American lobster 50 0.78 0.96\n", + " spiny lobster 50 0.74 0.88\n", + " crayfish 50 0.56 0.86\n", + " hermit crab 50 0.78 0.96\n", + " isopod 50 0.66 0.78\n", + " white stork 50 0.88 0.96\n", + " black stork 50 0.84 0.98\n", + " spoonbill 50 0.96 1\n", + " flamingo 50 0.94 1\n", + " little blue heron 50 0.92 0.98\n", + " great egret 50 0.9 0.96\n", + " bittern 50 0.86 0.94\n", + " crane (bird) 50 0.62 0.9\n", + " limpkin 50 0.98 1\n", + " common gallinule 50 0.92 0.96\n", + " American coot 50 0.9 0.98\n", + " bustard 50 0.92 0.96\n", + " ruddy turnstone 50 0.94 1\n", + " dunlin 50 0.86 0.94\n", + " common redshank 50 0.9 0.96\n", + " dowitcher 50 0.84 0.96\n", + " oystercatcher 50 0.86 0.94\n", + " pelican 50 0.92 0.96\n", + " king penguin 50 0.88 0.96\n", + " albatross 50 0.9 1\n", + " grey whale 50 0.84 0.92\n", + " killer whale 50 0.92 1\n", + " dugong 50 0.84 0.96\n", + " sea lion 50 0.82 0.92\n", + " Chihuahua 50 0.66 0.84\n", + " Japanese Chin 50 0.72 0.98\n", + " Maltese 50 0.76 0.94\n", + " Pekingese 50 0.84 0.94\n", + " Shih Tzu 50 0.74 0.96\n", + " King Charles Spaniel 50 0.88 0.98\n", + " Papillon 50 0.86 0.94\n", + " toy terrier 50 0.48 0.94\n", + " Rhodesian Ridgeback 50 0.76 0.98\n", + " Afghan Hound 50 0.84 1\n", + " Basset Hound 50 0.8 0.92\n", + " Beagle 50 0.82 0.96\n", + " Bloodhound 50 0.48 0.72\n", + " Bluetick Coonhound 50 0.86 0.94\n", + " Black and Tan Coonhound 50 0.54 0.8\n", + "Treeing Walker Coonhound 50 0.66 0.98\n", + " English foxhound 50 0.32 0.84\n", + " Redbone Coonhound 50 0.62 0.94\n", + " borzoi 50 0.92 1\n", + " Irish Wolfhound 50 0.48 0.88\n", + " Italian Greyhound 50 0.76 0.98\n", + " Whippet 50 0.74 0.92\n", + " Ibizan Hound 50 0.6 0.86\n", + " Norwegian Elkhound 50 0.88 0.98\n", + " Otterhound 50 0.62 0.9\n", + " Saluki 50 0.72 0.92\n", + " Scottish Deerhound 50 0.86 0.98\n", + " Weimaraner 50 0.88 0.94\n", + "Staffordshire Bull Terrier 50 0.66 0.98\n", + "American Staffordshire Terrier 50 0.64 0.92\n", + " Bedlington Terrier 50 0.9 0.92\n", + " Border Terrier 50 0.86 0.92\n", + " Kerry Blue Terrier 50 0.78 0.98\n", + " Irish Terrier 50 0.7 0.96\n", + " Norfolk Terrier 50 0.68 0.9\n", + " Norwich Terrier 50 0.72 1\n", + " Yorkshire Terrier 50 0.66 0.9\n", + " Wire Fox Terrier 50 0.64 0.98\n", + " Lakeland Terrier 50 0.74 0.92\n", + " Sealyham Terrier 50 0.76 0.9\n", + " Airedale Terrier 50 0.82 0.92\n", + " Cairn Terrier 50 0.76 0.9\n", + " Australian Terrier 50 0.48 0.84\n", + " Dandie Dinmont Terrier 50 0.82 0.92\n", + " Boston Terrier 50 0.92 1\n", + " Miniature Schnauzer 50 0.68 0.9\n", + " 
Giant Schnauzer 50 0.72 0.98\n", + " Standard Schnauzer 50 0.74 1\n", + " Scottish Terrier 50 0.76 0.96\n", + " Tibetan Terrier 50 0.48 1\n", + "Australian Silky Terrier 50 0.66 0.96\n", + "Soft-coated Wheaten Terrier 50 0.74 0.96\n", + "West Highland White Terrier 50 0.88 0.96\n", + " Lhasa Apso 50 0.68 0.96\n", + " Flat-Coated Retriever 50 0.72 0.94\n", + " Curly-coated Retriever 50 0.82 0.94\n", + " Golden Retriever 50 0.86 0.94\n", + " Labrador Retriever 50 0.82 0.94\n", + "Chesapeake Bay Retriever 50 0.76 0.96\n", + "German Shorthaired Pointer 50 0.8 0.96\n", + " Vizsla 50 0.68 0.96\n", + " English Setter 50 0.7 1\n", + " Irish Setter 50 0.8 0.9\n", + " Gordon Setter 50 0.84 0.92\n", + " Brittany 50 0.84 0.96\n", + " Clumber Spaniel 50 0.92 0.96\n", + "English Springer Spaniel 50 0.88 1\n", + " Welsh Springer Spaniel 50 0.92 1\n", + " Cocker Spaniels 50 0.7 0.94\n", + " Sussex Spaniel 50 0.72 0.92\n", + " Irish Water Spaniel 50 0.88 0.98\n", + " Kuvasz 50 0.66 0.9\n", + " Schipperke 50 0.9 0.98\n", + " Groenendael 50 0.8 0.94\n", + " Malinois 50 0.86 0.98\n", + " Briard 50 0.52 0.8\n", + " Australian Kelpie 50 0.6 0.88\n", + " Komondor 50 0.88 0.94\n", + " Old English Sheepdog 50 0.94 0.98\n", + " Shetland Sheepdog 50 0.74 0.9\n", + " collie 50 0.6 0.96\n", + " Border Collie 50 0.74 0.96\n", + " Bouvier des Flandres 50 0.78 0.94\n", + " Rottweiler 50 0.88 0.96\n", + " German Shepherd Dog 50 0.8 0.98\n", + " Dobermann 50 0.68 0.96\n", + " Miniature Pinscher 50 0.76 0.88\n", + "Greater Swiss Mountain Dog 50 0.68 0.94\n", + " Bernese Mountain Dog 50 0.96 1\n", + " Appenzeller Sennenhund 50 0.22 1\n", + " Entlebucher Sennenhund 50 0.64 0.98\n", + " Boxer 50 0.7 0.92\n", + " Bullmastiff 50 0.78 0.98\n", + " Tibetan Mastiff 50 0.88 0.96\n", + " French Bulldog 50 0.84 0.94\n", + " Great Dane 50 0.54 0.9\n", + " St. 
Bernard 50 0.92 1\n", + " husky 50 0.46 0.98\n", + " Alaskan Malamute 50 0.76 0.96\n", + " Siberian Husky 50 0.46 0.98\n", + " Dalmatian 50 0.94 0.98\n", + " Affenpinscher 50 0.78 0.9\n", + " Basenji 50 0.92 0.94\n", + " pug 50 0.94 0.98\n", + " Leonberger 50 1 1\n", + " Newfoundland 50 0.78 0.96\n", + " Pyrenean Mountain Dog 50 0.78 0.96\n", + " Samoyed 50 0.96 1\n", + " Pomeranian 50 0.98 1\n", + " Chow Chow 50 0.9 0.96\n", + " Keeshond 50 0.88 0.94\n", + " Griffon Bruxellois 50 0.84 0.98\n", + " Pembroke Welsh Corgi 50 0.82 0.94\n", + " Cardigan Welsh Corgi 50 0.66 0.98\n", + " Toy Poodle 50 0.52 0.88\n", + " Miniature Poodle 50 0.52 0.92\n", + " Standard Poodle 50 0.8 1\n", + " Mexican hairless dog 50 0.88 0.98\n", + " grey wolf 50 0.82 0.92\n", + " Alaskan tundra wolf 50 0.78 0.98\n", + " red wolf 50 0.48 0.9\n", + " coyote 50 0.64 0.86\n", + " dingo 50 0.76 0.88\n", + " dhole 50 0.9 0.98\n", + " African wild dog 50 0.98 1\n", + " hyena 50 0.88 0.96\n", + " red fox 50 0.54 0.92\n", + " kit fox 50 0.72 0.98\n", + " Arctic fox 50 0.94 1\n", + " grey fox 50 0.7 0.94\n", + " tabby cat 50 0.54 0.92\n", + " tiger cat 50 0.22 0.94\n", + " Persian cat 50 0.9 0.98\n", + " Siamese cat 50 0.96 1\n", + " Egyptian Mau 50 0.54 0.8\n", + " cougar 50 0.9 1\n", + " lynx 50 0.72 0.88\n", + " leopard 50 0.78 0.98\n", + " snow leopard 50 0.9 0.98\n", + " jaguar 50 0.7 0.94\n", + " lion 50 0.9 0.98\n", + " tiger 50 0.92 0.98\n", + " cheetah 50 0.94 0.98\n", + " brown bear 50 0.94 0.98\n", + " American black bear 50 0.8 1\n", + " polar bear 50 0.84 0.96\n", + " sloth bear 50 0.72 0.92\n", + " mongoose 50 0.7 0.92\n", + " meerkat 50 0.82 0.92\n", + " tiger beetle 50 0.92 0.94\n", + " ladybug 50 0.86 0.94\n", + " ground beetle 50 0.64 0.94\n", + " longhorn beetle 50 0.62 0.88\n", + " leaf beetle 50 0.64 0.98\n", + " dung beetle 50 0.86 0.98\n", + " rhinoceros beetle 50 0.86 0.94\n", + " weevil 50 0.9 1\n", + " fly 50 0.78 0.94\n", + " bee 50 0.68 0.94\n", + " ant 50 0.68 0.78\n", + " grasshopper 50 0.5 0.92\n", + " cricket 50 0.64 0.92\n", + " stick insect 50 0.64 0.92\n", + " cockroach 50 0.72 0.8\n", + " mantis 50 0.64 0.86\n", + " cicada 50 0.9 0.96\n", + " leafhopper 50 0.88 0.94\n", + " lacewing 50 0.78 0.92\n", + " dragonfly 50 0.82 0.98\n", + " damselfly 50 0.82 1\n", + " red admiral 50 0.94 0.96\n", + " ringlet 50 0.86 0.98\n", + " monarch butterfly 50 0.9 0.92\n", + " small white 50 0.9 1\n", + " sulphur butterfly 50 0.92 1\n", + "gossamer-winged butterfly 50 0.88 1\n", + " starfish 50 0.88 0.92\n", + " sea urchin 50 0.84 0.94\n", + " sea cucumber 50 0.66 0.84\n", + " cottontail rabbit 50 0.72 0.94\n", + " hare 50 0.84 0.96\n", + " Angora rabbit 50 0.94 0.98\n", + " hamster 50 0.96 1\n", + " porcupine 50 0.88 0.98\n", + " fox squirrel 50 0.76 0.94\n", + " marmot 50 0.92 0.96\n", + " beaver 50 0.78 0.94\n", + " guinea pig 50 0.78 0.94\n", + " common sorrel 50 0.96 0.98\n", + " zebra 50 0.94 0.96\n", + " pig 50 0.5 0.76\n", + " wild boar 50 0.84 0.96\n", + " warthog 50 0.84 0.96\n", + " hippopotamus 50 0.88 0.96\n", + " ox 50 0.48 0.94\n", + " water buffalo 50 0.78 0.94\n", + " bison 50 0.88 0.96\n", + " ram 50 0.58 0.92\n", + " bighorn sheep 50 0.66 1\n", + " Alpine ibex 50 0.92 0.98\n", + " hartebeest 50 0.94 1\n", + " impala 50 0.82 0.96\n", + " gazelle 50 0.7 0.96\n", + " dromedary 50 0.9 1\n", + " llama 50 0.82 0.94\n", + " weasel 50 0.44 0.92\n", + " mink 50 0.78 0.96\n", + " European polecat 50 0.46 0.9\n", + " black-footed ferret 50 0.68 0.96\n", + " otter 50 0.66 0.88\n", + " skunk 50 0.96 
0.96\n", + " badger 50 0.86 0.92\n", + " armadillo 50 0.88 0.9\n", + " three-toed sloth 50 0.96 1\n", + " orangutan 50 0.78 0.92\n", + " gorilla 50 0.82 0.94\n", + " chimpanzee 50 0.84 0.94\n", + " gibbon 50 0.76 0.86\n", + " siamang 50 0.68 0.94\n", + " guenon 50 0.8 0.94\n", + " patas monkey 50 0.62 0.82\n", + " baboon 50 0.9 0.98\n", + " macaque 50 0.8 0.86\n", + " langur 50 0.6 0.82\n", + " black-and-white colobus 50 0.86 0.9\n", + " proboscis monkey 50 1 1\n", + " marmoset 50 0.74 0.98\n", + " white-headed capuchin 50 0.72 0.9\n", + " howler monkey 50 0.86 0.94\n", + " titi 50 0.5 0.9\n", + "Geoffroy's spider monkey 50 0.42 0.8\n", + " common squirrel monkey 50 0.76 0.92\n", + " ring-tailed lemur 50 0.72 0.94\n", + " indri 50 0.9 0.96\n", + " Asian elephant 50 0.58 0.92\n", + " African bush elephant 50 0.7 0.98\n", + " red panda 50 0.94 0.94\n", + " giant panda 50 0.94 0.98\n", + " snoek 50 0.74 0.9\n", + " eel 50 0.6 0.84\n", + " coho salmon 50 0.84 0.96\n", + " rock beauty 50 0.88 0.98\n", + " clownfish 50 0.78 0.98\n", + " sturgeon 50 0.68 0.94\n", + " garfish 50 0.62 0.8\n", + " lionfish 50 0.96 0.96\n", + " pufferfish 50 0.88 0.96\n", + " abacus 50 0.74 0.88\n", + " abaya 50 0.84 0.92\n", + " academic gown 50 0.42 0.86\n", + " accordion 50 0.8 0.9\n", + " acoustic guitar 50 0.5 0.76\n", + " aircraft carrier 50 0.8 0.96\n", + " airliner 50 0.92 1\n", + " airship 50 0.76 0.82\n", + " altar 50 0.64 0.98\n", + " ambulance 50 0.88 0.98\n", + " amphibious vehicle 50 0.64 0.94\n", + " analog clock 50 0.52 0.92\n", + " apiary 50 0.82 0.96\n", + " apron 50 0.7 0.84\n", + " waste container 50 0.4 0.8\n", + " assault rifle 50 0.42 0.84\n", + " backpack 50 0.34 0.64\n", + " bakery 50 0.4 0.68\n", + " balance beam 50 0.8 0.98\n", + " balloon 50 0.86 0.96\n", + " ballpoint pen 50 0.52 0.96\n", + " Band-Aid 50 0.7 0.9\n", + " banjo 50 0.84 1\n", + " baluster 50 0.68 0.94\n", + " barbell 50 0.56 0.9\n", + " barber chair 50 0.7 0.92\n", + " barbershop 50 0.54 0.86\n", + " barn 50 0.96 0.96\n", + " barometer 50 0.84 0.98\n", + " barrel 50 0.56 0.88\n", + " wheelbarrow 50 0.66 0.88\n", + " baseball 50 0.74 0.98\n", + " basketball 50 0.88 0.98\n", + " bassinet 50 0.66 0.92\n", + " bassoon 50 0.74 0.98\n", + " swimming cap 50 0.62 0.88\n", + " bath towel 50 0.54 0.78\n", + " bathtub 50 0.4 0.88\n", + " station wagon 50 0.66 0.84\n", + " lighthouse 50 0.78 0.94\n", + " beaker 50 0.52 0.68\n", + " military cap 50 0.84 0.96\n", + " beer bottle 50 0.66 0.88\n", + " beer glass 50 0.6 0.84\n", + " bell-cot 50 0.56 0.96\n", + " bib 50 0.58 0.82\n", + " tandem bicycle 50 0.86 0.96\n", + " bikini 50 0.56 0.88\n", + " ring binder 50 0.64 0.84\n", + " binoculars 50 0.54 0.78\n", + " birdhouse 50 0.86 0.94\n", + " boathouse 50 0.74 0.92\n", + " bobsleigh 50 0.92 0.96\n", + " bolo tie 50 0.8 0.94\n", + " poke bonnet 50 0.64 0.86\n", + " bookcase 50 0.66 0.92\n", + " bookstore 50 0.62 0.88\n", + " bottle cap 50 0.58 0.7\n", + " bow 50 0.72 0.86\n", + " bow tie 50 0.7 0.9\n", + " brass 50 0.92 0.96\n", + " bra 50 0.5 0.7\n", + " breakwater 50 0.62 0.86\n", + " breastplate 50 0.4 0.9\n", + " broom 50 0.6 0.86\n", + " bucket 50 0.66 0.8\n", + " buckle 50 0.5 0.68\n", + " bulletproof vest 50 0.5 0.78\n", + " high-speed train 50 0.94 0.96\n", + " butcher shop 50 0.74 0.94\n", + " taxicab 50 0.64 0.86\n", + " cauldron 50 0.44 0.66\n", + " candle 50 0.48 0.74\n", + " cannon 50 0.88 0.94\n", + " canoe 50 0.94 1\n", + " can opener 50 0.66 0.86\n", + " cardigan 50 0.68 0.8\n", + " car mirror 50 0.94 0.96\n", + " carousel 50 
0.94 0.98\n", + " tool kit 50 0.56 0.78\n", + " carton 50 0.42 0.7\n", + " car wheel 50 0.38 0.74\n", + "automated teller machine 50 0.76 0.94\n", + " cassette 50 0.52 0.8\n", + " cassette player 50 0.28 0.9\n", + " castle 50 0.78 0.88\n", + " catamaran 50 0.78 1\n", + " CD player 50 0.52 0.82\n", + " cello 50 0.82 1\n", + " mobile phone 50 0.68 0.86\n", + " chain 50 0.38 0.66\n", + " chain-link fence 50 0.7 0.84\n", + " chain mail 50 0.64 0.9\n", + " chainsaw 50 0.84 0.92\n", + " chest 50 0.68 0.92\n", + " chiffonier 50 0.26 0.64\n", + " chime 50 0.62 0.84\n", + " china cabinet 50 0.82 0.96\n", + " Christmas stocking 50 0.92 0.94\n", + " church 50 0.62 0.9\n", + " movie theater 50 0.58 0.88\n", + " cleaver 50 0.32 0.62\n", + " cliff dwelling 50 0.88 1\n", + " cloak 50 0.32 0.64\n", + " clogs 50 0.58 0.88\n", + " cocktail shaker 50 0.62 0.7\n", + " coffee mug 50 0.44 0.72\n", + " coffeemaker 50 0.64 0.92\n", + " coil 50 0.66 0.84\n", + " combination lock 50 0.64 0.84\n", + " computer keyboard 50 0.7 0.82\n", + " confectionery store 50 0.54 0.86\n", + " container ship 50 0.82 0.98\n", + " convertible 50 0.78 0.98\n", + " corkscrew 50 0.82 0.92\n", + " cornet 50 0.46 0.88\n", + " cowboy boot 50 0.64 0.8\n", + " cowboy hat 50 0.64 0.82\n", + " cradle 50 0.38 0.8\n", + " crane (machine) 50 0.78 0.94\n", + " crash helmet 50 0.92 0.96\n", + " crate 50 0.52 0.82\n", + " infant bed 50 0.74 1\n", + " Crock Pot 50 0.78 0.9\n", + " croquet ball 50 0.9 0.96\n", + " crutch 50 0.46 0.7\n", + " cuirass 50 0.54 0.86\n", + " dam 50 0.74 0.92\n", + " desk 50 0.6 0.86\n", + " desktop computer 50 0.54 0.94\n", + " rotary dial telephone 50 0.88 0.94\n", + " diaper 50 0.68 0.84\n", + " digital clock 50 0.54 0.76\n", + " digital watch 50 0.58 0.86\n", + " dining table 50 0.76 0.9\n", + " dishcloth 50 0.94 1\n", + " dishwasher 50 0.44 0.78\n", + " disc brake 50 0.98 1\n", + " dock 50 0.54 0.94\n", + " dog sled 50 0.84 1\n", + " dome 50 0.72 0.92\n", + " doormat 50 0.56 0.82\n", + " drilling rig 50 0.84 0.96\n", + " drum 50 0.38 0.68\n", + " drumstick 50 0.56 0.72\n", + " dumbbell 50 0.62 0.9\n", + " Dutch oven 50 0.7 0.84\n", + " electric fan 50 0.82 0.86\n", + " electric guitar 50 0.62 0.84\n", + " electric locomotive 50 0.92 0.98\n", + " entertainment center 50 0.9 0.98\n", + " envelope 50 0.44 0.86\n", + " espresso machine 50 0.72 0.94\n", + " face powder 50 0.7 0.92\n", + " feather boa 50 0.7 0.84\n", + " filing cabinet 50 0.88 0.98\n", + " fireboat 50 0.94 0.98\n", + " fire engine 50 0.84 0.9\n", + " fire screen sheet 50 0.62 0.76\n", + " flagpole 50 0.74 0.88\n", + " flute 50 0.36 0.72\n", + " folding chair 50 0.62 0.84\n", + " football helmet 50 0.86 0.94\n", + " forklift 50 0.8 0.92\n", + " fountain 50 0.84 0.94\n", + " fountain pen 50 0.76 0.92\n", + " four-poster bed 50 0.78 0.94\n", + " freight car 50 0.96 1\n", + " French horn 50 0.76 0.92\n", + " frying pan 50 0.36 0.78\n", + " fur coat 50 0.84 0.96\n", + " garbage truck 50 0.9 0.98\n", + " gas mask 50 0.84 0.92\n", + " gas pump 50 0.9 0.98\n", + " goblet 50 0.68 0.82\n", + " go-kart 50 0.9 1\n", + " golf ball 50 0.84 0.9\n", + " golf cart 50 0.78 0.86\n", + " gondola 50 0.98 0.98\n", + " gong 50 0.74 0.92\n", + " gown 50 0.62 0.96\n", + " grand piano 50 0.7 0.96\n", + " greenhouse 50 0.8 0.98\n", + " grille 50 0.72 0.9\n", + " grocery store 50 0.66 0.94\n", + " guillotine 50 0.86 0.92\n", + " barrette 50 0.52 0.66\n", + " hair spray 50 0.5 0.74\n", + " half-track 50 0.78 0.9\n", + " hammer 50 0.56 0.76\n", + " hamper 50 0.64 0.84\n", + " hair dryer 
50 0.56 0.74\n", + " hand-held computer 50 0.42 0.86\n", + " handkerchief 50 0.78 0.94\n", + " hard disk drive 50 0.76 0.84\n", + " harmonica 50 0.7 0.88\n", + " harp 50 0.88 0.96\n", + " harvester 50 0.78 1\n", + " hatchet 50 0.54 0.74\n", + " holster 50 0.66 0.84\n", + " home theater 50 0.64 0.94\n", + " honeycomb 50 0.56 0.88\n", + " hook 50 0.3 0.6\n", + " hoop skirt 50 0.64 0.86\n", + " horizontal bar 50 0.68 0.98\n", + " horse-drawn vehicle 50 0.88 0.94\n", + " hourglass 50 0.88 0.96\n", + " iPod 50 0.76 0.94\n", + " clothes iron 50 0.82 0.88\n", + " jack-o'-lantern 50 0.98 0.98\n", + " jeans 50 0.68 0.84\n", + " jeep 50 0.72 0.9\n", + " T-shirt 50 0.72 0.96\n", + " jigsaw puzzle 50 0.84 0.94\n", + " pulled rickshaw 50 0.86 0.94\n", + " joystick 50 0.8 0.9\n", + " kimono 50 0.84 0.96\n", + " knee pad 50 0.62 0.88\n", + " knot 50 0.66 0.8\n", + " lab coat 50 0.8 0.96\n", + " ladle 50 0.36 0.64\n", + " lampshade 50 0.48 0.84\n", + " laptop computer 50 0.26 0.88\n", + " lawn mower 50 0.78 0.96\n", + " lens cap 50 0.46 0.72\n", + " paper knife 50 0.26 0.5\n", + " library 50 0.54 0.9\n", + " lifeboat 50 0.92 0.98\n", + " lighter 50 0.56 0.78\n", + " limousine 50 0.76 0.92\n", + " ocean liner 50 0.88 0.94\n", + " lipstick 50 0.74 0.9\n", + " slip-on shoe 50 0.74 0.92\n", + " lotion 50 0.5 0.86\n", + " speaker 50 0.52 0.68\n", + " loupe 50 0.32 0.52\n", + " sawmill 50 0.72 0.9\n", + " magnetic compass 50 0.52 0.82\n", + " mail bag 50 0.68 0.92\n", + " mailbox 50 0.82 0.92\n", + " tights 50 0.22 0.94\n", + " tank suit 50 0.24 0.9\n", + " manhole cover 50 0.96 0.98\n", + " maraca 50 0.74 0.9\n", + " marimba 50 0.84 0.94\n", + " mask 50 0.44 0.82\n", + " match 50 0.66 0.9\n", + " maypole 50 0.96 1\n", + " maze 50 0.8 0.96\n", + " measuring cup 50 0.54 0.76\n", + " medicine chest 50 0.6 0.84\n", + " megalith 50 0.8 0.92\n", + " microphone 50 0.52 0.7\n", + " microwave oven 50 0.48 0.72\n", + " military uniform 50 0.62 0.84\n", + " milk can 50 0.68 0.82\n", + " minibus 50 0.7 1\n", + " miniskirt 50 0.46 0.76\n", + " minivan 50 0.38 0.8\n", + " missile 50 0.4 0.84\n", + " mitten 50 0.76 0.88\n", + " mixing bowl 50 0.8 0.92\n", + " mobile home 50 0.54 0.78\n", + " Model T 50 0.92 0.96\n", + " modem 50 0.58 0.86\n", + " monastery 50 0.44 0.9\n", + " monitor 50 0.4 0.86\n", + " moped 50 0.56 0.94\n", + " mortar 50 0.68 0.94\n", + " square academic cap 50 0.5 0.84\n", + " mosque 50 0.9 1\n", + " mosquito net 50 0.9 0.98\n", + " scooter 50 0.9 0.98\n", + " mountain bike 50 0.78 0.96\n", + " tent 50 0.88 0.96\n", + " computer mouse 50 0.42 0.82\n", + " mousetrap 50 0.76 0.88\n", + " moving van 50 0.4 0.72\n", + " muzzle 50 0.5 0.72\n", + " nail 50 0.68 0.74\n", + " neck brace 50 0.56 0.68\n", + " necklace 50 0.86 1\n", + " nipple 50 0.7 0.88\n", + " notebook computer 50 0.34 0.84\n", + " obelisk 50 0.8 0.92\n", + " oboe 50 0.6 0.84\n", + " ocarina 50 0.8 0.86\n", + " odometer 50 0.96 1\n", + " oil filter 50 0.58 0.82\n", + " organ 50 0.82 0.9\n", + " oscilloscope 50 0.9 0.96\n", + " overskirt 50 0.2 0.7\n", + " bullock cart 50 0.7 0.94\n", + " oxygen mask 50 0.46 0.84\n", + " packet 50 0.5 0.78\n", + " paddle 50 0.56 0.94\n", + " paddle wheel 50 0.86 0.96\n", + " padlock 50 0.74 0.78\n", + " paintbrush 50 0.62 0.8\n", + " pajamas 50 0.56 0.92\n", + " palace 50 0.64 0.96\n", + " pan flute 50 0.84 0.86\n", + " paper towel 50 0.66 0.84\n", + " parachute 50 0.92 0.94\n", + " parallel bars 50 0.62 0.96\n", + " park bench 50 0.74 0.9\n", + " parking meter 50 0.84 0.92\n", + " passenger car 50 0.5 0.82\n", + 
" patio 50 0.58 0.84\n", + " payphone 50 0.74 0.92\n", + " pedestal 50 0.52 0.9\n", + " pencil case 50 0.64 0.92\n", + " pencil sharpener 50 0.52 0.78\n", + " perfume 50 0.7 0.9\n", + " Petri dish 50 0.6 0.8\n", + " photocopier 50 0.88 0.98\n", + " plectrum 50 0.7 0.84\n", + " Pickelhaube 50 0.72 0.86\n", + " picket fence 50 0.84 0.94\n", + " pickup truck 50 0.64 0.92\n", + " pier 50 0.52 0.82\n", + " piggy bank 50 0.82 0.94\n", + " pill bottle 50 0.76 0.86\n", + " pillow 50 0.76 0.9\n", + " ping-pong ball 50 0.84 0.88\n", + " pinwheel 50 0.76 0.88\n", + " pirate ship 50 0.76 0.94\n", + " pitcher 50 0.46 0.84\n", + " hand plane 50 0.84 0.94\n", + " planetarium 50 0.88 0.98\n", + " plastic bag 50 0.36 0.62\n", + " plate rack 50 0.52 0.78\n", + " plow 50 0.78 0.88\n", + " plunger 50 0.42 0.7\n", + " Polaroid camera 50 0.84 0.92\n", + " pole 50 0.38 0.74\n", + " police van 50 0.76 0.94\n", + " poncho 50 0.58 0.86\n", + " billiard table 50 0.8 0.88\n", + " soda bottle 50 0.56 0.94\n", + " pot 50 0.78 0.92\n", + " potter's wheel 50 0.9 0.94\n", + " power drill 50 0.42 0.72\n", + " prayer rug 50 0.7 0.86\n", + " printer 50 0.54 0.86\n", + " prison 50 0.7 0.9\n", + " projectile 50 0.28 0.9\n", + " projector 50 0.62 0.84\n", + " hockey puck 50 0.92 0.96\n", + " punching bag 50 0.6 0.68\n", + " purse 50 0.42 0.78\n", + " quill 50 0.68 0.84\n", + " quilt 50 0.64 0.9\n", + " race car 50 0.72 0.92\n", + " racket 50 0.72 0.9\n", + " radiator 50 0.66 0.76\n", + " radio 50 0.64 0.92\n", + " radio telescope 50 0.9 0.96\n", + " rain barrel 50 0.8 0.98\n", + " recreational vehicle 50 0.84 0.94\n", + " reel 50 0.72 0.82\n", + " reflex camera 50 0.72 0.92\n", + " refrigerator 50 0.7 0.9\n", + " remote control 50 0.7 0.88\n", + " restaurant 50 0.5 0.66\n", + " revolver 50 0.82 1\n", + " rifle 50 0.38 0.7\n", + " rocking chair 50 0.62 0.84\n", + " rotisserie 50 0.88 0.92\n", + " eraser 50 0.54 0.76\n", + " rugby ball 50 0.86 0.94\n", + " ruler 50 0.68 0.86\n", + " running shoe 50 0.78 0.94\n", + " safe 50 0.82 0.92\n", + " safety pin 50 0.4 0.62\n", + " salt shaker 50 0.66 0.9\n", + " sandal 50 0.66 0.86\n", + " sarong 50 0.64 0.86\n", + " saxophone 50 0.66 0.88\n", + " scabbard 50 0.76 0.92\n", + " weighing scale 50 0.58 0.78\n", + " school bus 50 0.92 1\n", + " schooner 50 0.84 1\n", + " scoreboard 50 0.9 0.96\n", + " CRT screen 50 0.14 0.7\n", + " screw 50 0.9 0.98\n", + " screwdriver 50 0.3 0.58\n", + " seat belt 50 0.88 0.94\n", + " sewing machine 50 0.76 0.9\n", + " shield 50 0.56 0.82\n", + " shoe store 50 0.78 0.96\n", + " shoji 50 0.8 0.92\n", + " shopping basket 50 0.52 0.88\n", + " shopping cart 50 0.76 0.92\n", + " shovel 50 0.62 0.84\n", + " shower cap 50 0.7 0.84\n", + " shower curtain 50 0.64 0.82\n", + " ski 50 0.74 0.92\n", + " ski mask 50 0.72 0.88\n", + " sleeping bag 50 0.68 0.8\n", + " slide rule 50 0.72 0.88\n", + " sliding door 50 0.44 0.78\n", + " slot machine 50 0.94 0.98\n", + " snorkel 50 0.86 0.98\n", + " snowmobile 50 0.88 1\n", + " snowplow 50 0.84 0.98\n", + " soap dispenser 50 0.56 0.86\n", + " soccer ball 50 0.86 0.96\n", + " sock 50 0.62 0.76\n", + " solar thermal collector 50 0.72 0.96\n", + " sombrero 50 0.6 0.84\n", + " soup bowl 50 0.56 0.94\n", + " space bar 50 0.34 0.88\n", + " space heater 50 0.52 0.74\n", + " space shuttle 50 0.82 0.96\n", + " spatula 50 0.3 0.6\n", + " motorboat 50 0.86 1\n", + " spider web 50 0.7 0.9\n", + " spindle 50 0.86 0.98\n", + " sports car 50 0.6 0.94\n", + " spotlight 50 0.26 0.6\n", + " stage 50 0.68 0.86\n", + " steam locomotive 50 0.94 
1\n", + " through arch bridge 50 0.84 0.96\n", + " steel drum 50 0.82 0.9\n", + " stethoscope 50 0.6 0.82\n", + " scarf 50 0.5 0.92\n", + " stone wall 50 0.76 0.9\n", + " stopwatch 50 0.58 0.9\n", + " stove 50 0.46 0.74\n", + " strainer 50 0.64 0.84\n", + " tram 50 0.88 0.96\n", + " stretcher 50 0.6 0.8\n", + " couch 50 0.8 0.96\n", + " stupa 50 0.88 0.88\n", + " submarine 50 0.72 0.92\n", + " suit 50 0.4 0.78\n", + " sundial 50 0.58 0.74\n", + " sunglass 50 0.14 0.58\n", + " sunglasses 50 0.28 0.58\n", + " sunscreen 50 0.32 0.7\n", + " suspension bridge 50 0.6 0.94\n", + " mop 50 0.74 0.92\n", + " sweatshirt 50 0.28 0.66\n", + " swimsuit 50 0.52 0.82\n", + " swing 50 0.76 0.84\n", + " switch 50 0.56 0.76\n", + " syringe 50 0.62 0.82\n", + " table lamp 50 0.6 0.88\n", + " tank 50 0.8 0.96\n", + " tape player 50 0.46 0.76\n", + " teapot 50 0.84 1\n", + " teddy bear 50 0.82 0.94\n", + " television 50 0.6 0.9\n", + " tennis ball 50 0.7 0.94\n", + " thatched roof 50 0.88 0.9\n", + " front curtain 50 0.8 0.92\n", + " thimble 50 0.6 0.8\n", + " threshing machine 50 0.56 0.88\n", + " throne 50 0.72 0.82\n", + " tile roof 50 0.72 0.94\n", + " toaster 50 0.66 0.84\n", + " tobacco shop 50 0.42 0.7\n", + " toilet seat 50 0.62 0.88\n", + " torch 50 0.64 0.84\n", + " totem pole 50 0.92 0.98\n", + " tow truck 50 0.62 0.88\n", + " toy store 50 0.6 0.94\n", + " tractor 50 0.76 0.98\n", + " semi-trailer truck 50 0.78 0.92\n", + " tray 50 0.46 0.64\n", + " trench coat 50 0.54 0.72\n", + " tricycle 50 0.72 0.94\n", + " trimaran 50 0.7 0.98\n", + " tripod 50 0.58 0.86\n", + " triumphal arch 50 0.92 0.98\n", + " trolleybus 50 0.9 1\n", + " trombone 50 0.54 0.88\n", + " tub 50 0.24 0.82\n", + " turnstile 50 0.84 0.94\n", + " typewriter keyboard 50 0.68 0.98\n", + " umbrella 50 0.52 0.7\n", + " unicycle 50 0.74 0.96\n", + " upright piano 50 0.76 0.9\n", + " vacuum cleaner 50 0.62 0.9\n", + " vase 50 0.5 0.78\n", + " vault 50 0.76 0.92\n", + " velvet 50 0.2 0.42\n", + " vending machine 50 0.9 1\n", + " vestment 50 0.54 0.82\n", + " viaduct 50 0.78 0.86\n", + " violin 50 0.68 0.78\n", + " volleyball 50 0.86 1\n", + " waffle iron 50 0.72 0.88\n", + " wall clock 50 0.54 0.88\n", + " wallet 50 0.52 0.9\n", + " wardrobe 50 0.68 0.88\n", + " military aircraft 50 0.9 0.98\n", + " sink 50 0.72 0.96\n", + " washing machine 50 0.78 0.94\n", + " water bottle 50 0.54 0.74\n", + " water jug 50 0.22 0.74\n", + " water tower 50 0.9 0.96\n", + " whiskey jug 50 0.64 0.74\n", + " whistle 50 0.72 0.84\n", + " wig 50 0.84 0.9\n", + " window screen 50 0.68 0.8\n", + " window shade 50 0.52 0.76\n", + " Windsor tie 50 0.22 0.66\n", + " wine bottle 50 0.42 0.82\n", + " wing 50 0.54 0.96\n", + " wok 50 0.46 0.82\n", + " wooden spoon 50 0.58 0.8\n", + " wool 50 0.32 0.82\n", + " split-rail fence 50 0.74 0.9\n", + " shipwreck 50 0.84 0.96\n", + " yawl 50 0.78 0.96\n", + " yurt 50 0.84 1\n", + " website 50 0.98 1\n", + " comic book 50 0.62 0.9\n", + " crossword 50 0.84 0.88\n", + " traffic sign 50 0.78 0.9\n", + " traffic light 50 0.8 0.94\n", + " dust jacket 50 0.72 0.94\n", + " menu 50 0.82 0.96\n", + " plate 50 0.44 0.88\n", + " guacamole 50 0.8 0.92\n", + " consomme 50 0.54 0.88\n", + " hot pot 50 0.86 0.98\n", + " trifle 50 0.92 0.98\n", + " ice cream 50 0.68 0.94\n", + " ice pop 50 0.62 0.84\n", + " baguette 50 0.62 0.88\n", + " bagel 50 0.64 0.92\n", + " pretzel 50 0.72 0.88\n", + " cheeseburger 50 0.9 1\n", + " hot dog 50 0.74 0.94\n", + " mashed potato 50 0.74 0.9\n", + " cabbage 50 0.84 0.96\n", + " broccoli 50 0.9 0.96\n", + " 
cauliflower 50 0.82 1\n", + " zucchini 50 0.74 0.9\n", + " spaghetti squash 50 0.8 0.96\n", + " acorn squash 50 0.82 0.96\n", + " butternut squash 50 0.7 0.94\n", + " cucumber 50 0.6 0.96\n", + " artichoke 50 0.84 0.94\n", + " bell pepper 50 0.84 0.98\n", + " cardoon 50 0.88 0.94\n", + " mushroom 50 0.38 0.92\n", + " Granny Smith 50 0.9 0.96\n", + " strawberry 50 0.6 0.88\n", + " orange 50 0.7 0.92\n", + " lemon 50 0.78 0.98\n", + " fig 50 0.82 0.96\n", + " pineapple 50 0.86 0.96\n", + " banana 50 0.84 0.96\n", + " jackfruit 50 0.9 0.98\n", + " custard apple 50 0.86 0.96\n", + " pomegranate 50 0.82 0.98\n", + " hay 50 0.8 0.92\n", + " carbonara 50 0.88 0.94\n", + " chocolate syrup 50 0.46 0.84\n", + " dough 50 0.4 0.6\n", + " meatloaf 50 0.58 0.84\n", + " pizza 50 0.84 0.96\n", + " pot pie 50 0.68 0.9\n", + " burrito 50 0.8 0.98\n", + " red wine 50 0.54 0.82\n", + " espresso 50 0.64 0.88\n", + " cup 50 0.38 0.7\n", + " eggnog 50 0.38 0.7\n", + " alp 50 0.54 0.88\n", + " bubble 50 0.8 0.96\n", + " cliff 50 0.64 1\n", + " coral reef 50 0.72 0.96\n", + " geyser 50 0.94 1\n", + " lakeshore 50 0.54 0.88\n", + " promontory 50 0.58 0.94\n", + " shoal 50 0.6 0.96\n", + " seashore 50 0.44 0.78\n", + " valley 50 0.72 0.94\n", + " volcano 50 0.78 0.96\n", + " baseball player 50 0.72 0.94\n", + " bridegroom 50 0.72 0.88\n", + " scuba diver 50 0.8 1\n", + " rapeseed 50 0.94 0.98\n", + " daisy 50 0.96 0.98\n", + " yellow lady's slipper 50 1 1\n", + " corn 50 0.4 0.88\n", + " acorn 50 0.92 0.98\n", + " rose hip 50 0.92 0.98\n", + " horse chestnut seed 50 0.94 0.98\n", + " coral fungus 50 0.96 0.96\n", + " agaric 50 0.82 0.94\n", + " gyromitra 50 0.98 1\n", + " stinkhorn mushroom 50 0.8 0.94\n", + " earth star 50 0.98 1\n", + " hen-of-the-woods 50 0.8 0.96\n", + " bolete 50 0.74 0.94\n", + " ear 50 0.48 0.94\n", + " toilet paper 50 0.36 0.68\n", + "Speed: 0.1ms pre-process, 0.3ms inference, 0.0ms post-process per image at shape (1, 3, 224, 224)\n", + "Results saved to \u001b[1mruns/val-cls/exp\u001b[0m\n" + ] + } + ], + "source": [ + "# Validate YOLOv5s on Imagenet val\n", + "!python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 224 --half" + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - " patio 50 0.6 0.84\n", - " payphone 50 0.78 0.94\n", - " pedestal 50 0.66 0.86\n", - " pencil case 50 0.74 0.98\n", - " pencil sharpener 50 0.6 0.76\n", - " perfume 50 0.66 0.96\n", - " Petri dish 50 0.64 0.82\n", - " photocopier 50 0.94 1\n", - " plectrum 50 0.72 0.92\n", - " Pickelhaube 50 0.78 0.88\n", - " picket fence 50 0.86 0.94\n", - " pickup truck 50 0.72 0.94\n", - " pier 50 0.54 0.92\n", - " piggy bank 50 0.8 0.94\n", - " pill bottle 50 0.72 0.9\n", - " pillow 50 0.76 0.88\n", - " ping-pong ball 50 0.78 0.88\n", - " pinwheel 50 0.8 0.94\n", - " pirate ship 50 0.76 0.92\n", - " pitcher 50 0.48 0.86\n", - " hand plane 50 0.9 0.92\n", - " planetarium 50 0.9 0.98\n", - " plastic bag 50 0.42 0.66\n", - " plate rack 50 0.52 0.82\n", - " plow 50 0.8 0.94\n", - " plunger 50 0.42 0.72\n", - " Polaroid camera 50 0.84 0.94\n", - " pole 50 0.4 0.76\n", - " police van 50 0.84 0.94\n", - " poncho 50 0.64 0.88\n", - " billiard table 50 0.84 0.92\n", - " soda bottle 50 0.58 0.9\n", - " pot 50 0.86 0.94\n", - " potter's wheel 50 0.92 0.94\n", - " power drill 50 0.38 0.7\n", - " prayer rug 50 0.7 0.88\n", - " printer 50 0.52 0.86\n", - " prison 50 0.66 0.9\n", - " projectile 50 0.34 0.96\n", - " projector 50 0.6 0.82\n", - " hockey puck 50 0.9 0.98\n", - " punching bag 50 0.62 
0.72\n", - " purse 50 0.48 0.88\n", - " quill 50 0.78 0.86\n", - " quilt 50 0.6 0.9\n", - " race car 50 0.72 0.92\n", - " racket 50 0.78 0.94\n", - " radiator 50 0.7 0.84\n", - " radio 50 0.68 0.9\n", - " radio telescope 50 0.88 0.94\n", - " rain barrel 50 0.8 0.96\n", - " recreational vehicle 50 0.84 0.96\n", - " reel 50 0.72 0.8\n", - " reflex camera 50 0.76 0.96\n", - " refrigerator 50 0.76 0.92\n", - " remote control 50 0.72 0.94\n", - " restaurant 50 0.52 0.62\n", - " revolver 50 0.8 0.98\n", - " rifle 50 0.46 0.76\n", - " rocking chair 50 0.72 0.9\n", - " rotisserie 50 0.88 0.96\n", - " eraser 50 0.62 0.76\n", - " rugby ball 50 0.84 0.94\n", - " ruler 50 0.72 0.86\n", - " running shoe 50 0.84 0.94\n", - " safe 50 0.9 0.94\n", - " safety pin 50 0.48 0.8\n", - " salt shaker 50 0.62 0.8\n", - " sandal 50 0.7 0.82\n", - " sarong 50 0.62 0.8\n", - " saxophone 50 0.66 0.9\n", - " scabbard 50 0.78 0.92\n", - " weighing scale 50 0.62 0.84\n", - " school bus 50 0.92 1\n", - " schooner 50 0.8 1\n", - " scoreboard 50 0.86 0.98\n", - " CRT screen 50 0.16 0.8\n", - " screw 50 0.96 0.98\n", - " screwdriver 50 0.4 0.58\n", - " seat belt 50 0.9 0.92\n", - " sewing machine 50 0.74 0.94\n", - " shield 50 0.64 0.78\n", - " shoe store 50 0.84 0.98\n", - " shoji 50 0.76 0.92\n", - " shopping basket 50 0.52 0.84\n", - " shopping cart 50 0.76 0.9\n", - " shovel 50 0.7 0.84\n", - " shower cap 50 0.74 0.88\n", - " shower curtain 50 0.72 0.9\n", - " ski 50 0.68 0.94\n", - " ski mask 50 0.66 0.9\n", - " sleeping bag 50 0.66 0.8\n", - " slide rule 50 0.7 0.86\n", - " sliding door 50 0.54 0.76\n", - " slot machine 50 0.92 0.96\n", - " snorkel 50 0.86 1\n", - " snowmobile 50 0.86 0.96\n", - " snowplow 50 0.9 1\n", - " soap dispenser 50 0.52 0.9\n", - " soccer ball 50 0.84 0.98\n", - " sock 50 0.66 0.78\n", - " solar thermal collector 50 0.72 0.9\n", - " sombrero 50 0.7 0.84\n", - " soup bowl 50 0.6 0.94\n", - " space bar 50 0.32 0.84\n", - " space heater 50 0.64 0.74\n", - " space shuttle 50 0.86 0.98\n", - " spatula 50 0.28 0.6\n", - " motorboat 50 0.94 1\n", - " spider web 50 0.76 0.96\n", - " spindle 50 0.92 1\n", - " sports car 50 0.5 0.96\n", - " spotlight 50 0.34 0.66\n", - " stage 50 0.76 0.92\n", - " steam locomotive 50 0.96 1\n", - " through arch bridge 50 0.82 0.96\n", - " steel drum 50 0.8 0.94\n", - " stethoscope 50 0.52 0.84\n", - " scarf 50 0.54 0.92\n", - " stone wall 50 0.8 0.92\n", - " stopwatch 50 0.54 0.9\n", - " stove 50 0.46 0.78\n", - " strainer 50 0.58 0.84\n", - " tram 50 0.9 0.96\n", - " stretcher 50 0.46 0.74\n", - " couch 50 0.72 0.94\n", - " stupa 50 0.84 0.9\n", - " submarine 50 0.78 0.9\n", - " suit 50 0.62 0.88\n", - " sundial 50 0.46 0.78\n", - " sunglass 50 0.18 0.6\n", - " sunglasses 50 0.32 0.64\n", - " sunscreen 50 0.32 0.7\n", - " suspension bridge 50 0.64 0.94\n", - " mop 50 0.8 0.96\n", - " sweatshirt 50 0.26 0.68\n", - " swimsuit 50 0.6 0.84\n", - " swing 50 0.78 0.88\n", - " switch 50 0.62 0.8\n", - " syringe 50 0.68 0.8\n", - " table lamp 50 0.54 0.88\n", - " tank 50 0.78 0.94\n", - " tape player 50 0.38 0.88\n", - " teapot 50 0.82 1\n", - " teddy bear 50 0.82 0.92\n", - " television 50 0.6 0.9\n", - " tennis ball 50 0.7 0.94\n", - " thatched roof 50 0.86 0.94\n", - " front curtain 50 0.76 0.94\n", - " thimble 50 0.68 0.82\n", - " threshing machine 50 0.64 0.9\n", - " throne 50 0.68 0.82\n", - " tile roof 50 0.84 0.96\n", - " toaster 50 0.64 0.82\n", - " tobacco shop 50 0.44 0.74\n", - " toilet seat 50 0.64 0.88\n", - " torch 50 0.62 0.86\n", - " totem pole 50 0.9 1\n", - 
" tow truck 50 0.64 0.92\n", - " toy store 50 0.64 0.9\n", - " tractor 50 0.86 0.98\n", - " semi-trailer truck 50 0.76 0.96\n", - " tray 50 0.54 0.76\n", - " trench coat 50 0.6 0.78\n", - " tricycle 50 0.78 0.96\n", - " trimaran 50 0.78 0.98\n", - " tripod 50 0.66 0.86\n", - " triumphal arch 50 0.92 0.98\n", - " trolleybus 50 0.98 1\n", - " trombone 50 0.66 0.94\n", - " tub 50 0.3 0.86\n", - " turnstile 50 0.8 0.9\n", - " typewriter keyboard 50 0.74 0.98\n", - " umbrella 50 0.6 0.78\n", - " unicycle 50 0.78 0.96\n", - " upright piano 50 0.84 0.94\n", - " vacuum cleaner 50 0.84 0.92\n", - " vase 50 0.56 0.74\n", - " vault 50 0.78 0.9\n", - " velvet 50 0.22 0.5\n", - " vending machine 50 0.94 1\n", - " vestment 50 0.62 0.86\n", - " viaduct 50 0.78 0.88\n", - " violin 50 0.64 0.88\n" - ] + "cell_type": "markdown", + "metadata": { + "id": "ZY2VXXXu74w5" + }, + "source": [ + "# 3. Train\n", + "\n", + "

\n", + "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", + "

\n", + "\n", + "Train a YOLOv5s Classification model on the [Imagenette](https://image-net.org/) dataset with `--data imagenet`, starting from pretrained `--pretrained yolov5s-cls.pt`.\n", + "\n", + "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", + "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", + "- **Training Results** are saved to `runs/train-cls/` with incrementing run directories, i.e. `runs/train-cls/exp2`, `runs/train-cls/exp3` etc.\n", + "

\n", + "\n", + "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", + "\n", + "## Train on Custom Data with Roboflow 🌟 NEW\n", + "\n", + "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", + "\n", + "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-classification-custom-data/](https://blog.roboflow.com/train-yolov5-classification-custom-data/?ref=ultralytics)\n", + "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1KZiKUAjtARHAfZCXbJRv14-pOnIsBLPV?usp=sharing)\n", + "
\n", + "\n", + "

Label images lightning fast (including with model-assisted labeling)" + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - " volleyball 50 0.96 1\n", - " waffle iron 50 0.72 0.84\n", - " wall clock 50 0.58 0.86\n", - " wallet 50 0.58 0.94\n", - " wardrobe 50 0.7 0.9\n", - " military aircraft 50 0.9 0.98\n", - " sink 50 0.74 0.94\n", - " washing machine 50 0.82 0.94\n", - " water bottle 50 0.54 0.68\n", - " water jug 50 0.3 0.78\n", - " water tower 50 0.94 0.96\n", - " whiskey jug 50 0.64 0.76\n", - " whistle 50 0.7 0.82\n", - " wig 50 0.86 0.88\n", - " window screen 50 0.7 0.82\n", - " window shade 50 0.54 0.9\n", - " Windsor tie 50 0.32 0.64\n", - " wine bottle 50 0.46 0.76\n", - " wing 50 0.52 0.96\n", - " wok 50 0.54 0.92\n", - " wooden spoon 50 0.62 0.86\n", - " wool 50 0.42 0.84\n", - " split-rail fence 50 0.7 0.92\n", - " shipwreck 50 0.86 0.98\n", - " yawl 50 0.76 0.92\n", - " yurt 50 0.86 0.96\n", - " website 50 0.98 1\n", - " comic book 50 0.72 0.88\n", - " crossword 50 0.8 0.88\n", - " traffic sign 50 0.72 0.9\n", - " traffic light 50 0.8 0.96\n", - " dust jacket 50 0.78 0.94\n", - " menu 50 0.8 0.96\n", - " plate 50 0.44 0.86\n", - " guacamole 50 0.76 0.96\n", - " consomme 50 0.52 0.92\n", - " hot pot 50 0.78 1\n", - " trifle 50 0.9 1\n", - " ice cream 50 0.68 0.94\n", - " ice pop 50 0.68 0.8\n", - " baguette 50 0.62 0.88\n", - " bagel 50 0.64 0.86\n", - " pretzel 50 0.68 0.9\n", - " cheeseburger 50 0.92 0.96\n", - " hot dog 50 0.74 0.96\n", - " mashed potato 50 0.72 0.88\n", - " cabbage 50 0.88 0.98\n", - " broccoli 50 0.88 0.96\n", - " cauliflower 50 0.84 0.98\n", - " zucchini 50 0.68 0.98\n", - " spaghetti squash 50 0.82 0.96\n", - " acorn squash 50 0.8 1\n", - " butternut squash 50 0.72 0.94\n", - " cucumber 50 0.66 0.94\n", - " artichoke 50 0.86 0.96\n", - " bell pepper 50 0.86 0.94\n", - " cardoon 50 0.92 0.94\n", - " mushroom 50 0.38 0.96\n", - " Granny Smith 50 0.9 0.98\n", - " strawberry 50 0.64 0.88\n", - " orange 50 0.74 0.94\n", - " lemon 50 0.78 0.98\n", - " fig 50 0.84 0.94\n", - " pineapple 50 0.9 1\n", - " banana 50 0.88 0.98\n", - " jackfruit 50 0.96 0.98\n", - " custard apple 50 0.86 0.96\n", - " pomegranate 50 0.8 0.96\n", - " hay 50 0.84 0.96\n", - " carbonara 50 0.88 0.96\n", - " chocolate syrup 50 0.58 0.94\n", - " dough 50 0.36 0.68\n", - " meatloaf 50 0.64 0.88\n", - " pizza 50 0.78 0.9\n", - " pot pie 50 0.66 0.92\n", - " burrito 50 0.88 0.98\n", - " red wine 50 0.66 0.84\n", - " espresso 50 0.66 0.9\n", - " cup 50 0.42 0.78\n", - " eggnog 50 0.36 0.64\n", - " alp 50 0.54 0.94\n", - " bubble 50 0.86 0.96\n", - " cliff 50 0.66 1\n", - " coral reef 50 0.74 0.94\n", - " geyser 50 0.92 1\n", - " lakeshore 50 0.52 0.86\n", - " promontory 50 0.58 0.92\n", - " shoal 50 0.66 0.98\n", - " seashore 50 0.44 0.86\n", - " valley 50 0.72 0.98\n", - " volcano 50 0.72 0.94\n", - " baseball player 50 0.74 0.96\n", - " bridegroom 50 0.78 0.92\n", - " scuba diver 50 0.82 1\n", - " rapeseed 50 0.98 0.98\n", - " daisy 50 0.96 0.98\n", - " yellow lady's slipper 50 1 1\n", - " corn 50 0.42 0.86\n", - " acorn 50 0.96 0.98\n", - " rose hip 50 0.9 0.96\n", - " horse chestnut seed 50 1 1\n", - " coral fungus 50 0.98 0.98\n", - " agaric 50 0.84 0.94\n", - " gyromitra 50 0.98 0.98\n", - " stinkhorn mushroom 50 0.84 0.92\n", - " earth star 50 1 1\n", - " hen-of-the-woods 50 0.9 0.96\n", - " bolete 50 0.8 0.94\n", - " ear 50 0.54 0.94\n", - " toilet paper 50 0.44 0.68\n", - "Speed: 0.1ms pre-process, 0.2ms inference, 0.0ms post-process per image at shape (1, 3, 320, 
320)\n", - "Results saved to \u001b[1mruns/val-cls/exp\u001b[0m\n" - ] - } - ], - "source": [ - "# Validate YOLOv5s on Imagenet val\n", - "!python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 320 --half" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ZY2VXXXu74w5" - }, - "source": [ - "# 3. Train\n", - "\n", - "

\n", - "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", - "

\n", - "\n", - "Train a YOLOv5s Classification model on the [Imagenette](https://image-net.org/) dataset with `--data imagenet`, starting from pretrained `--pretrained yolov5s-cls.pt`.\n", - "\n", - "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", - "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", - "- **Training Results** are saved to `runs/train-cls/` with incrementing run directories, i.e. `runs/train-cls/exp2`, `runs/train-cls/exp3` etc.\n", - "

\n", - "\n", - "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", - "\n", - "## Train on Custom Data with Roboflow 🌟 NEW\n", - "\n", - "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", - "\n", - "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-classification-custom-data/](https://blog.roboflow.com/train-yolov5-classification-custom-data/?ref=ultralytics)\n", - "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1KZiKUAjtARHAfZCXbJRv14-pOnIsBLPV?usp=sharing)\n", - "
\n", - "\n", - "

Label images lightning fast (including with model-assisted labeling)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "i3oKtE4g-aNn" - }, - "outputs": [], - "source": [ - "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", - "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", - "\n", - "if logger == 'TensorBoard':\n", - " %load_ext tensorboard\n", - " %tensorboard --logdir runs/train\n", - "elif logger == 'Comet':\n", - " %pip install -q comet_ml\n", - " import comet_ml; comet_ml.init()\n", - "elif logger == 'ClearML':\n", - " %pip install -q clearml && clearml-init" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "i3oKtE4g-aNn" + }, + "outputs": [], + "source": [ + "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", + "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", + "\n", + "if logger == 'TensorBoard':\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train\n", + "elif logger == 'Comet':\n", + " %pip install -q comet_ml\n", + " import comet_ml; comet_ml.init()\n", + "elif logger == 'ClearML':\n", + " import clearml; clearml.browser_login()" + ] }, - "id": "1NcFxRcFdJ_O", - "outputId": "baa6d4be-3379-4aab-844a-d5a5396c0e49" - }, - "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[34m\u001b[1mclassify/train: \u001b[0mmodel=yolov5s-cls.pt, data=imagenette160, epochs=3, batch_size=16, imgsz=160, nosave=False, cache=ram, device=, workers=8, project=runs/train-cls, name=exp, exist_ok=False, pretrained=True, optimizer=Adam, lr0=0.001, decay=5e-05, label_smoothing=0.1, cutoff=None, dropout=None, verbose=False, seed=0, local_rank=-1\n", - "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 7 commits. 
Use `git pull ultralytics master` or `git clone https://github.com/ultralytics/yolov5` to update.\n", - "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", - "\n", - "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-cls', view at http://localhost:6006/\n", - "\u001b[34m\u001b[1malbumentations: \u001b[0m⚠️ not found, install with `pip install albumentations` (recommended)\n", - "Model summary: 149 layers, 4185290 parameters, 4185290 gradients, 10.5 GFLOPs\n", - "\u001b[34m\u001b[1moptimizer:\u001b[0m Adam(lr=0.001) with parameter groups 32 weight(decay=0.0), 33 weight(decay=5e-05), 33 bias\n", - "Image sizes 160 train, 160 test\n", - "Using 3 dataloader workers\n", - "Logging results to \u001b[1mruns/train-cls/exp\u001b[0m\n", - "Starting yolov5s-cls.pt training on imagenette160 dataset with 10 classes for 3 epochs...\n", - "\n", - " Epoch GPU_mem train_loss val_loss top1_acc top5_acc\n", - " 1/3 0.369G 1.05 0.935 0.837 0.985: 100%|█████\n", - " 2/3 0.369G 0.767 0.873 0.859 0.982: 100%|█████\n", - " 3/3 0.369G 0.626 0.713 0.927 0.992: 100%|█████\n", - "\n", - "Training complete (0.025 hours)\n", - "Results saved to \u001b[1mruns/train-cls/exp\u001b[0m\n", - "Predict: python classify/predict.py --weights runs/train-cls/exp/weights/best.pt --source im.jpg\n", - "Validate: python classify/val.py --weights runs/train-cls/exp/weights/best.pt --data /home/paguerrie/datasets/imagenette160\n", - "Export: python export.py --weights runs/train-cls/exp/weights/best.pt --include onnx\n", - "PyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', 'runs/train-cls/exp/weights/best.pt')\n", - "Visualize: https://netron.app\n", - "\n" - ] - } - ], - "source": [ - "# Train YOLOv5s Classification on Imagenette160 for 3 epochs\n", - "!python classify/train.py --img 160 --batch 16 --epochs 3 --data imagenette160 --model yolov5s-cls.pt --cache" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "15glLzbQx5u0" - }, - "source": [ - "# 4. Visualize" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "nWOsI5wJR1o3" - }, - "source": [ - "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", - "\n", - "Getting started is easy:\n", - "```shell\n", - "pip install comet_ml # 1. install\n", - "export COMET_API_KEY= # 2. paste API key\n", - "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", - "```\n", - "\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). 
Get started by trying out the Comet Colab Notebook:\n", - "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", - "\n", - "\"yolo-ui\"" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Lay2WsTjNJzP" - }, - "source": [ - "## ClearML Logging and Automation 🌟 NEW\n", - "\n", - "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML (check cells above):\n", - "\n", - "- `pip install clearml`\n", - "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n", - "\n", - "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", - "\n", - "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", - "\n", - "\n", - "\"ClearML" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "-WPvRbS5Swl6" - }, - "source": [ - "## Local Logging\n", - "\n", - "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", - "\n", - "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. \n", - "\n", - "\"Local\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Zelyeqbyt3GD" - }, - "source": [ - "# Environments\n", - "\n", - "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", - "\n", - "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", - "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", - "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", - "- **Docker Image**. 
See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "6Qu7Iesl0p54" - }, - "source": [ - "# Status\n", - "\n", - "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", - "\n", - "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "IEijrePND_2I" - }, - "source": [ - "# Appendix\n", - "\n", - "Additional content below." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "GMusP4OAxFu6" - }, - "outputs": [], - "source": [ - "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", - "import torch\n", - "\n", - "model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # yolov5n - yolov5x6 or custom\n", - "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", - "results = model(im) # inference\n", - "results.print() # or .show(), .save(), .crop(), .pandas(), etc." - ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "collapsed_sections": [], - "name": "YOLOv5 Classification Tutorial", - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.12" - }, - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "0856bea36ec148b68522ff9c9eb258d8": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "1NcFxRcFdJ_O", + "outputId": "638c55b1-dc45-4eee-cabc-4921dc61faf5" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[34m\u001b[1mclassify/train: \u001b[0mmodel=yolov5s-cls.pt, data=imagenette160, epochs=3, batch_size=16, imgsz=224, nosave=False, cache=ram, device=, workers=8, project=runs/train-cls, name=exp, exist_ok=False, pretrained=True, optimizer=Adam, lr0=0.001, decay=5e-05, label_smoothing=0.1, cutoff=None, dropout=None, verbose=False, seed=0, local_rank=-1\n", + "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", + "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + 
"\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-cls', view at http://localhost:6006/\n", + "\n", + "Dataset not found ⚠️, missing path /content/datasets/imagenette160, attempting download...\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/imagenette160.zip to /content/datasets/imagenette160.zip...\n", + "100% 103M/103M [00:09<00:00, 11.1MB/s]\n", + "Unzipping /content/datasets/imagenette160.zip...\n", + "Dataset download success ✅ (13.2s), saved to \u001b[1m/content/datasets/imagenette160\u001b[0m\n", + "\n", + "\u001b[34m\u001b[1malbumentations: \u001b[0mRandomResizedCrop(p=1.0, height=224, width=224, scale=(0.08, 1.0), ratio=(0.75, 1.3333333333333333), interpolation=1), HorizontalFlip(p=0.5), ColorJitter(p=0.5, brightness=[0.6, 1.4], contrast=[0.6, 1.4], saturation=[0.6, 1.4], hue=[0, 0]), Normalize(p=1.0, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0), ToTensorV2(always_apply=True, p=1.0, transpose_mask=False)\n", + "Model summary: 149 layers, 4185290 parameters, 4185290 gradients, 10.5 GFLOPs\n", + "\u001b[34m\u001b[1moptimizer:\u001b[0m Adam(lr=0.001) with parameter groups 32 weight(decay=0.0), 33 weight(decay=5e-05), 33 bias\n", + "Image sizes 224 train, 224 test\n", + "Using 1 dataloader workers\n", + "Logging results to \u001b[1mruns/train-cls/exp\u001b[0m\n", + "Starting yolov5s-cls.pt training on imagenette160 dataset with 10 classes for 3 epochs...\n", + "\n", + " Epoch GPU_mem train_loss val_loss top1_acc top5_acc\n", + " 1/3 0.348G 1.31 1.09 0.794 0.979: 100% 592/592 [01:02<00:00, 9.47it/s]\n", + " 2/3 0.415G 1.09 0.852 0.883 0.99: 100% 592/592 [00:59<00:00, 10.00it/s]\n", + " 3/3 0.415G 0.954 0.776 0.907 0.994: 100% 592/592 [00:59<00:00, 9.89it/s]\n", + "\n", + "Training complete (0.051 hours)\n", + "Results saved to \u001b[1mruns/train-cls/exp\u001b[0m\n", + "Predict: python classify/predict.py --weights runs/train-cls/exp/weights/best.pt --source im.jpg\n", + "Validate: python classify/val.py --weights runs/train-cls/exp/weights/best.pt --data /content/datasets/imagenette160\n", + "Export: python export.py --weights runs/train-cls/exp/weights/best.pt --include onnx\n", + "PyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', 'runs/train-cls/exp/weights/best.pt')\n", + "Visualize: https://netron.app\n", + "\n" + ] + } + ], + "source": [ + "# Train YOLOv5s Classification on Imagenette160 for 3 epochs\n", + "!python classify/train.py --img 224 --batch 16 --epochs 3 --data imagenette160 --model yolov5s-cls.pt --cache" + ] }, - "0ace3934ec6f4d36a1b3a9e086390926": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "ProgressStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } + { + "cell_type": "markdown", + "metadata": { + "id": "15glLzbQx5u0" + }, + "source": [ + "# 4. 
Visualize" + ] }, - "35e03ce5090346c9ae602891470fc555": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "FloatProgressModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_76879f6f2aa54637a7a07faeea2bd684", - "max": 818322941, - "min": 0, - "orientation": "horizontal", - "style": "IPY_MODEL_0ace3934ec6f4d36a1b3a9e086390926", - "value": 818322941 - } + { + "cell_type": "markdown", + "metadata": { + "id": "nWOsI5wJR1o3" + }, + "source": [ + "## Comet Logging and Visualization 🌟 NEW\n", + "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "\n", + "Getting started is easy:\n", + "```shell\n", + "pip install comet_ml # 1. install\n", + "export COMET_API_KEY= # 2. paste API key\n", + "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", + "```\n", + "\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", + "\n", + "\"yolo-ui\"" + ] }, - "574140e4c4bc48c9a171541a02cd0211": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_60b913d755b34d638478e30705a2dde1", - "placeholder": "​", - "style": "IPY_MODEL_0856bea36ec148b68522ff9c9eb258d8", - "value": "100%" - } + { + "cell_type": "markdown", + "metadata": { + "id": "Lay2WsTjNJzP" + }, + "source": [ + "## ClearML Logging and Automation 🌟 NEW\n", + "\n", + "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. 
To enable ClearML (check cells above):\n", + "\n", + "- `pip install clearml`\n", + "- run `clearml-init` (or call `clearml.browser_login()` as in the logger cell above) to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n", + "\n", + "You'll get all the expected features from an experiment manager: live updates, model upload, experiment comparison, etc., and ClearML also tracks uncommitted changes and installed packages. Thanks to that, ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", + "\n", + "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", + "\n", + "\n", + "\"ClearML" + ] }, { "cell_type": "markdown", "metadata": { "id": "-WPvRbS5Swl6" }, "source": [ "## Local Logging\n", + "\n", + "Training results are automatically logged with [TensorBoard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", + "\n", + "This directory contains train and val statistics, mosaics, labels, predictions and augmented mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. 
\n", + "\n", + "\"Local\n" + ] }, - "60b913d755b34d638478e30705a2dde1": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } + { + "cell_type": "markdown", + "metadata": { + "id": "Zelyeqbyt3GD" + }, + "source": [ + "# Environments\n", + "\n", + "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", + "\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", + "- **Docker Image**. 
See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + ] }, - "65881db1db8a4e9c930fab9172d45143": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } + { + "cell_type": "markdown", + "metadata": { + "id": "6Qu7Iesl0p54" + }, + "source": [ + "# Status\n", + "\n", + "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", + "\n", + "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" + ] }, - "76879f6f2aa54637a7a07faeea2bd684": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } + { + "cell_type": "markdown", + "metadata": { + "id": "IEijrePND_2I" + }, + "source": [ + "# Appendix\n", + "\n", + "Additional 
content below." + ] }, - "9b8caa3522fc4cbab31e13b5dfc7808d": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HBoxModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_574140e4c4bc48c9a171541a02cd0211", - "IPY_MODEL_35e03ce5090346c9ae602891470fc555", - "IPY_MODEL_c942c208e72d46568b476bb0f2d75496" - ], - "layout": "IPY_MODEL_65881db1db8a4e9c930fab9172d45143" - } + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GMusP4OAxFu6" + }, + "outputs": [], + "source": [ + "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", + "import torch\n", + "\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # yolov5n - yolov5x6 or custom\n", + "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", + "results = model(im) # inference\n", + "results.print() # or .show(), .save(), .crop(), .pandas(), etc." + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "name": "YOLOv5 Classification Tutorial", + "provenance": [] }, - "c942c208e72d46568b476bb0f2d75496": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_d6b7a2243e0c4beca714d99dceec23d6", - "placeholder": "​", - "style": "IPY_MODEL_5966ba6e6f114d8c9d8d1d6b1bd4f4c7", - "value": " 780M/780M [02:19<00:00, 6.24MB/s]" - } + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" }, - "d6b7a2243e0c4beca714d99dceec23d6": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + 
"nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.12" } - } - } - }, - "nbformat": 4, - "nbformat_minor": 1 -} + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file From 9bf18554c3e4b250ba7063876f0191f573ffb7a4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 23:46:15 +0100 Subject: [PATCH 268/326] Revert `--save-txt` to default False (#10213) * Revert `--save-txt` to default False Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/predict.py | 2 +- classify/tutorial.ipynb | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 96508d633da8..a9104ed315ec 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -196,7 +196,7 @@ def parse_opt(): parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[224], help='inference size h,w') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='show results') - parser.add_argument('--save-txt', action='store_false', help='save results to *.txt') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--visualize', action='store_true', help='visualize features') diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index e035a7bda40d..9e65e53d8736 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -118,8 +118,7 @@ "image 1/2 /content/yolov5/data/images/bus.jpg: 224x224 minibus 0.39, police van 0.24, amphibious vehicle 0.05, recreational vehicle 0.04, trolleybus 0.03, 3.9ms\n", "image 2/2 /content/yolov5/data/images/zidane.jpg: 224x224 suit 0.38, bow tie 0.19, bridegroom 0.18, rugby ball 0.04, stage 0.02, 4.1ms\n", "Speed: 0.3ms pre-process, 4.0ms inference, 1.5ms NMS per image at shape (1, 3, 224, 224)\n", - "Results saved to \u001b[1mruns/predict-cls/exp\u001b[0m\n", - "2 labels saved to runs/predict-cls/exp/labels\n" + "Results saved to \u001b[1mruns/predict-cls/exp\u001b[0m\n" ] } ], @@ -1475,4 +1474,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} From 9bc60349b62500096832d78989336fcda200d286 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 23:48:47 +0100 Subject: [PATCH 269/326] Add `--source screen` Usage example (#10215) --- classify/predict.py | 1 + detect.py | 1 + segment/predict.py | 1 + 3 files changed, 3 insertions(+) diff --git a/classify/predict.py b/classify/predict.py index a9104ed315ec..9a6b00062932 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -6,6 +6,7 @@ $ python classify/predict.py --weights yolov5s-cls.pt --source 0 # webcam img.jpg # image vid.mp4 # video + screen # screenshot path/ # directory 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube diff --git a/detect.py b/detect.py index 8e42fbe159d0..58b02802e6d9 100644 --- a/detect.py +++ b/detect.py @@ -6,6 +6,7 @@ $ python detect.py --weights yolov5s.pt --source 0 # webcam img.jpg # image vid.mp4 # video + screen # screenshot path/ # directory 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube diff --git a/segment/predict.py b/segment/predict.py index da1097c047c1..42389938cee7 100644 --- a/segment/predict.py +++ 
b/segment/predict.py @@ -6,6 +6,7 @@ $ python segment/predict.py --weights yolov5s-seg.pt --source 0 # webcam img.jpg # image vid.mp4 # video + screen # screenshot path/ # directory 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube From 9286336cb49d577873b2113739788bbe3b90f83c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Nov 2022 03:16:17 +0100 Subject: [PATCH 270/326] Add `git` info to training checkpoints (#9655) * Add git status on train checkpoints * Update * Update * Update * Update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update general.py Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- requirements.txt | 1 + train.py | 3 ++- utils/general.py | 19 ++++++++++++++++++- 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 70dd7ce53ba3..85eb839df8a0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,6 +2,7 @@ # Usage: pip install -r requirements.txt # Base ------------------------------------------------------------------------ +gitpython ipython # interactive notebook matplotlib>=3.2.2 numpy>=1.18.5 diff --git a/train.py b/train.py index bbbd6d07db00..6fa33f47d100 100644 --- a/train.py +++ b/train.py @@ -47,7 +47,7 @@ from utils.callbacks import Callbacks from utils.dataloaders import create_dataloader from utils.downloads import attempt_download, is_url -from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, +from utils.general import (GIT, LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer, @@ -376,6 +376,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio 'updates': ema.updates, 'optimizer': optimizer.state_dict(), 'opt': vars(opt), + 'git': GIT, 'date': datetime.now().isoformat()} # Save last, best and delete diff --git a/utils/general.py b/utils/general.py index 58181f00568d..57b6e4e78166 100644 --- a/utils/general.py +++ b/utils/general.py @@ -29,6 +29,7 @@ from zipfile import ZipFile, is_zipfile import cv2 +import git import IPython import numpy as np import pandas as pd @@ -344,6 +345,22 @@ def check_git_status(repo='ultralytics/yolov5', branch='master'): LOGGER.info(s) +@WorkingDirectory(ROOT) +def check_git(path='.'): + # YOLOv5 git check, return git {remote, branch, commit} + try: + repo = git.Repo(path) + remote = repo.remotes.origin.url.replace('.git', '') # i.e. 'https://github.com/ultralytics/yolov5' + commit = repo.head.commit.hexsha # i.e. '3134699c73af83aac2a481435550b968d5792c0d' + try: + branch = repo.active_branch.name # i.e. 'main' + except TypeError: # not on any branch + branch = None # i.e. 'detached HEAD' state + return {'remote': remote, 'branch': branch, 'commit': commit} + except git.exc.InvalidGitRepositoryError: # path is not a git dir + return {'remote': None, 'branch': None, 'commit': None} + + def check_python(minimum='3.7.0'): # Check current python version vs. 
required python version check_version(platform.python_version(), minimum, name='Python ', hard=True) @@ -1121,4 +1138,4 @@ def imshow(path, im): cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine # Variables ------------------------------------------------------------------------------------------------------------ -NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns # terminal window size for tqdm +GIT = check_git() # repo, branch, commit From 0307954e4e17da66e6bf36950f02972d976ba621 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Nov 2022 03:32:21 +0100 Subject: [PATCH 271/326] Add git info to cls, seg checkpoints (#10217) --- classify/train.py | 3 ++- segment/train.py | 9 ++------- train.py | 2 +- 3 files changed, 5 insertions(+), 9 deletions(-) diff --git a/classify/train.py b/classify/train.py index 4422ca26b0ae..5faef08e876c 100644 --- a/classify/train.py +++ b/classify/train.py @@ -40,7 +40,7 @@ from models.experimental import attempt_load from models.yolo import ClassificationModel, DetectionModel from utils.dataloaders import create_classification_dataloader -from utils.general import (DATASETS_DIR, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_status, +from utils.general import (DATASETS_DIR, GIT, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_status, check_requirements, colorstr, download, increment_path, init_seeds, print_args, yaml_save) from utils.loggers import GenericLogger from utils.plots import imshow_cls @@ -237,6 +237,7 @@ def train(opt, device): 'updates': ema.updates, 'optimizer': None, # optimizer.state_dict(), 'opt': vars(opt), + 'git': GIT, # {remote, branch, commit} if a git repo 'date': datetime.now().isoformat()} # Save last, best and delete diff --git a/segment/train.py b/segment/train.py index 2a0793d1aa3e..5d9ed78f527c 100644 --- a/segment/train.py +++ b/segment/train.py @@ -46,7 +46,7 @@ from utils.autobatch import check_train_batch_size from utils.callbacks import Callbacks from utils.downloads import attempt_download, is_url -from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, +from utils.general import (GIT, LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) @@ -390,6 +390,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio 'updates': ema.updates, 'optimizer': optimizer.state_dict(), 'opt': vars(opt), + 'git': GIT, # {remote, branch, commit} if a git repo 'date': datetime.now().isoformat()} # Save last, best and delete @@ -498,12 +499,6 @@ def parse_opt(known=False): parser.add_argument('--mask-ratio', type=int, default=4, help='Downsample the truth masks to saving memory') parser.add_argument('--no-overlap', action='store_true', help='Overlap masks train faster at slightly less mAP') - # Weights & Biases arguments - # parser.add_argument('--entity', default=None, help='W&B: Entity') - # parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option') - # parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') - # parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') 
- return parser.parse_known_args()[0] if known else parser.parse_args() diff --git a/train.py b/train.py index 6fa33f47d100..1ea5c5bbeddd 100644 --- a/train.py +++ b/train.py @@ -376,7 +376,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio 'updates': ema.updates, 'optimizer': optimizer.state_dict(), 'opt': vars(opt), - 'git': GIT, + 'git': GIT, # {remote, branch, commit} if a git repo 'date': datetime.now().isoformat()} # Save last, best and delete From 6992dde4bd628f6bffe7d4c5025afadf79ed679b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Nov 2022 13:44:46 +0100 Subject: [PATCH 272/326] Update Comet preview image (#10220) * Update Comet preview image Pass through tinyjpg: 2.2MB -> 497kB :) Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/tutorial.ipynb | 2 +- segment/tutorial.ipynb | 4 ++-- tutorial.ipynb | 4 ++-- utils/loggers/comet/README.md | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index 9e65e53d8736..956452a5aeda 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -1351,7 +1351,7 @@ "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", - "\"yolo-ui\"" + "\"yolo-ui\"" ] }, { diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index 4192c69da628..70bbf857d02b 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -466,7 +466,7 @@ "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", - "\"yolo-ui\"" + "\"yolo-ui\"" ] }, { @@ -590,4 +590,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/tutorial.ipynb b/tutorial.ipynb index 9d5aa9c85c51..6cf99650ad45 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -872,7 +872,7 @@ "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). 
Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", - "\"yolo-ui\"" + "\"yolo-ui\"" ], "metadata": { "id": "nWOsI5wJR1o3" @@ -972,4 +972,4 @@ "outputs": [] } ] -} \ No newline at end of file +} diff --git a/utils/loggers/comet/README.md b/utils/loggers/comet/README.md index 3a51cb9b5a25..8f206cd9830e 100644 --- a/utils/loggers/comet/README.md +++ b/utils/loggers/comet/README.md @@ -51,7 +51,7 @@ python train.py --img 640 --batch 16 --epochs 5 --data coco128.yaml --weights yo That's it! Comet will automatically log your hyperparameters, command line arguments, training and validation metrics. You can visualize and analyze your runs in the Comet UI -yolo-ui +yolo-ui # Try out an Example! Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) From 40bb8030f8468eb7145ff648588aa5f96e32447c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Nov 2022 14:22:07 +0100 Subject: [PATCH 273/326] Scope gitpython import in `check_git_info()` (#10221) * Scope gitpython import in `check_git_info()` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- classify/train.py | 5 +++-- segment/train.py | 9 +++++---- train.py | 9 +++++---- utils/general.py | 9 ++++----- 4 files changed, 17 insertions(+), 15 deletions(-) diff --git a/classify/train.py b/classify/train.py index 5faef08e876c..a50845a4f781 100644 --- a/classify/train.py +++ b/classify/train.py @@ -40,7 +40,7 @@ from models.experimental import attempt_load from models.yolo import ClassificationModel, DetectionModel from utils.dataloaders import create_classification_dataloader -from utils.general import (DATASETS_DIR, GIT, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_status, +from utils.general import (DATASETS_DIR, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_info, check_git_status, check_requirements, colorstr, download, increment_path, init_seeds, print_args, yaml_save) from utils.loggers import GenericLogger from utils.plots import imshow_cls @@ -50,6 +50,7 @@ LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) +GIT_INFO = check_git_info() def train(opt, device): @@ -237,7 +238,7 @@ def train(opt, device): 'updates': ema.updates, 'optimizer': None, # optimizer.state_dict(), 'opt': vars(opt), - 'git': GIT, # {remote, branch, commit} if a git repo + 'git': GIT_INFO, # {remote, branch, commit} if a git repo 'date': datetime.now().isoformat()} # Save last, best and delete diff --git a/segment/train.py b/segment/train.py index 5d9ed78f527c..3f32d2100a75 100644 --- a/segment/train.py +++ b/segment/train.py @@ -46,9 +46,9 @@ from utils.autobatch import check_train_batch_size from utils.callbacks import Callbacks from utils.downloads import attempt_download, is_url -from utils.general import (GIT, LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, - check_img_size, check_requirements, check_suffix, check_yaml, colorstr,
get_latest_run, - increment_path, init_seeds, intersect_dicts, labels_to_class_weights, +from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info, + check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, + get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) from utils.loggers import GenericLogger from utils.plots import plot_evolve, plot_labels @@ -62,6 +62,7 @@ LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) +GIT_INFO = check_git_info() def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary @@ -390,7 +391,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio 'updates': ema.updates, 'optimizer': optimizer.state_dict(), 'opt': vars(opt), - 'git': GIT, # {remote, branch, commit} if a git repo + 'git': GIT_INFO, # {remote, branch, commit} if a git repo 'date': datetime.now().isoformat()} # Save last, best and delete diff --git a/train.py b/train.py index 1ea5c5bbeddd..8b5446e58f2d 100644 --- a/train.py +++ b/train.py @@ -47,9 +47,9 @@ from utils.callbacks import Callbacks from utils.dataloaders import create_dataloader from utils.downloads import attempt_download, is_url -from utils.general import (GIT, LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, - check_img_size, check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, - increment_path, init_seeds, intersect_dicts, labels_to_class_weights, +from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info, + check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, + get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) from utils.loggers import Loggers @@ -63,6 +63,7 @@ LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) +GIT_INFO = check_git_info() def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary @@ -376,7 +377,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio 'updates': ema.updates, 'optimizer': optimizer.state_dict(), 'opt': vars(opt), - 'git': GIT, # {remote, branch, commit} if a git repo + 'git': GIT_INFO, # {remote, branch, commit} if a git repo 'date': datetime.now().isoformat()} # Save last, best and delete diff --git a/utils/general.py b/utils/general.py index 57b6e4e78166..c5b738983719 100644 --- a/utils/general.py +++ b/utils/general.py @@ -13,7 +13,6 @@ import platform import random import re -import shutil import signal import sys import time @@ -29,7 +28,6 @@ from zipfile import ZipFile, is_zipfile import cv2 -import git import IPython import numpy as np import pandas as pd @@ -346,8 +344,10 @@ def check_git_status(repo='ultralytics/yolov5', branch='master'): @WorkingDirectory(ROOT) -def check_git(path='.'): - # YOLOv5 git check, return git {remote, branch, commit} +def check_git_info(path='.'): + # YOLOv5 git info check, return {remote, branch, commit} + check_requirements('gitpython') + import 
git try: repo = git.Repo(path) remote = repo.remotes.origin.url.replace('.git', '') # i.e. 'https://github.com/ultralytics/yolov5' @@ -1138,4 +1138,3 @@ def imshow(path, im): cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine # Variables ------------------------------------------------------------------------------------------------------------ -GIT = check_git() # repo, branch, commit From 72cad39854a7d9ebbd4d58994cefa966b0da8fc1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Nov 2022 16:44:56 +0100 Subject: [PATCH 274/326] Squeezenet reshape outputs fix (#10222) @AyushExel Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/torch_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index fe934abf118c..77549b005ceb 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -82,7 +82,7 @@ def reshape_classifier_output(model, n=1000): elif nn.Conv2d in types: i = types.index(nn.Conv2d) # nn.Conv2d index if m[i].out_channels != n: - m[i] = nn.Conv2d(m[i].in_channels, n, m[i].kernel_size, m[i].stride, bias=m[i].bias) + m[i] = nn.Conv2d(m[i].in_channels, n, m[i].kernel_size, m[i].stride, bias=m[i].bias is not None) @contextmanager From be348cc33925738825ab40dd6eacdfe4afd4e215 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 21 Nov 2022 16:54:36 +0100 Subject: [PATCH 275/326] Validate --task speed CPU fix (#10244) --- segment/val.py | 2 +- val.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/segment/val.py b/segment/val.py index 9bb8f9e4cf54..48bf28d4bf4f 100644 --- a/segment/val.py +++ b/segment/val.py @@ -444,7 +444,7 @@ def main(opt): else: weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] - opt.half = True # FP16 for fastest results + opt.half = torch.cuda.is_available() and opt.device != 'cpu' # FP16 for fastest results if opt.task == 'speed': # speed benchmarks # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False diff --git a/val.py b/val.py index ef282e37bdc1..7c610e83a856 100644 --- a/val.py +++ b/val.py @@ -380,7 +380,7 @@ def main(opt): else: weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] - opt.half = True # FP16 for fastest results + opt.half = torch.cuda.is_available() and opt.device != 'cpu' # FP16 for fastest results if opt.task == 'speed': # speed benchmarks # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... 
opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False From 915bbf294bb74c859f0b41f1c23bc395014ea679 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 22 Nov 2022 16:23:47 +0100 Subject: [PATCH 276/326] YOLOv5 v7.0 release updates (#10245) * YOLOv5 v7.0 splash image update * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * readme segmentation section * readme segmentation section * readme segmentation section * readme segmentation section * readme segmentation section * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update download URLs to 7.0 assets Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/README_cn.md | 2 +- README.md | 114 +++++++++++++++++++++++++------ classify/tutorial.ipynb | 5 +- data/scripts/download_weights.sh | 5 +- segment/tutorial.ipynb | 2 +- tutorial.ipynb | 2 +- utils/downloads.py | 8 +-- 7 files changed, 107 insertions(+), 31 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index 65ecd31a3e69..0a2f61ee35b2 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -1,7 +1,7 @@

-[YOLOv5 v6.2 splash image markup elided]
+[YOLOv5 v7.0 splash image markup elided]

[English](../README.md) | 简体中文 diff --git a/README.md b/README.md index 0fa95f404117..298e14570860 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@

-[YOLOv5 v6.2 splash image markup elided]
+[YOLOv5 v7.0 splash image markup elided]

English | [简体中文](.github/README_cn.md) @@ -50,6 +50,79 @@
+## <div align="center">Segmentation ⭐ NEW</div>
+
+[YOLOv5 segmentation splash image markup elided]
+
+ +Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) instance segmentation models are the fastest and most accurate in the world, beating all current [SOTA benchmarks](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco). We've made them super simple to train, validate and deploy. See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v7.0) and visit our [YOLOv5 Segmentation Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) for quickstart tutorials. + +
+<details>
+  <summary>Segmentation Checkpoints</summary>
+
+We trained YOLOv5 segmentation models on COCO for 300 epochs at image size 640 using A100 GPUs. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) notebooks for easy reproducibility.
+
+| Model | size<br><sup>(pixels) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | Train time<br><sup>300 epochs<br>A100 (hours) | Speed<br><sup>ONNX CPU<br>(ms) | Speed<br><sup>TRT A100<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>@640 (B) |
+|----------------------------------------------------------------------------------------------------|-----------------------|----------------------|-----------------------|-----------------------------------------------|--------------------------------|--------------------------------|--------------------|------------------------|
+| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** |
+| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 |
+| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 |
+| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 |
+| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 |
+
+- All checkpoints are trained to 300 epochs with SGD optimizer with `lr0=0.01` and `weight_decay=5e-5` at image size 640 and all default settings.<br>Runs logged to https://wandb.ai/glenn-jocher/YOLOv5_v70_official
+- **Accuracy** values are for single-model single-scale on COCO dataset.<br>Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt`
+- **Speed** averaged over 100 inference images using a [Colab Pro](https://colab.research.google.com/signup) A100 High-RAM instance. Values indicate inference speed only (NMS adds about 1ms per image).<br>Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1`
+- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.<br>Reproduce by `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half`
+
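The reproduce commands above can also be driven from Python. A minimal sketch, assuming a YOLOv5 repo clone on `PYTHONPATH` and the COCO-segments data downloaded as described (the exact return layout of `run()` is an assumption and may vary by version):

```python
# a minimal sketch: programmatic equivalent of the segment/val.py reproduce command above
# assumes a YOLOv5 repo clone on PYTHONPATH and COCO-segments downloaded locally
from segment.val import run

results, maps, times = run(data='data/coco.yaml', weights='yolov5s-seg.pt', imgsz=640, half=True)
print(results)  # box/mask precision, recall and mAP values
```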
+</details>
+
+<details>
+  <summary>Segmentation Usage Examples &nbsp; Open In Colab</summary>
+
+### Train
+YOLOv5 segmentation training supports auto-download of the COCO128-seg segmentation dataset with the `--data coco128-seg.yaml` argument, and manual download of the COCO-segments dataset with `bash data/scripts/get_coco.sh --train --val --segments` followed by `python train.py --data coco.yaml`.
+
+```bash
+# Single-GPU
+python segment/train.py --model yolov5s-seg.pt --data coco128-seg.yaml --epochs 5 --img 640
+
+# Multi-GPU DDP
+python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --model yolov5s-seg.pt --data coco128-seg.yaml --epochs 5 --img 640 --device 0,1,2,3
+```
+
+### Val
+Validate YOLOv5s-seg accuracy on the COCO dataset:
+```bash
+bash data/scripts/get_coco.sh --val --segments  # download COCO val segments split (780MB, 5000 images)
+python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640  # validate
+```
+
+### Predict
+Use pretrained YOLOv5m-seg.pt to predict bus.jpg:
+```bash
+python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg
+```
+```python
+model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m-seg.pt')  # load from PyTorch Hub (WARNING: inference not yet supported)
+```
+
+![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg)
+--- |---
+
+### Export
+Export YOLOv5s-seg model to ONNX and TensorRT:
+```bash
+python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0
+```
+
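To sanity-check the exported ONNX file, a minimal sketch (assuming `onnxruntime` is installed; the output layout below is an assumption and varies with export settings):

```python
# a minimal sketch: smoke-test the exported yolov5s-seg.onnx with ONNX Runtime
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession('yolov5s-seg.onnx', providers=['CPUExecutionProvider'])
im = np.zeros((1, 3, 640, 640), dtype=np.float32)  # dummy BCHW input scaled to [0, 1]
outputs = session.run(None, {session.get_inputs()[0].name: im})
print([o.shape for o in outputs])  # detection outputs and prototype masks
```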
+</details>
+
 ## <div align="center">Documentation</div>
See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment. See below for quickstart examples. @@ -200,12 +273,12 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We

-  <summary>YOLOv5-P5 640 Figure (click to expand)</summary>
+  <summary>YOLOv5-P5 640 Figure</summary>

-  <summary>Figure Notes (click to expand)</summary>
+  <summary>Figure Notes</summary>

 - **COCO AP val** denotes mAP@0.5:0.95 metric measured on the 5000-image [COCO val2017](http://cocodataset.org) dataset over various inference sizes from 256 to 1536.
 - **GPU Speed** measures average inference time per image on [COCO val2017](http://cocodataset.org) dataset using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100 instance at batch-size 32.

@@ -216,22 +289,22 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We

 ### Pretrained Checkpoints

-| Model | size<br><sup>(pixels) | mAP<sup>val<br>0.5:0.95 | mAP<sup>val<br>0.5 | Speed<br><sup>CPU b1<br>(ms) | Speed<br><sup>V100 b1<br>(ms) | Speed<br><sup>V100 b32<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>@640 (B) |
-|------------------------------------------------------------------------------------------------------|-----------------------|-------------------------|--------------------|------------------------------|-------------------------------|--------------------------------|--------------------|------------------------|
-| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** |
-| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 |
-| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 |
-| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 |
-| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 |
-| | | | | | | | | |
-| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 |
-| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 |
-| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 |
-| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 |
-| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)<br>+ [TTA][TTA] | 1280<br>1536 | 55.0<br>**55.8** | 72.7<br>**72.7** | 3136<br>- | 26.2<br>- | 19.4<br>- | 140.7<br>- | 209.8<br>- |
+| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | mAP<sup>val<br>50 | Speed<br><sup>CPU b1<br>(ms) | Speed<br><sup>V100 b1<br>(ms) | Speed<br><sup>V100 b32<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>@640 (B) |
+|------------------------------------------------------------------------------------------------------|-----------------------|----------------------|-------------------|------------------------------|-------------------------------|--------------------------------|--------------------|------------------------|
+| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** |
+| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 |
+| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 |
+| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 |
+| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 |
+| | | | | | | | | |
+| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 |
+| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 |
+| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 |
+| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 |
+| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)<br>+ [TTA][TTA] | 1280<br>1536 | 55.0<br>**55.8** | 72.7<br>**72.7** | 3136<br>- | 26.2<br>- | 19.4<br>- | 140.7<br>- | 209.8<br>- |
- Table Notes (click to expand) + Table Notes - All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). - **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.
Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` @@ -240,12 +313,13 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We
+ ##
Classification ⭐ NEW
-YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation, prediction and export! We've made training classifier models super simple. Click below to get started. +YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation and deployment! See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v6.2) and visit our [YOLOv5 Classification Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) for quickstart tutorials.
- Classification Checkpoints (click to expand) + Classification Checkpoints
@@ -280,7 +354,7 @@ We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4x
- Classification Usage Examples (click to expand) + Classification Usage Examples  Open In Colab ### Train YOLOv5 classification training supports auto-download of MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof, and ImageNet datasets with the `--data` argument. To start training on MNIST for example use `--data mnist`. diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index 956452a5aeda..a3da0dbd3231 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -9,7 +9,7 @@ "
\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", @@ -1452,7 +1452,8 @@ "accelerator": "GPU", "colab": { "name": "YOLOv5 Classification Tutorial", - "provenance": [] + "provenance": [], + "toc_visible": true }, "kernelspec": { "display_name": "Python 3 (ipykernel)", diff --git a/data/scripts/download_weights.sh b/data/scripts/download_weights.sh index a4f3becfdbeb..31e0a15569f2 100755 --- a/data/scripts/download_weights.sh +++ b/data/scripts/download_weights.sh @@ -11,11 +11,12 @@ python - <\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", diff --git a/tutorial.ipynb b/tutorial.ipynb index 6cf99650ad45..7d7f1649cc8d 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -369,7 +369,7 @@ "
\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", diff --git a/utils/downloads.py b/utils/downloads.py index 21bb6608d5ba..72ea87340eb9 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -59,14 +59,14 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): LOGGER.info('') -def attempt_download(file, repo='ultralytics/yolov5', release='v6.2'): - # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v6.2', etc. +def attempt_download(file, repo='ultralytics/yolov5', release='v7.0'): + # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v7.0', etc. from utils.general import LOGGER def github_assets(repository, version='latest'): - # Return GitHub repo tag (i.e. 'v6.2') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...]) + # Return GitHub repo tag (i.e. 'v7.0') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...]) if version != 'latest': - version = f'tags/{version}' # i.e. tags/v6.2 + version = f'tags/{version}' # i.e. tags/v7.0 response = requests.get(f'https://api.github.com/repos/{repository}/releases/{version}').json() # github api return response['tag_name'], [x['name'] for x in response['assets']] # tag, assets From b32f67f6beb4a921c98301fe7724003e23103728 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 22 Nov 2022 20:30:14 +0100 Subject: [PATCH 277/326] `--single-cls` segments fix (#10260) --single-cls segments fix May resolve #10230 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index e107d1a2bccf..cc5f8843ef18 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -537,8 +537,6 @@ def __init__(self, self.segments[i] = segment[j] if single_cls: # single-class training, merge all classes into 0 self.labels[i][:, 0] = 0 - if segment: - self.segments[i][:, 0] = 0 # Rectangular Training if self.rect: From c9d47ae05632e2a42e560fbfeb22d3780224546c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 22 Nov 2022 20:37:45 +0100 Subject: [PATCH 278/326] Created using Colaboratory --- tutorial.ipynb | 142 ++++++++++++++++++++++++------------------------- 1 file changed, 71 insertions(+), 71 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 7d7f1649cc8d..657dc266da92 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -14,7 +14,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "300b4d5355ef4967bd5246afeef6eef5": { + "1f7df330663048998adcf8a45bc8f69b": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", @@ -29,14 +29,14 @@ "_view_name": "HBoxView", "box_style": "", "children": [ - "IPY_MODEL_84e6829bb88845a8a4f42700b8496925", - "IPY_MODEL_c038e52d41bf4d5b9602930c3d074087", - "IPY_MODEL_2667604641764341b0bc8c6afea438fd" + "IPY_MODEL_e896e6096dd244c59d7955e2035cd729", + "IPY_MODEL_a6ff238c29984b24bf6d0bd175c19430", + "IPY_MODEL_3c085ba3f3fd4c3c8a6bb41b41ce1479" ], - "layout": "IPY_MODEL_98b3a4806ed14102b0d75e6c571d6134" + "layout": "IPY_MODEL_16b0c8aa6e0f427e8a54d3791abb7504" } }, - "84e6829bb88845a8a4f42700b8496925": { + "e896e6096dd244c59d7955e2035cd729": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -51,13 +51,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_c66a77395e42424d904699edcbb67291", + "layout": "IPY_MODEL_c7b2dd0f78384cad8e400b282996cdf5", "placeholder": "​", - 
"style": "IPY_MODEL_c4bbc15bf853439399dbcf1d40a5a407", + "style": "IPY_MODEL_6a27e43b0e434edd82ee63f0a91036ca", "value": "100%" } }, - "c038e52d41bf4d5b9602930c3d074087": { + "a6ff238c29984b24bf6d0bd175c19430": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", @@ -73,15 +73,15 @@ "bar_style": "success", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_0aaabfac395b43afbdd6d752c502bbf6", + "layout": "IPY_MODEL_cce0e6c0c4ec442cb47e65c674e02e92", "max": 818322941, "min": 0, "orientation": "horizontal", - "style": "IPY_MODEL_3786d970492b4aa38f886f2572fd958c", + "style": "IPY_MODEL_c5b9f38e2f0d4f9aa97fe87265263743", "value": 818322941 } }, - "2667604641764341b0bc8c6afea438fd": { + "3c085ba3f3fd4c3c8a6bb41b41ce1479": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -96,13 +96,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_b86d0f2d7be74cebbcaa884b53123eeb", + "layout": "IPY_MODEL_df554fb955c7454696beac5a82889386", "placeholder": "​", - "style": "IPY_MODEL_fa7b1497925a457f89286a71f073f416", - "value": " 780M/780M [00:57<00:00, 10.1MB/s]" + "style": "IPY_MODEL_74e9112a87a242f4831b7d68c7da6333", + "value": " 780M/780M [00:05<00:00, 126MB/s]" } }, - "98b3a4806ed14102b0d75e6c571d6134": { + "16b0c8aa6e0f427e8a54d3791abb7504": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -154,7 +154,7 @@ "width": null } }, - "c66a77395e42424d904699edcbb67291": { + "c7b2dd0f78384cad8e400b282996cdf5": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -206,7 +206,7 @@ "width": null } }, - "c4bbc15bf853439399dbcf1d40a5a407": { + "6a27e43b0e434edd82ee63f0a91036ca": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -221,7 +221,7 @@ "description_width": "" } }, - "0aaabfac395b43afbdd6d752c502bbf6": { + "cce0e6c0c4ec442cb47e65c674e02e92": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -273,7 +273,7 @@ "width": null } }, - "3786d970492b4aa38f886f2572fd958c": { + "c5b9f38e2f0d4f9aa97fe87265263743": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", @@ -289,7 +289,7 @@ "description_width": "" } }, - "b86d0f2d7be74cebbcaa884b53123eeb": { + "df554fb955c7454696beac5a82889386": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -341,7 +341,7 @@ "width": null } }, - "fa7b1497925a457f89286a71f073f416": { + "74e9112a87a242f4831b7d68c7da6333": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -401,7 +401,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "32e3bc15-6d02-4352-f0a3-912059d134a5" + "outputId": "f9f016ad-3dcf-4bd2-e1c3-d5b79efc6f32" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", @@ -418,7 +418,7 @@ "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + "YOLOv5 🚀 v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" ] }, { @@ -446,9 +446,9 @@ " vid.mp4 # video\n", " screen # screenshot\n", " path/ # directory\n", - " 
'path/*.jpg' # glob\n", - " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", - " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", + " 'path/*.jpg' # glob\n", + " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", + " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", "```" ] }, @@ -459,7 +459,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "8e81d6e9-0360-4212-cd61-9a5a58d3f703" + "outputId": "b4db5c49-f501-4505-cf0d-a1d35236c485" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", @@ -472,16 +472,16 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1\n", - "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 19.5MB/s]\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt to yolov5s.pt...\n", + "100% 14.1M/14.1M [00:00<00:00, 116MB/s] \n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 17.5ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 18.0ms\n", - "Speed: 0.5ms pre-process, 17.8ms inference, 17.6ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 17.0ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 14.3ms\n", + "Speed: 0.5ms pre-process, 15.7ms inference, 18.6ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" ] } @@ -515,20 +515,20 @@ "base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": [ - "300b4d5355ef4967bd5246afeef6eef5", - "84e6829bb88845a8a4f42700b8496925", - "c038e52d41bf4d5b9602930c3d074087", - "2667604641764341b0bc8c6afea438fd", - "98b3a4806ed14102b0d75e6c571d6134", - "c66a77395e42424d904699edcbb67291", - "c4bbc15bf853439399dbcf1d40a5a407", - "0aaabfac395b43afbdd6d752c502bbf6", - "3786d970492b4aa38f886f2572fd958c", - "b86d0f2d7be74cebbcaa884b53123eeb", - "fa7b1497925a457f89286a71f073f416" + "1f7df330663048998adcf8a45bc8f69b", + "e896e6096dd244c59d7955e2035cd729", + "a6ff238c29984b24bf6d0bd175c19430", + "3c085ba3f3fd4c3c8a6bb41b41ce1479", + "16b0c8aa6e0f427e8a54d3791abb7504", + "c7b2dd0f78384cad8e400b282996cdf5", + "6a27e43b0e434edd82ee63f0a91036ca", + "cce0e6c0c4ec442cb47e65c674e02e92", + "c5b9f38e2f0d4f9aa97fe87265263743", + "df554fb955c7454696beac5a82889386", + "74e9112a87a242f4831b7d68c7da6333" ] }, - "outputId": "61ffec5e-90ea-44f6-c0ea-b006e6e7072f" + "outputId": "c7d0a0d2-abfb-44c3-d60d-f99d0e7aabad" }, "source": [ "# Download COCO val\n", @@ -546,7 +546,7 @@ "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, - "model_id": "300b4d5355ef4967bd5246afeef6eef5" + 
"model_id": "1f7df330663048998adcf8a45bc8f69b" } }, "metadata": {} @@ -560,7 +560,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "aa5d5cea-14c1-4a19-bfdf-95b7164962cf" + "outputId": "5fc61358-7bc5-4310-a310-9059f66c6322" }, "source": [ "# Validate YOLOv5s on COCO val\n", @@ -573,30 +573,30 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:02<00:00, 2066.57it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:02<00:00, 1977.30it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Instances P R mAP50 mAP50-95: 100% 157/157 [01:09<00:00, 2.26it/s]\n", + " Class Images Instances P R mAP50 mAP50-95: 100% 157/157 [01:12<00:00, 2.17it/s]\n", " all 5000 36335 0.67 0.521 0.566 0.371\n", - "Speed: 0.1ms pre-process, 2.7ms inference, 1.9ms NMS per image at shape (32, 3, 640, 640)\n", + "Speed: 0.1ms pre-process, 2.9ms inference, 2.0ms NMS per image at shape (32, 3, 640, 640)\n", "\n", "Evaluating pycocotools mAP... 
saving runs/val/exp/yolov5s_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.82s)\n", + "Done (t=0.43s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=5.49s)\n", + "DONE (t=5.85s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=74.26s).\n", + "DONE (t=82.22s).\n", "Accumulating evaluation results...\n", - "DONE (t=13.46s).\n", + "DONE (t=14.92s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.374\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.572\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.402\n", @@ -676,7 +676,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "f0fcdc77-5326-41e1-bacc-be5432eefa2a" + "outputId": "721b9028-767f-4a05-c964-692c245f7398" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", @@ -690,7 +690,7 @@ "text": [ "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML\n", @@ -699,8 +699,8 @@ "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", - "100% 6.66M/6.66M [00:00<00:00, 39.8MB/s]\n", - "Dataset download success ✅ (0.8s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.66M/6.66M [00:00<00:00, 261MB/s]\n", + "Dataset download success ✅ (0.3s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -734,11 +734,11 @@ "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128/labels/train2017... 
126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 2084.63it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1911.57it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 255.09it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 229.69it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco128/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00 Date: Tue, 22 Nov 2022 20:47:54 +0100 Subject: [PATCH 279/326] Created using Colaboratory --- segment/tutorial.ipynb | 62 +++++++++++++++++++++--------------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index ad44f31d3833..09ca963d4b98 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -42,14 +42,14 @@ "base_uri": "https://localhost:8080/" }, "id": "wbvMlHd_QwMG", - "outputId": "664f49fa-554a-4dca-8d0e-5c9dd60f6d28" + "outputId": "171b23f0-71b9-4cbf-b666-6fa2ecef70c8" }, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" ] }, { @@ -100,7 +100,7 @@ "base_uri": "https://localhost:8080/" }, "id": "zR9ZbuQCH7FX", - "outputId": "6392c9ff-0863-4665-faf9-b3af9881c305" + "outputId": "3f67f1c7-f15e-4fa5-d251-967c3b77eaad" }, "outputs": [ { @@ -108,16 +108,16 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1msegment/predict: \u001b[0mweights=['yolov5s-seg.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/predict-seg, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1, retina_masks=False\n", - "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-seg.pt to yolov5s-seg.pt...\n", - "100% 14.9M/14.9M [00:01<00:00, 9.09MB/s]\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt to yolov5s-seg.pt...\n", + "100% 14.9M/14.9M [00:01<00:00, 12.0MB/s]\n", "\n", "Fusing layers... 
\n", "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 18.0ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.5ms\n", - "Speed: 0.5ms pre-process, 15.7ms inference, 18.5ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 18.2ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.4ms\n", + "Speed: 0.5ms pre-process, 15.8ms inference, 18.5ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/predict-seg/exp\u001b[0m\n" ] } @@ -155,7 +155,7 @@ "base_uri": "https://localhost:8080/" }, "id": "WQPtK1QYVaD_", - "outputId": "4707734e-00c7-43da-d642-32c3c3fe3090" + "outputId": "9d751d8c-bee8-4339-cf30-9854ca530449" }, "outputs": [ { @@ -182,7 +182,7 @@ "base_uri": "https://localhost:8080/" }, "id": "X58w8JLpMnjH", - "outputId": "f96b700d-c779-4a34-930b-e85be4e58974" + "outputId": "a140d67a-02da-479e-9ddb-7d54bf9e407a" }, "outputs": [ { @@ -190,15 +190,15 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1msegment/val: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s-seg.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val-seg, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Fusing layers... \n", "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1409.04it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 
4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1361.31it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:53<00:00, 1.38it/s]\n", + " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:54<00:00, 1.37it/s]\n", " all 5000 36335 0.673 0.517 0.566 0.373 0.672 0.49 0.532 0.319\n", - "Speed: 0.8ms pre-process, 4.0ms inference, 2.8ms NMS per image at shape (32, 3, 640, 640)\n", + "Speed: 0.6ms pre-process, 4.4ms inference, 2.9ms NMS per image at shape (32, 3, 640, 640)\n", "Results saved to \u001b[1mruns/val-seg/exp\u001b[0m\n" ] } @@ -270,7 +270,7 @@ "base_uri": "https://localhost:8080/" }, "id": "1NcFxRcFdJ_O", - "outputId": "2cdb19cc-69af-4c90-f8de-af02dfedba91" + "outputId": "3a3e0cf7-e79c-47a5-c8e7-2d26eeeab988" }, "outputs": [ { @@ -279,15 +279,15 @@ "text": [ "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n", "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128-seg/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128-seg.zip to coco128-seg.zip...\n", - "100% 6.79M/6.79M [00:01<00:00, 5.87MB/s]\n", - "Dataset download success ✅ (2.1s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.79M/6.79M [00:01<00:00, 6.73MB/s]\n", + "Dataset download success ✅ (1.9s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -321,11 +321,11 @@ "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017... 
126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1439.54it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1389.59it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128-seg/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 253.53it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 238.86it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00 Date: Tue, 22 Nov 2022 21:27:33 +0100 Subject: [PATCH 280/326] Created using Colaboratory --- classify/tutorial.ipynb | 63 +++++++++++++++++++++-------------------- 1 file changed, 32 insertions(+), 31 deletions(-) diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index a3da0dbd3231..c6f5d0d88a2d 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -42,14 +42,14 @@ "base_uri": "https://localhost:8080/" }, "id": "wbvMlHd_QwMG", - "outputId": "43b2e1b5-78d9-4e1d-8530-ee9779bba160" + "outputId": "0806e375-610d-4ec0-c867-763dbb518279" }, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" ] }, { @@ -100,24 +100,24 @@ "base_uri": "https://localhost:8080/" }, "id": "zR9ZbuQCH7FX", - "outputId": "1b610787-7cf7-4c33-aac2-aa50fbb84a94" + "outputId": "50504ef7-aa3e-4281-a4e3-d0c7df3c0ffe" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "\u001b[34m\u001b[1mclassify/predict: \u001b[0mweights=['yolov5s-cls.pt'], source=data/images, data=data/coco128.yaml, imgsz=[224, 224], device=, view_img=False, save_txt=True, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1\n", - "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\u001b[34m\u001b[1mclassify/predict: \u001b[0mweights=['yolov5s-cls.pt'], source=data/images, data=data/coco128.yaml, imgsz=[224, 224], device=, view_img=False, save_txt=False, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1\n", + "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt to yolov5s-cls.pt...\n", - "100% 10.5M/10.5M [00:03<00:00, 2.94MB/s]\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt to yolov5s-cls.pt...\n", + "100% 10.5M/10.5M [00:00<00:00, 12.3MB/s]\n", "\n", "Fusing layers... 
\n", "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", "image 1/2 /content/yolov5/data/images/bus.jpg: 224x224 minibus 0.39, police van 0.24, amphibious vehicle 0.05, recreational vehicle 0.04, trolleybus 0.03, 3.9ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 224x224 suit 0.38, bow tie 0.19, bridegroom 0.18, rugby ball 0.04, stage 0.02, 4.1ms\n", - "Speed: 0.3ms pre-process, 4.0ms inference, 1.5ms NMS per image at shape (1, 3, 224, 224)\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 224x224 suit 0.38, bow tie 0.19, bridegroom 0.18, rugby ball 0.04, stage 0.02, 4.6ms\n", + "Speed: 0.3ms pre-process, 4.3ms inference, 1.5ms NMS per image at shape (1, 3, 224, 224)\n", "Results saved to \u001b[1mruns/predict-cls/exp\u001b[0m\n" ] } @@ -155,23 +155,23 @@ "base_uri": "https://localhost:8080/" }, "id": "WQPtK1QYVaD_", - "outputId": "92de5f34-cf41-49e7-b679-41db94e995ac" + "outputId": "20fc0630-141e-4a90-ea06-342cbd7ce496" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "--2022-11-18 21:48:38-- https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar\n", + "--2022-11-22 19:53:40-- https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar\n", "Resolving image-net.org (image-net.org)... 171.64.68.16\n", "Connecting to image-net.org (image-net.org)|171.64.68.16|:443... connected.\n", "HTTP request sent, awaiting response... 200 OK\n", "Length: 6744924160 (6.3G) [application/x-tar]\n", "Saving to: ‘ILSVRC2012_img_val.tar’\n", "\n", - "ILSVRC2012_img_val. 100%[===================>] 6.28G 7.15MB/s in 11m 13s \n", + "ILSVRC2012_img_val. 100%[===================>] 6.28G 16.1MB/s in 10m 52s \n", "\n", - "2022-11-18 21:59:52 (9.55 MB/s) - ‘ILSVRC2012_img_val.tar’ saved [6744924160/6744924160]\n", + "2022-11-22 20:04:32 (9.87 MB/s) - ‘ILSVRC2012_img_val.tar’ saved [6744924160/6744924160]\n", "\n" ] } @@ -189,7 +189,7 @@ "base_uri": "https://localhost:8080/" }, "id": "X58w8JLpMnjH", - "outputId": "9961ad87-d639-4489-b578-0a0578fefaab" + "outputId": "41843132-98e2-4c25-d474-4cd7b246fb8e" }, "outputs": [ { @@ -197,11 +197,11 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1mclassify/val: \u001b[0mdata=../datasets/imagenet, weights=['yolov5s-cls.pt'], batch_size=128, imgsz=224, device=, workers=8, verbose=True, project=runs/val-cls, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Fusing layers... 
\n", "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", - "validating: 100% 391/391 [04:48<00:00, 1.35it/s]\n", + "validating: 100% 391/391 [04:57<00:00, 1.31it/s]\n", " Class Images top1_acc top5_acc\n", " all 50000 0.715 0.902\n", " tench 50 0.94 0.98\n", @@ -1269,30 +1269,30 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 10, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "1NcFxRcFdJ_O", - "outputId": "638c55b1-dc45-4eee-cabc-4921dc61faf5" + "outputId": "77c8d487-16db-4073-b3ea-06cabf2e7766" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "\u001b[34m\u001b[1mclassify/train: \u001b[0mmodel=yolov5s-cls.pt, data=imagenette160, epochs=3, batch_size=16, imgsz=224, nosave=False, cache=ram, device=, workers=8, project=runs/train-cls, name=exp, exist_ok=False, pretrained=True, optimizer=Adam, lr0=0.001, decay=5e-05, label_smoothing=0.1, cutoff=None, dropout=None, verbose=False, seed=0, local_rank=-1\n", + "\u001b[34m\u001b[1mclassify/train: \u001b[0mmodel=yolov5s-cls.pt, data=imagenette160, epochs=5, batch_size=64, imgsz=224, nosave=False, cache=ram, device=, workers=8, project=runs/train-cls, name=exp, exist_ok=False, pretrained=True, optimizer=Adam, lr0=0.001, decay=5e-05, label_smoothing=0.1, cutoff=None, dropout=None, verbose=False, seed=0, local_rank=-1\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-cls', view at http://localhost:6006/\n", "\n", "Dataset not found ⚠️, missing path /content/datasets/imagenette160, attempting download...\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/imagenette160.zip to /content/datasets/imagenette160.zip...\n", - "100% 103M/103M [00:09<00:00, 11.1MB/s]\n", + "100% 103M/103M [00:00<00:00, 347MB/s] \n", "Unzipping /content/datasets/imagenette160.zip...\n", - "Dataset download success ✅ (13.2s), saved to \u001b[1m/content/datasets/imagenette160\u001b[0m\n", + "Dataset download success ✅ (3.3s), saved to \u001b[1m/content/datasets/imagenette160\u001b[0m\n", "\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mRandomResizedCrop(p=1.0, height=224, width=224, scale=(0.08, 1.0), ratio=(0.75, 1.3333333333333333), interpolation=1), HorizontalFlip(p=0.5), ColorJitter(p=0.5, brightness=[0.6, 1.4], contrast=[0.6, 1.4], saturation=[0.6, 1.4], hue=[0, 0]), Normalize(p=1.0, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0), ToTensorV2(always_apply=True, p=1.0, transpose_mask=False)\n", "Model summary: 149 layers, 4185290 parameters, 4185290 gradients, 10.5 GFLOPs\n", @@ -1300,14 +1300,16 @@ "Image sizes 224 train, 224 test\n", "Using 1 dataloader workers\n", "Logging results to \u001b[1mruns/train-cls/exp\u001b[0m\n", - "Starting yolov5s-cls.pt training on imagenette160 dataset with 10 classes for 3 epochs...\n", + "Starting yolov5s-cls.pt training on imagenette160 dataset with 10 classes for 5 epochs...\n", "\n", " Epoch GPU_mem train_loss val_loss top1_acc top5_acc\n", - " 1/3 0.348G 1.31 1.09 0.794 0.979: 100% 592/592 [01:02<00:00, 9.47it/s]\n", - " 2/3 0.415G 1.09 0.852 0.883 0.99: 100% 592/592 [00:59<00:00, 10.00it/s]\n", - " 3/3 0.415G 0.954 0.776 0.907 0.994: 100% 592/592 
[00:59<00:00, 9.89it/s]\n", + " 1/5 1.47G 1.05 0.974 0.828 0.975: 100% 148/148 [00:38<00:00, 3.82it/s]\n", + " 2/5 1.73G 0.895 0.766 0.911 0.994: 100% 148/148 [00:36<00:00, 4.03it/s]\n", + " 3/5 1.73G 0.82 0.704 0.934 0.996: 100% 148/148 [00:35<00:00, 4.20it/s]\n", + " 4/5 1.73G 0.766 0.664 0.951 0.998: 100% 148/148 [00:36<00:00, 4.05it/s]\n", + " 5/5 1.73G 0.724 0.634 0.959 0.997: 100% 148/148 [00:37<00:00, 3.94it/s]\n", "\n", - "Training complete (0.051 hours)\n", + "Training complete (0.052 hours)\n", "Results saved to \u001b[1mruns/train-cls/exp\u001b[0m\n", "Predict: python classify/predict.py --weights runs/train-cls/exp/weights/best.pt --source im.jpg\n", "Validate: python classify/val.py --weights runs/train-cls/exp/weights/best.pt --data /content/datasets/imagenette160\n", @@ -1320,7 +1322,7 @@ ], "source": [ "# Train YOLOv5s Classification on Imagenette160 for 3 epochs\n", - "!python classify/train.py --img 224 --batch 16 --epochs 3 --data imagenette160 --model yolov5s-cls.pt --cache" + "!python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224 --cache" ] }, { @@ -1452,8 +1454,7 @@ "accelerator": "GPU", "colab": { "name": "YOLOv5 Classification Tutorial", - "provenance": [], - "toc_visible": true + "provenance": [] }, "kernelspec": { "display_name": "Python 3 (ipykernel)", @@ -1475,4 +1476,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file From bfa1f23045c7c4136a9b8ced9d6be8249ed72692 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 24 Nov 2022 16:34:14 +0100 Subject: [PATCH 281/326] FROM nvcr.io/nvidia/pytorch:22.11-py3 (#10279) * Update Docker usage examples * Update Dockerfile Signed-off-by: Glenn Jocher * Update DEBIAN_FRONTEND Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- utils/docker/Dockerfile-arm64 | 8 +++++--- utils/docker/Dockerfile-cpu | 4 +++- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index a5035c6abc33..1ecf4c64f75f 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -3,7 +3,7 @@ # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference # Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.10-py3 +FROM nvcr.io/nvidia/pytorch:22.11-py3 RUN rm -rf /opt/pytorch # remove 1.2GB dir # Downloads to user config dir diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index 8ec71622d9b6..eed1410793a1 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -9,8 +9,9 @@ FROM arm64v8/ubuntu:20.04 ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ # Install linux packages +ENV DEBIAN_FRONTEND noninteractive RUN apt update -RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata +RUN TZ=Etc/UTC apt install -y tzdata RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1-mesa-glx libglib2.0-0 libpython3-dev # RUN alias python=python3 @@ -30,12 +31,13 @@ WORKDIR /usr/src/app # Copy contents # COPY . 
/usr/src/app (issues as not a .git directory) RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app +ENV DEBIAN_FRONTEND teletype # Usage Examples ------------------------------------------------------------------------------------------------------- # Build and Push -# t=ultralytics/yolov5:latest-M1 && sudo docker build --platform linux/arm64 -f utils/docker/Dockerfile-arm64 -t $t . && sudo docker push $t +# t=ultralytics/yolov5:latest-arm64 && sudo docker build --platform linux/arm64 -f utils/docker/Dockerfile-arm64 -t $t . && sudo docker push $t # Pull and Run -# t=ultralytics/yolov5:latest-M1 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t +# t=ultralytics/yolov5:latest-arm64 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 017e2826458b..558f81f00584 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -9,8 +9,9 @@ FROM ubuntu:20.04 ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ # Install linux packages +ENV DEBIAN_FRONTEND noninteractive RUN apt update -RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata +RUN TZ=Etc/UTC apt install -y tzdata RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg # RUN alias python=python3 @@ -29,6 +30,7 @@ WORKDIR /usr/src/app # Copy contents # COPY . /usr/src/app (issues as not a .git directory) RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app +ENV DEBIAN_FRONTEND teletype # Usage Examples ------------------------------------------------------------------------------------------------------- From 31c1f111868fc0dd7140ddce13e743f79bfaa9d4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Nov 2022 00:28:50 +0100 Subject: [PATCH 282/326] `bbox_iou()` optimizations (#10296) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/metrics.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index 65ea463c0dab..0be462551b89 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -234,12 +234,12 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7 else: # x1, y1, x2, y2 = box1 b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1) b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1) - w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps - w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + w1, h1 = b1_x2 - b1_x1, (b1_y2 - b1_y1).clamp(eps) + w2, h2 = b2_x2 - b2_x1, (b2_y2 - b2_y1).clamp(eps) # Intersection area - inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ - (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + inter = (b1_x2.minimum(b2_x2) - b1_x1.maximum(b2_x1)).clamp(0) * \ + (b1_y2.minimum(b2_y2) - b1_y1.maximum(b2_y1)).clamp(0) # Union Area union = w1 * h1 + w2 * h2 - inter + eps @@ -247,13 +247,13 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7 # IoU iou = inter / union if CIoU or DIoU or GIoU: - cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width - ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height + cw = b1_x2.maximum(b2_x2) - b1_x1.minimum(b2_x1) # convex (smallest enclosing box) width + ch = b1_y2.maximum(b2_y2) - b1_y1.minimum(b2_y1) # convex 
height if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2 if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 - v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) + v = (4 / math.pi ** 2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2) with torch.no_grad(): alpha = v / (v - iou + (1 + eps)) return iou - (rho2 / c2 + v * alpha) # CIoU From 85f8379a68193cd9a9298e31035f01d304ac21f9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Nov 2022 06:06:22 +0100 Subject: [PATCH 283/326] README Segmentation Usage fixes (#10298) Fixes per https://github.com/ultralytics/yolov5/issues/10288 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 298e14570860..028a1c2f064c 100644 --- a/README.md +++ b/README.md @@ -89,14 +89,14 @@ YOLOv5 segmentation training supports auto-download COCO128-seg segmentation dat ```bash # Single-GPU -python segment/train.py --model yolov5s-seg.pt --data coco128-seg.yaml --epochs 5 --img 640 +python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 # Multi-GPU DDP -python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --model yolov5s-seg.pt --data coco128-seg.yaml --epochs 5 --img 640 --device 0,1,2,3 +python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 ``` ### Val -Validate YOLOv5m-seg accuracy on ImageNet-1k dataset: +Validate YOLOv5s-seg mask mAP on COCO dataset: ```bash bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate From 350e8eb69e01bb162ec0b22d1d13a1d1c2752853 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Nov 2022 17:33:43 +0100 Subject: [PATCH 284/326] Fix SegmentationModel Usage (#10303) Update export.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 1 + 1 file changed, 1 insertion(+) diff --git a/export.py b/export.py index e43d9b730fc6..3028e6581e63 100644 --- a/export.py +++ b/export.py @@ -596,6 +596,7 @@ def run( f = [str(x) for x in f if x] # filter out '' and None if any(f): cls, det, seg = (isinstance(model, x) for x in (ClassificationModel, DetectionModel, SegmentationModel)) # type + det &= not seg # segmentation models inherit from SegmentationModel(DetectionModel) dir = Path('segment' if seg else 'classify' if cls else '') h = '--half' if half else '' # --half FP16 inference arg s = "# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference" if cls else \ From f9ca3657f822da65a784aae7d750d86b69244ecb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 28 Nov 2022 09:20:59 -0800 Subject: [PATCH 285/326] Ultralytics Live 1 - ClearML https://youtu.be/KS4weDInJYs (#10324) * Ultralytics Live Session banner - ClearML @taliabender @thepycoder @pderrenger Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher 
Signed-off-by: Glenn Jocher --- README.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/README.md b/README.md index 028a1c2f064c..96f40e0f040a 100644 --- a/README.md +++ b/README.md @@ -50,6 +50,15 @@
+##
Ultralytics Live - November 29th
+ +
+ +We're excited to announce our very first [Ultralytics Live](https://www.youtube.com/@Ultralytics/streams) session ✨ streaming live at https://youtu.be/KS4weDInJYs on **Tuesday, November 29th** with [@thepycoder](https://github.com/thepycoder) of [ClearML](https://cutt.ly/yolov5-readme-clearml). Glenn and Victor will discuss all things experiment tracking and how you can maximize the benefits of the new YOLOv5 + ClearML [integration](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml). 🔥 + + +
+ ##
Segmentation ⭐ NEW
From 10c025d794ca395a2ca0b2a00aff65f3a92ecd8d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 28 Nov 2022 14:50:29 -0800 Subject: [PATCH 286/326] Add README License section (#10327) * Add README License section @pderrenger @AyushExel Signed-off-by: Glenn Jocher * live fix Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 96f40e0f040a..53d37d2bcb35 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.

- To request a commercial license please complete the form at Ultralytics Licensing. + To request an Enterprise License please complete the form at Ultralytics Licensing.

@@ -50,11 +50,11 @@
-##
Ultralytics Live - November 29th
+##
Ultralytics Live Session
-We're excited to announce our very first [Ultralytics Live](https://www.youtube.com/@Ultralytics/streams) session ✨ streaming live at https://youtu.be/KS4weDInJYs on **Tuesday, November 29th** with [@thepycoder](https://github.com/thepycoder) of [ClearML](https://cutt.ly/yolov5-readme-clearml). Glenn and Victor will discuss all things experiment tracking and how you can maximize the benefits of the new YOLOv5 + ClearML [integration](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml). 🔥 +We're excited to announce our very first [Ultralytics Live Session](https://www.youtube.com/@Ultralytics/streams) ✨ streaming live at https://youtu.be/KS4weDInJYs on **Tuesday, November 29th at 16:00 CET** with [@thepycoder](https://github.com/thepycoder) of [ClearML](https://cutt.ly/yolov5-readme-clearml). Glenn and Victor will discuss all things experiment tracking and how you can maximize the benefits of the new YOLOv5 + ClearML [integration](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml). 🔥
@@ -432,9 +432,18 @@ We love your input! We want to make contributing to YOLOv5 as easy and transpare + +##
License
+ +YOLOv5 is available under two different licenses: + +- **GPL-3.0 License**: See [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details. +- **Enterprise License**: Provides greater flexibility for commercial product development without the open-source requirements of GPL-3.0. Typical use cases are embedding Ultralytics software and AI models in commercial products and applications. Request an Enterprise License at [Ultralytics Licensing](https://ultralytics.com/license). + + ##
Contact
-For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For professional support please [Contact Us](https://ultralytics.com/contact). To request a commercial license please complete the form at [Ultralytics Licensing](https://ultralytics.com/license). +For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For professional support please [Contact Us](https://ultralytics.com/contact).
From e808f2267d0164edb7bc45588c4fcda68c3dd8cb Mon Sep 17 00:00:00 2001 From: Hu Ye Date: Wed, 30 Nov 2022 11:32:34 +0800 Subject: [PATCH 287/326] Eliminate unused `ConfusionMatrix.matrix()` method (#10309) * fix bug in confusion_matrix Signed-off-by: Hu Ye * Update metrics.py * Update metrics.py * Update metrics.py Signed-off-by: Hu Ye Co-authored-by: Glenn Jocher --- utils/metrics.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index 0be462551b89..c01f823a77a1 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -177,9 +177,6 @@ def process_batch(self, detections, labels): if not any(m1 == i): self.matrix[dc, self.nc] += 1 # predicted background - def matrix(self): - return self.matrix - def tp_fp(self): tp = self.matrix.diagonal() # true positives fp = self.matrix.sum(1) - tp # false positives From 7f5724ba4b3e421d4c9162742810c52248d06ecd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 29 Nov 2022 19:38:29 -0800 Subject: [PATCH 288/326] Correct Segmentation Comparison Plot (#10344) @AyushExel @Laughing-q updated plot here in README Addresses https://github.com/ultralytics/yolov5/pull/10245#issuecomment-1328482213 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 53d37d2bcb35..dd24a938a060 100644 --- a/README.md +++ b/README.md @@ -63,7 +63,7 @@ We're excited to announce our very first [Ultralytics Live Session](https://www.
- +
Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) instance segmentation models are the fastest and most accurate in the world, beating all current [SOTA benchmarks](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco). We've made them super simple to train, validate and deploy. See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v7.0) and visit our [YOLOv5 Segmentation Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) for quickstart tutorials. From b412696ff339fc573320f143290d4fb7146832b3 Mon Sep 17 00:00:00 2001 From: Laughing <61612323+Laughing-q@users.noreply.github.com> Date: Thu, 1 Dec 2022 10:39:24 -0600 Subject: [PATCH 289/326] Fix & speed up segment plot (#10350) * fix plot&&speed up * fix segment save-txt * fix channel * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- segment/predict.py | 21 ++++++++++++------ utils/plots.py | 48 ++++++++++++++-------------------------- utils/segment/general.py | 23 +++++++++++++++++++ 3 files changed, 53 insertions(+), 39 deletions(-) diff --git a/segment/predict.py b/segment/predict.py index 42389938cee7..4d8458fd879e 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -46,7 +46,7 @@ increment_path, non_max_suppression, print_args, scale_boxes, scale_segments, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box -from utils.segment.general import masks2segments, process_mask +from utils.segment.general import masks2segments, process_mask, process_mask_native from utils.torch_utils import select_device, smart_inference_mode @@ -151,13 +151,20 @@ def run( imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): - masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC - det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size + if retina_masks: + # scale bbox first the crop masks + det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size + masks = process_mask_native(proto[i], det[:, 6:], det[:, :4], im0.shape[:2]) # HWC + else: + masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC + det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size # Segments if save_txt: segments = reversed(masks2segments(masks)) - segments = [scale_segments(im.shape[2:], x, im0.shape, normalize=True) for x in segments] + segments = [ + scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True) + for x in segments] # Print results for c in det[:, 5].unique(): @@ -165,9 +172,9 @@ def run( s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Mask plotting - annotator.masks(masks, - colors=[colors(x, True) for x in det[:, 5]], - im_gpu=None if retina_masks else im[i]) + plot_img = torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() / 255. 
\ + if retina_masks else im[i] + annotator.masks(masks, colors=[colors(x, True) for x in det[:, 5]], im_gpu=plot_img) # Write results for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])): diff --git a/utils/plots.py b/utils/plots.py index 36df271c60e1..d2f232de0e97 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -114,7 +114,7 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2 thickness=tf, lineType=cv2.LINE_AA) - def masks(self, masks, colors, im_gpu=None, alpha=0.5): + def masks(self, masks, colors, im_gpu, alpha=0.5, retina_masks=False): """Plot masks at once. Args: masks (tensor): predicted masks on cuda, shape: [n, h, w] @@ -125,37 +125,21 @@ def masks(self, masks, colors, im_gpu=None, alpha=0.5): if self.pil: # convert to numpy first self.im = np.asarray(self.im).copy() - if im_gpu is None: - # Add multiple masks of shape(h,w,n) with colors list([r,g,b], [r,g,b], ...) - if len(masks) == 0: - return - if isinstance(masks, torch.Tensor): - masks = torch.as_tensor(masks, dtype=torch.uint8) - masks = masks.permute(1, 2, 0).contiguous() - masks = masks.cpu().numpy() - # masks = np.ascontiguousarray(masks.transpose(1, 2, 0)) - masks = scale_image(masks.shape[:2], masks, self.im.shape) - masks = np.asarray(masks, dtype=np.float32) - colors = np.asarray(colors, dtype=np.float32) # shape(n,3) - s = masks.sum(2, keepdims=True).clip(0, 1) # add all masks together - masks = (masks @ colors).clip(0, 255) # (h,w,n) @ (n,3) = (h,w,3) - self.im[:] = masks * alpha + self.im * (1 - s * alpha) - else: - if len(masks) == 0: - self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255 - colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0 - colors = colors[:, None, None] # shape(n,1,1,3) - masks = masks.unsqueeze(3) # shape(n,h,w,1) - masks_color = masks * (colors * alpha) # shape(n,h,w,3) - - inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1) - mcs = (masks_color * inv_alph_masks).sum(0) * 2 # mask color summand shape(n,h,w,3) - - im_gpu = im_gpu.flip(dims=[0]) # flip channel - im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3) - im_gpu = im_gpu * inv_alph_masks[-1] + mcs - im_mask = (im_gpu * 255).byte().cpu().numpy() - self.im[:] = scale_image(im_gpu.shape, im_mask, self.im.shape) + if len(masks) == 0: + self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255 + colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0 + colors = colors[:, None, None] # shape(n,1,1,3) + masks = masks.unsqueeze(3) # shape(n,h,w,1) + masks_color = masks * (colors * alpha) # shape(n,h,w,3) + + inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1) + mcs = (masks_color * inv_alph_masks).sum(0) * 2 # mask color summand shape(n,h,w,3) + + im_gpu = im_gpu.flip(dims=[0]) # flip channel + im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3) + im_gpu = im_gpu * inv_alph_masks[-1] + mcs + im_mask = (im_gpu * 255).byte().cpu().numpy() + self.im[:] = im_mask if retina_masks else scale_image(im_gpu.shape, im_mask, self.im.shape) if self.pil: # convert im back to PIL and update draw self.fromarray(self.im) diff --git a/utils/segment/general.py b/utils/segment/general.py index b526333dc5a1..6ebfd27bd9d3 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -67,6 +67,29 @@ def process_mask(protos, masks_in, bboxes, shape, upsample=False): return masks.gt_(0.5) +def process_mask_native(protos, masks_in, bboxes, dst_shape): + """ + Crop after upsample. 
+ proto_out: [mask_dim, mask_h, mask_w] + out_masks: [n, mask_dim], n is number of masks after nms + bboxes: [n, 4], n is number of masks after nms + shape:input_image_size, (h, w) + + return: h, w, n + """ + c, mh, mw = protos.shape # CHW + masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) + gain = min(mh / dst_shape[0], mw / dst_shape[1]) # gain = old / new + pad = (mw - dst_shape[1] * gain) / 2, (mh - dst_shape[0] * gain) / 2 # wh padding + top, left = int(pad[1]), int(pad[0]) # y, x + bottom, right = int(mh - pad[1]), int(mw - pad[0]) + masks = masks[:, top:bottom, left:right] + + masks = F.interpolate(masks[None], dst_shape, mode='bilinear', align_corners=False)[0] # CHW + masks = crop_mask(masks, bboxes) # CHW + return masks.gt_(0.5) + + def scale_image(im1_shape, masks, im0_shape, ratio_pad=None): """ img1_shape: model input shape, [h, w] From 028b7cdb5a2e650b4d9e79eaa90a00c1efdcbcba Mon Sep 17 00:00:00 2001 From: Michael Ben ami <31584614+mbenami@users.noreply.github.com> Date: Thu, 1 Dec 2022 22:44:14 +0200 Subject: [PATCH 290/326] fix_reading_nan_in_evolve (#10358) when there is `nan` in evolve.csv pandas read it as str remove the space before fix that Signed-off-by: Michael Ben ami <31584614+mbenami@users.noreply.github.com> Signed-off-by: Michael Ben ami <31584614+mbenami@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index c5b738983719..efe8590f85a1 100644 --- a/utils/general.py +++ b/utils/general.py @@ -1036,7 +1036,7 @@ def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve # Save yaml with open(evolve_yaml, 'w') as f: - data = pd.read_csv(evolve_csv) + data = pd.read_csv(evolve_csv, skipinitialspace=True) data = data.rename(columns=lambda x: x.strip()) # strip keys i = np.argmax(fitness(data.values[:, :4])) # generations = len(data) From 185d475d93ebd4c03b53b4eb6057a62a52018b24 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 1 Dec 2022 13:01:46 -0800 Subject: [PATCH 291/326] Add DNN warning comment (#10368) Update export.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index 3028e6581e63..928992903b0b 100644 --- a/export.py +++ b/export.py @@ -153,7 +153,7 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX f, verbose=False, opset_version=opset, - do_constant_folding=True, + do_constant_folding=True, # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False input_names=['images'], output_names=output_names, dynamic_axes=dynamic or None) From 1ce464f6890ed1afe887ab8eed78804ae5933aa8 Mon Sep 17 00:00:00 2001 From: Victor Sonck Date: Thu, 1 Dec 2022 22:32:55 +0100 Subject: [PATCH 292/326] Add docker info for ClearML remote execution (#10142) * Add docker info for ClearML remote execution * add additional clearml options to handle different python versions * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/loggers/clearml/clearml_utils.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index fe5f597a87a6..08aa9fd3327f 100644 --- 
a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -97,6 +97,11 @@ def __init__(self, opt, hyp): # will have to be added manually! self.task.connect(hyp, name='Hyperparameters') + # Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent + self.task.set_base_docker("ultralytics/yolov5:latest", + docker_arguments='--ipc=host -e="CLEARML_AGENT_SKIP_PYTHON_ENV_INSTALL=1"', + docker_setup_bash_script='pip install clearml') + # Get ClearML Dataset Version if requested if opt.data.startswith('clearml://'): # data_dict should have the following keys: From 7845cea91343e430566689deff6e50f6c2b473fa Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 1 Dec 2022 13:56:33 -0800 Subject: [PATCH 293/326] Fix ClearML unconfigured error (#10369) @thepycoder adds Try Except for installed but unconfigured clearml environments. Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/loggers/__init__.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index bc8dd7621579..22da87034f24 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -118,7 +118,14 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, # ClearML if clearml and 'clearml' in self.include: - self.clearml = ClearmlLogger(self.opt, self.hyp) + try: + self.clearml = ClearmlLogger(self.opt, self.hyp) + except Exception: + self.clearml = None + prefix = colorstr('ClearML: ') + LOGGER.warning(f'{prefix}WARNING ⚠️ ClearML is installed but not configured, skipping ClearML logging.' + f' See https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml#readme') + else: self.clearml = None From d7955fe438cbc4ca9fd735b79fa99545ffa81575 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Fri, 2 Dec 2022 00:00:43 +0100 Subject: [PATCH 294/326] Fix clearml args logging when training is launch with run() (#10359) * Connect opt to clearml args * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update clearml_utils.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/loggers/clearml/clearml_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index 08aa9fd3327f..7ad40ea5f987 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -96,6 +96,7 @@ def __init__(self, opt, hyp): # Only the hyperparameters coming from the yaml config file # will have to be added manually! self.task.connect(hyp, name='Hyperparameters') + self.task.connect(opt, name='Args') # Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent self.task.set_base_docker("ultralytics/yolov5:latest", From d1ffc3a3a72b438175d3b4cd6e84ef1bc8df2703 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Dec 2022 13:18:39 -0800 Subject: [PATCH 295/326] Create CITATION.cff (#10387) * Create CITATION.cff @AyushExel @pderrenger new citation file!! 
:) Signed-off-by: Glenn Jocher * Update CITATION.cff Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- CITATION.cff | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 CITATION.cff diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 000000000000..f8d5fdc3785d --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,14 @@ +cff-version: 1.2.0 +preferred-citation: + type: software + message: If you use YOLOv5, please cite it as below. + authors: + - family-names: Jocher + given-names: Glenn + orcid: "https://orcid.org/0000-0001-5950-6979" + title: "YOLOv5 by Ultralytics" + version: 7.0.0 + doi: 10.5281/zenodo.3908559 + date-released: 2020-5-29 + license: GPL-3.0 + url: "https://github.com/ultralytics/yolov5" From e96113e48591f246620a3696b7de84423c3c1e42 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Dec 2022 13:27:45 -0800 Subject: [PATCH 296/326] Update CITATION.cff to version: v7.0 (#10389) Update version: v7.0 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- CITATION.cff | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CITATION.cff b/CITATION.cff index f8d5fdc3785d..8e2cf1148b92 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -7,7 +7,7 @@ preferred-citation: given-names: Glenn orcid: "https://orcid.org/0000-0001-5950-6979" title: "YOLOv5 by Ultralytics" - version: 7.0.0 + version: 7.0 doi: 10.5281/zenodo.3908559 date-released: 2020-5-29 license: GPL-3.0 From a1b6e79ccf0b66f201720d82d79da14bc44bad6d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Dec 2022 13:28:33 -0800 Subject: [PATCH 297/326] Revert TQDM bar format changes (#10343) Per https://github.com/ultralytics/yolov5/issues/10342 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index efe8590f85a1..99a96576c3fd 100644 --- a/utils/general.py +++ b/utils/general.py @@ -49,7 +49,7 @@ DATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode -TQDM_BAR_FORMAT = '{l_bar}{bar:10}| {n_fmt}/{total_fmt} {elapsed}' # tqdm bar format +TQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}' # tqdm bar format FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf torch.set_printoptions(linewidth=320, precision=5, profile='long') From 9722e6ffe5926fa20387c678d4ca0aef410a0c05 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 3 Dec 2022 14:41:08 -0800 Subject: [PATCH 298/326] `process_mask_native()` cleanup (#10366) * process_mask_native() cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix arg name * cleanup anno_json * Remove scale_image * Remove scale_image * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update to native Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- segment/predict.py | 17 +++++++++-------- segment/val.py | 10 +++++----- utils/segment/general.py | 20 ++++++++++---------- val.py | 4 ++-- 4 files changed, 26 insertions(+), 25 deletions(-) diff --git a/segment/predict.py b/segment/predict.py index 4d8458fd879e..4ba9e46ddab0 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -44,7 +44,7 @@ from utils.dataloaders 
import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, non_max_suppression, print_args, scale_boxes, scale_segments, - strip_optimizer, xyxy2xywh) + strip_optimizer) from utils.plots import Annotator, colors, save_one_box from utils.segment.general import masks2segments, process_mask, process_mask_native from utils.torch_utils import select_device, smart_inference_mode @@ -161,10 +161,9 @@ def run( # Segments if save_txt: - segments = reversed(masks2segments(masks)) segments = [ scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True) - for x in segments] + for x in reversed(masks2segments(masks))] # Print results for c in det[:, 5].unique(): @@ -172,15 +171,17 @@ def run( s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Mask plotting - plot_img = torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() / 255. \ - if retina_masks else im[i] - annotator.masks(masks, colors=[colors(x, True) for x in det[:, 5]], im_gpu=plot_img) + annotator.masks( + masks, + colors=[colors(x, True) for x in det[:, 5]], + im_gpu=torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() / + 255 if retina_masks else im[i]) # Write results for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])): if save_txt: # Write to file - segj = segments[j].reshape(-1) # (n,2) to (n*2) - line = (cls, *segj, conf) if save_conf else (cls, *segj) # label format + seg = segments[j].reshape(-1) # (n,2) to (n*2) + line = (cls, *seg, conf) if save_conf else (cls, *seg) # label format with open(f'{txt_path}.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') diff --git a/segment/val.py b/segment/val.py index 48bf28d4bf4f..368a058f9ced 100644 --- a/segment/val.py +++ b/segment/val.py @@ -48,7 +48,7 @@ from utils.metrics import ConfusionMatrix, box_iou from utils.plots import output_to_target, plot_val_study from utils.segment.dataloaders import create_dataloader -from utils.segment.general import mask_iou, process_mask, process_mask_upsample, scale_image +from utils.segment.general import mask_iou, process_mask, process_mask_native, scale_image from utils.segment.metrics import Metrics, ap_per_class_box_and_mask from utils.segment.plots import plot_images_and_masks from utils.torch_utils import de_parallel, select_device, smart_inference_mode @@ -160,7 +160,7 @@ def run( ): if save_json: check_requirements(['pycocotools']) - process = process_mask_upsample # more accurate + process = process_mask_native # more accurate else: process = process_mask # faster @@ -312,7 +312,7 @@ def run( pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8) if plots and batch_i < 3: - plot_masks.append(pred_masks[:15].cpu()) # filter top 15 to plot + plot_masks.append(pred_masks[:15]) # filter top 15 to plot # Save/log if save_txt: @@ -367,8 +367,8 @@ def run( # Save JSON if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights - anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json - pred_json = str(save_dir / f"{w}_predictions.json") # predictions json + anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations + pred_json = str(save_dir / f"{w}_predictions.json") # predictions 
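    # the fixed '../datasets/coco' relative path in anno_json above assumes the
    # repo's default DATASETS_DIR layout (ROOT.parent / 'datasets', per utils/general.py);
    # point it elsewhere if your COCO annotations live in a different location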
LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) diff --git a/utils/segment/general.py b/utils/segment/general.py index 6ebfd27bd9d3..9da894538665 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -25,10 +25,10 @@ def crop_mask(masks, boxes): def process_mask_upsample(protos, masks_in, bboxes, shape): """ Crop after upsample. - proto_out: [mask_dim, mask_h, mask_w] - out_masks: [n, mask_dim], n is number of masks after nms + protos: [mask_dim, mask_h, mask_w] + masks_in: [n, mask_dim], n is number of masks after nms bboxes: [n, 4], n is number of masks after nms - shape:input_image_size, (h, w) + shape: input_image_size, (h, w) return: h, w, n """ @@ -67,25 +67,25 @@ def process_mask(protos, masks_in, bboxes, shape, upsample=False): return masks.gt_(0.5) -def process_mask_native(protos, masks_in, bboxes, dst_shape): +def process_mask_native(protos, masks_in, bboxes, shape): """ Crop after upsample. - proto_out: [mask_dim, mask_h, mask_w] - out_masks: [n, mask_dim], n is number of masks after nms + protos: [mask_dim, mask_h, mask_w] + masks_in: [n, mask_dim], n is number of masks after nms bboxes: [n, 4], n is number of masks after nms - shape:input_image_size, (h, w) + shape: input_image_size, (h, w) return: h, w, n """ c, mh, mw = protos.shape # CHW masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) - gain = min(mh / dst_shape[0], mw / dst_shape[1]) # gain = old / new - pad = (mw - dst_shape[1] * gain) / 2, (mh - dst_shape[0] * gain) / 2 # wh padding + gain = min(mh / shape[0], mw / shape[1]) # gain = old / new + pad = (mw - shape[1] * gain) / 2, (mh - shape[0] * gain) / 2 # wh padding top, left = int(pad[1]), int(pad[0]) # y, x bottom, right = int(mh - pad[1]), int(mw - pad[0]) masks = masks[:, top:bottom, left:right] - masks = F.interpolate(masks[None], dst_shape, mode='bilinear', align_corners=False)[0] # CHW + masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW masks = crop_mask(masks, bboxes) # CHW return masks.gt_(0.5) diff --git a/val.py b/val.py index 7c610e83a856..e84249ed383f 100644 --- a/val.py +++ b/val.py @@ -302,8 +302,8 @@ def run( # Save JSON if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights - anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json - pred_json = str(save_dir / f"{w}_predictions.json") # predictions json + anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations + pred_json = str(save_dir / f"{w}_predictions.json") # predictions LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) From 5dc1ce4e865960f5b5dfe4e4f5148a4731433bca Mon Sep 17 00:00:00 2001 From: Colin Wong Date: Sat, 3 Dec 2022 16:58:58 -0600 Subject: [PATCH 299/326] Support `.txt` files as a line-by-line media list rather than streams (#10059) * Update streams.txt default Signed-off-by: Colin Wong * Change streams list extension to .streams * Read txt as media per line * Missed one * Missed another one * Update dataloaders.py * Update detect.py * Update dataloaders.py * Update detect.py * Update predict.py * Update predict.py * Update README.md Signed-off-by: Colin Wong Co-authored-by: Glenn Jocher --- README.md | 18 ++++++++++-------- classify/predict.py | 4 +++- detect.py | 4 +++- segment/predict.py | 4 +++- utils/dataloaders.py | 4 +++- 5 files changed, 22 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index dd24a938a060..3c163b3e1742 100644 --- a/README.md +++ b/README.md @@ -182,14 +182,16 @@ results.print() # or .show(), .save(), .crop(), .pandas(), etc. the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. ```bash -python detect.py --source 0 # webcam - img.jpg # image - vid.mp4 # video - screen # screenshot - path/ # directory - 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream +python detect.py --weights yolov5s.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + screen # screenshot + path/ # directory + list.txt # list of images + list.streams # list of streams + 'path/*.jpg' # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream ```
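
A minimal sketch of how these new source types are routed (the helper names below are illustrative only; in the repo the same logic lives inline in `LoadImages.__init__` and in the `webcam = ...` checks shown in the diffs that follow):

```python
from pathlib import Path


def expand_txt_source(path):
    # mirrors the LoadImages change: a *.txt source now lists one image/video/dir per line
    if isinstance(path, str) and Path(path).suffix == '.txt':
        return Path(path).read_text().rsplit()
    return path


def is_stream_source(source: str) -> bool:
    # simplified: numeric ids ('0') and *.streams files are routed to LoadStreams
    return source.isnumeric() or source.endswith('.streams')
```

A `file.streams` text file holds one stream URL per line, while a plain `file.txt` passed as `--source` is now read as a line-by-line media list of images, videos and directories.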
diff --git a/classify/predict.py b/classify/predict.py index 9a6b00062932..5a5edabda42c 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -8,6 +8,8 @@ vid.mp4 # video screen # screenshot path/ # directory + list.txt # list of images + list.streams # list of streams 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream @@ -74,7 +76,7 @@ def run( save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) - webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download diff --git a/detect.py b/detect.py index 58b02802e6d9..2d13401f78bd 100644 --- a/detect.py +++ b/detect.py @@ -8,6 +8,8 @@ vid.mp4 # video screen # screenshot path/ # directory + list.txt # list of images + list.streams # list of streams 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream @@ -82,7 +84,7 @@ def run( save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) - webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download diff --git a/segment/predict.py b/segment/predict.py index 4ba9e46ddab0..e9093baa1cc7 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -8,6 +8,8 @@ vid.mp4 # video screen # screenshot path/ # directory + list.txt # list of images + list.streams # list of streams 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream @@ -85,7 +87,7 @@ def run( save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) - webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download diff --git a/utils/dataloaders.py b/utils/dataloaders.py index cc5f8843ef18..6d2b27ea5e60 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -238,6 +238,8 @@ def __next__(self): class LoadImages: # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4` def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): + if isinstance(path, str) and Path(path).suffix == ".txt": # *.txt file with img/vid/dir on each line + path = Path(path).read_text().rsplit() files = [] for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: p = str(Path(p).resolve()) @@ -338,7 +340,7 @@ def __len__(self): class LoadStreams: # YOLOv5 streamloader, i.e. 
`python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` - def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): + def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): torch.backends.cudnn.benchmark = True # faster for fixed-size inference self.mode = 'stream' self.img_size = img_size From f8539a680041a9f4fbcc4fcdd8f540724da453af Mon Sep 17 00:00:00 2001 From: Victor Sonck Date: Mon, 5 Dec 2022 21:12:19 +0100 Subject: [PATCH 300/326] Allow users to specify how to override a ClearML Task (#10363) * Added basic flag to enable reusing last task clearml * Added option to provide task ID to override * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Use exist_ok argument instead Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/loggers/clearml/clearml_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index 7ad40ea5f987..3457727a96a4 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -89,6 +89,7 @@ def __init__(self, opt, hyp): task_name=opt.name if opt.name != 'exp' else 'Training', tags=['YOLOv5'], output_uri=True, + reuse_last_task_id=opt.exist_ok, auto_connect_frameworks={'pytorch': False} # We disconnect pytorch auto-detection, because we added manual model save points in the code ) From 0a1fdcd8ebaebf48d95d795c3693a0148f3ec0f9 Mon Sep 17 00:00:00 2001 From: Leander van Eekelen <47320151+leandervaneekelen@users.noreply.github.com> Date: Tue, 6 Dec 2022 23:48:17 +0100 Subject: [PATCH 301/326] Add catch for misspelled `--task` (#10420) * Add catch for misspelled task Signed-off-by: Leander van Eekelen <47320151+leandervaneekelen@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update val.py Signed-off-by: Glenn Jocher * Update val.py Signed-off-by: Glenn Jocher Signed-off-by: Leander van Eekelen <47320151+leandervaneekelen@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- segment/val.py | 2 ++ val.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/segment/val.py b/segment/val.py index 368a058f9ced..5cf8ae8b41c1 100644 --- a/segment/val.py +++ b/segment/val.py @@ -463,6 +463,8 @@ def main(opt): np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt') plot_val_study(x=x) # plot + else: + raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') if __name__ == "__main__": diff --git a/val.py b/val.py index e84249ed383f..8d27d9d3dab1 100644 --- a/val.py +++ b/val.py @@ -399,6 +399,8 @@ def main(opt): np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt') plot_val_study(x=x) # plot + else: + raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') if __name__ == "__main__": From 06243845b3b7f367350ee93323e47740d40e560d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 6 Dec 2022 15:12:20 -0800 Subject: [PATCH 302/326] [pre-commit.ci] pre-commit suggestions (#10409) MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit suggestions updates: - [github.com/pre-commit/pre-commit-hooks: v4.3.0 → v4.4.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.3.0...v4.4.0) - [github.com/asottile/pyupgrade: v3.2.0 → v3.3.0](https://github.com/asottile/pyupgrade/compare/v3.2.0...v3.3.0) - [github.com/PyCQA/flake8: 5.0.4 → 6.0.0](https://github.com/PyCQA/flake8/compare/5.0.4...6.0.0) * Fix flake8 ignore syntax Signed-off-by: Glenn Jocher * spacing Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- .pre-commit-config.yaml | 6 +++--- setup.cfg | 24 ++++++++++-------------- 2 files changed, 13 insertions(+), 17 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0106b4aab523..72c3cc67e59f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,7 +13,7 @@ ci: repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.3.0 + rev: v4.4.0 hooks: # - id: end-of-file-fixer - id: trailing-whitespace @@ -24,7 +24,7 @@ repos: - id: check-docstring-first - repo: https://github.com/asottile/pyupgrade - rev: v3.2.0 + rev: v3.3.0 hooks: - id: pyupgrade name: Upgrade code @@ -58,7 +58,7 @@ repos: - id: yesqa - repo: https://github.com/PyCQA/flake8 - rev: 5.0.4 + rev: 6.0.0 hooks: - id: flake8 name: PEP8 diff --git a/setup.cfg b/setup.cfg index f12995da3e8e..d7c4cb3e1a4d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -6,7 +6,6 @@ license_file = LICENSE description_file = README.md - [tool:pytest] norecursedirs = .git @@ -17,7 +16,6 @@ addopts = --durations=25 --color=yes - [flake8] max-line-length = 120 exclude = .tox,*.egg,build,temp @@ -27,17 +25,16 @@ verbose = 2 # https://pep8.readthedocs.io/en/latest/intro.html#error-codes format = pylint # see: https://www.flake8rules.com/ -ignore = - E731 # Do not assign a lambda expression, use a def - F405 # name may be undefined, or defined from star imports: module - E402 # module level import not at top of file - F401 # module imported but unused - W504 # line break after binary operator - E127 # continuation line over-indented for visual indent - E231 # missing whitespace after ‘,’, ‘;’, or ‘:’ - E501 # line too long - F403 # ‘from module import *’ used; unable to detect undefined names - +ignore = E731,F405,E402,F401,W504,E127,E231,E501,F403 + # E731: Do not assign a lambda expression, use a def + # F405: name may be undefined, or defined from star imports: module + # E402: module level import not at top of file + # F401: module imported but unused + # W504: line break after binary operator + # E127: continuation line over-indented for visual indent + # E231: missing whitespace after ‘,’, ‘;’, or ‘:’ + # E501: line too long + # F403: ‘from module import *’ used; unable to detect undefined names [isort] # https://pycqa.github.io/isort/docs/configuration/options.html @@ -45,7 +42,6 @@ line_length = 120 # see: https://pycqa.github.io/isort/docs/configuration/multi_line_output_modes.html multi_line_output = 0 - [yapf] based_on_style = pep8 spaces_before_comment = 2 From 454dae1301abb3fbf4fd1f54d5dc706cc69f8e7e Mon Sep 17 00:00:00 2001 From: Talia Bender <85292283+taliabender@users.noreply.github.com> Date: Wed, 7 Dec 2022 00:45:24 +0100 Subject: [PATCH 303/326] Ultralytics Live Session 2 - Roboflow https://youtu.be/LKpuzZllNpA (#10426) * Update README.md Info for Ep 2 of Ultralytics Live Sessions 
Signed-off-by: Talia Bender <85292283+taliabender@users.noreply.github.com> * Update README.md Signed-off-by: Glenn Jocher * Update image link Make sure we update the href field in the image so when users click the image they go directly to the YouTube live page. Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Talia Bender <85292283+taliabender@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 3c163b3e1742..91ffcb1f95a9 100644 --- a/README.md +++ b/README.md @@ -54,9 +54,10 @@
-We're excited to announce our very first [Ultralytics Live Session](https://www.youtube.com/@Ultralytics/streams) ✨ streaming live at https://youtu.be/KS4weDInJYs on **Tuesday, November 29th at 16:00 CET** with [@thepycoder](https://github.com/thepycoder) of [ClearML](https://cutt.ly/yolov5-readme-clearml). Glenn and Victor will discuss all things experiment tracking and how you can maximize the benefits of the new YOLOv5 + ClearML [integration](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml). 🔥
+[Ultralytics Live Session Ep. 2](https://youtu.be/LKpuzZllNpA) ✨ will be streaming live on **Tuesday, December 13th at 19:00 CET** with [Joseph Nelson](https://github.com/josephofiowa) of [Roboflow](https://roboflow.com/?ref=ultralytics) who will join us to discuss the brand new Roboflow x Ultralytics HUB integration. Tune in to ask Glenn and Joseph about how you can speed up workflows with seamless dataset integration! 🔥
 
- 
+ 
+
 
##
Segmentation ⭐ NEW
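Circling back to the ClearML change in PATCH 300 above: a hedged sketch of how the new `reuse_last_task_id=opt.exist_ok` wiring plays out, assuming the `clearml` package is installed and configured. The `SimpleNamespace` stands in for the parsed `train.py` arguments, and the `project_name` value is illustrative only (it sits outside the patch's context lines).

```python
# Sketch of the ClearML Task creation after PATCH 300 (assumes `pip install clearml`).
# `opt` is a hypothetical stand-in for the argparse namespace handed to ClearmlLogger.
from types import SimpleNamespace

from clearml import Task

opt = SimpleNamespace(project=None, name='exp', exist_ok=True)  # --exist-ok now also reuses the last task

task = Task.init(
    project_name=opt.project or 'YOLOv5',  # illustrative; not shown in the patch's context
    task_name=opt.name if opt.name != 'exp' else 'Training',
    tags=['YOLOv5'],
    output_uri=True,
    reuse_last_task_id=opt.exist_ok,  # the added line: True resumes/overwrites the most recent task
    auto_connect_frameworks={'pytorch': False})  # manual save points replace PyTorch auto-detection
```

The practical effect: running `train.py` with `--exist-ok` should pick up and overwrite the previous ClearML task instead of registering a fresh task on every run, mirroring how `--exist-ok` already behaves for local `runs/` directories.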
From de812396fe94996cfc0e8c75cfdcc446b61e3439 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 8 Dec 2022 17:25:45 -0800 Subject: [PATCH 304/326] Add README App section (#10446) * Add README App section @AyushExel @pderrenger this should increase our app visibility per https://github.com/ultralytics/yolov5/issues/10431 Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index 91ffcb1f95a9..f00cb76c6ce9 100644 --- a/README.md +++ b/README.md @@ -427,6 +427,13 @@ Get started in seconds with our verified environments. Click each icon below for
+##
App
+ +Run YOLOv5 models on your iOS or Android device by downloading the [Ultralytics App](https://ultralytics.com/app_install)! + + +Ultralytics mobile app + ##
Contribute
From 1607aec4312719db820a026792223acad915015f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 8 Dec 2022 17:27:36 -0800 Subject: [PATCH 305/326] Automatic README translation to Simplified Chinese (#10445) * Create translate-readme.yml @AyushExel @pderrenger @Laughing-q adding README translation action since we are unable to manually maintain our Chinese-translated README Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Signed-off-by: Glenn Jocher * Double hyperlinks Signed-off-by: Glenn Jocher * Delete README_cn.md Signed-off-by: Glenn Jocher * Create README.zh-CN.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Signed-off-by: Glenn Jocher * Update .pre-commit-config.yaml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/README_cn.md | 344 ------------------ .github/workflows/translate-readme.yml | 27 ++ .pre-commit-config.yaml | 2 +- README.md | 17 +- README.zh-CN.md | 479 +++++++++++++++++++++++++ 5 files changed, 513 insertions(+), 356 deletions(-) delete mode 100644 .github/README_cn.md create mode 100644 .github/workflows/translate-readme.yml create mode 100644 README.zh-CN.md diff --git a/.github/README_cn.md b/.github/README_cn.md deleted file mode 100644 index 0a2f61ee35b2..000000000000 --- a/.github/README_cn.md +++ /dev/null @@ -1,344 +0,0 @@ -
-

- - -

- - [English](../README.md) | 简体中文 -
-
- YOLOv5 CI - YOLOv5 Citation - Docker Pulls -
- Run on Gradient - Open In Colab - Open In Kaggle -
- -
-

- YOLOv5🚀是一个在COCO数据集上预训练的物体检测架构和模型系列,它代表了Ultralytics对未来视觉AI方法的公开研究,其中包含了在数千小时的研究和开发中所获得的经验和最佳实践。 -

- -
- - - - - - - - - - - - - - - - - - - - -
-
- - -##
文件
- -请参阅[YOLOv5 Docs](https://docs.ultralytics.com),了解有关训练、测试和部署的完整文件。 - -##
快速开始案例
- -
-安装 - -在[**Python>=3.7.0**](https://www.python.org/) 的环境中克隆版本仓并安装 [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt),包括[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/)。 -```bash -git clone https://github.com/ultralytics/yolov5 # 克隆 -cd yolov5 -pip install -r requirements.txt # 安装 -``` - -
- -
-推理 - -YOLOv5 [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 推理. [模型](https://github.com/ultralytics/yolov5/tree/master/models) 自动从最新YOLOv5 [版本](https://github.com/ultralytics/yolov5/releases)下载。 - -```python -import torch - -# 模型 -model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5n - yolov5x6, custom - -# 图像 -img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list - -# 推理 -results = model(img) - -# 结果 -results.print() # or .show(), .save(), .crop(), .pandas(), etc. -``` - -
- -
-用 detect.py 进行推理 - -`detect.py` 在各种数据源上运行推理, 其会从最新的 YOLOv5 [版本](https://github.com/ultralytics/yolov5/releases) 中自动下载 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 并将检测结果保存到 `runs/detect` 目录。 - -```bash -python detect.py --source 0 # 网络摄像头 - img.jpg # 图像 - vid.mp4 # 视频 - path/ # 文件夹 - 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP 流 -``` - -
- -
-训练 - -以下指令再现了 YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) -数据集结果. [模型](https://github.com/ultralytics/yolov5/tree/master/models) 和 [数据集](https://github.com/ultralytics/yolov5/tree/master/data) 自动从最新的YOLOv5 [版本](https://github.com/ultralytics/yolov5/releases) 中下载。YOLOv5n/s/m/l/x的训练时间在V100 GPU上是 1/2/4/6/8天(多GPU倍速). 尽可能使用最大的 `--batch-size`, 或通过 `--batch-size -1` 来实现 YOLOv5 [自动批处理](https://github.com/ultralytics/yolov5/pull/5092). 批量大小显示为 V100-16GB。 - -```bash -python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 - yolov5s 64 - yolov5m 40 - yolov5l 24 - yolov5x 16 -``` - - - -
- -
-教程 - -- [训练自定义数据集](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) 🚀 推荐 -- [获得最佳训练效果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results) ☘️ - 推荐 -- [多GPU训练](https://github.com/ultralytics/yolov5/issues/475) -- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 🌟 新 -- [TFLite, ONNX, CoreML, TensorRT 输出](https://github.com/ultralytics/yolov5/issues/251) 🚀 -- [测试时数据增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) -- [模型集成](https://github.com/ultralytics/yolov5/issues/318) -- [模型剪枝/稀疏性](https://github.com/ultralytics/yolov5/issues/304) -- [超参数进化](https://github.com/ultralytics/yolov5/issues/607) -- [带有冻结层的迁移学习](https://github.com/ultralytics/yolov5/issues/1314) -- [架构概要](https://github.com/ultralytics/yolov5/issues/6998) 🌟 新 -- [使用Weights & Biases 记录实验](https://github.com/ultralytics/yolov5/issues/1289) -- [Roboflow:数据集,标签和主动学习](https://github.com/ultralytics/yolov5/issues/4975) 🌟 新 -- [使用ClearML 记录实验](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) 🌟 新 -- [Deci 平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform) 🌟 新 - -
- - -##
Integrations
- -
- - -
-
- -
- - - - - - - - - - - -
- -|Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Deci ⭐ NEW| -|:-:|:-:|:-:|:-:| -|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| - - -##
Ultralytics HUB
- -[Ultralytics HUB](https://bit.ly/ultralytics_hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **Free** now! - - - - - -##
为什么选择 YOLOv5
- -

-
- YOLOv5-P5 640 图像 (点击扩展) - -

-
-
- 图片注释 (点击扩展) - -- **COCO AP val** 表示 mAP@0.5:0.95 在5000张图像的[COCO val2017](http://cocodataset.org)数据集上,在256到1536的不同推理大小上测量的指标。 -- **GPU Speed** 衡量的是在 [COCO val2017](http://cocodataset.org) 数据集上使用 [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100实例在批量大小为32时每张图像的平均推理时间。 -- **EfficientDet** 数据来自 [google/automl](https://github.com/google/automl) ,批量大小设置为 8。 -- 复现 mAP 方法: `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` - -
- -### 预训练检查点 - -| 模型 | 规模
(像素) | mAP验证
0.5:0.95 | mAP验证
0.5 | 速度
CPU b1
(ms) | 速度
V100 b1
(ms) | 速度
V100 b32
(ms) | 参数
(M) | 浮点运算
@640 (B) | -|------------------------------------------------------------------------------------------------------|-----------------------|-------------------------|--------------------|------------------------------|-------------------------------|--------------------------------|--------------------|------------------------| -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | -| | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5x6.pt)
+ [TTA][TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | - -
- 表格注释 (点击扩展) - -- 所有检查点都以默认设置训练到300个时期. Nano和Small模型用 [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, 其他模型使用 [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). -- **mAPval** 值是 [COCO val2017](http://cocodataset.org) 数据集上的单模型单尺度的值。 -
复现方法: `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` -- 使用 [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) 实例对COCO val图像的平均速度。不包括NMS时间(~1 ms/img) -
复现方法: `python val.py --data coco.yaml --img 640 --task speed --batch 1` -- **TTA** [测试时数据增强](https://github.com/ultralytics/yolov5/issues/303) 包括反射和比例增强. -
复现方法: `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` - -
- - -##
分类 ⭐ 新
- -YOLOv5发布的[v6.2版本](https://github.com/ultralytics/yolov5/releases) 支持训练,验证,预测和输出分类模型!这使得训练分类器模型非常简单。点击下面开始尝试! - -
- 分类检查点 (点击展开) - -
- -我们在ImageNet上使用了4xA100的实例训练YOLOv5-cls分类模型90个epochs,并以相同的默认设置同时训练了ResNet和EfficientNet模型来进行比较。我们将所有的模型导出到ONNX FP32进行CPU速度测试,又导出到TensorRT FP16进行GPU速度测试。最后,为了方便重现,我们在[Google Colab Pro](https://colab.research.google.com/signup)上进行了所有的速度测试。 - -| 模型 | 规模
(像素) | 准确度
第一 | 准确度
前五 | 训练
90 epochs
4xA100 (小时) | 速度
ONNX CPU
(ms) | 速度
TensorRT V100
(ms) | 参数
(M) | 浮点运算
@224 (B) | -|----------------------------------------------------------------------------------------------------|-----------------------|------------------|------------------|----------------------------------------------|--------------------------------|-------------------------------------|--------------------|------------------------| -| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | -| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | -| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | -| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | -| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | -| | -| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | -| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | -| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | -| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | -| | -| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | -| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | -| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | -| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | - -
- 表格注释 (点击扩展) - -- 所有检查点都被SGD优化器训练到90 epochs, `lr0=0.001` 和 `weight_decay=5e-5`, 图像大小为224,全为默认设置。
运行数据记录于 https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2。 -- **准确度** 值为[ImageNet-1k](https://www.image-net.org/index.php)数据集上的单模型单尺度。
通过`python classify/val.py --data ../datasets/imagenet --img 224`进行复制。 -- 使用Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM实例得出的100张推理图像的平均**速度**。
通过 `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1`进行复制。 -- 用`export.py`**导出**到FP32的ONNX和FP16的TensorRT。
通过 `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224`进行复制。 -
-
- -
- 分类使用实例 (点击展开) - -### 训练 -YOLOv5分类训练支持自动下载MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof和ImageNet数据集,并使用`--data` 参数. 打个比方,在MNIST上使用`--data mnist`开始训练。 - -```bash -# 单GPU -python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128 - -# 多-GPU DDP -python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 -``` - -### 验证 -在ImageNet-1k数据集上验证YOLOv5m-cl的准确性: -```bash -bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) -python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate -``` - -### 预测 -用提前训练好的YOLOv5s-cls.pt去预测bus.jpg: -```bash -python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg -``` -```python -model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load from PyTorch Hub -``` - -### 导出 -导出一组训练好的YOLOv5s-cls, ResNet和EfficientNet模型到ONNX和TensorRT: -```bash -python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224 -``` -
- - -##
贡献
- -我们重视您的意见! 我们希望给大家提供尽可能的简单和透明的方式对 YOLOv5 做出贡献。开始之前请先点击并查看我们的 [贡献指南](CONTRIBUTING.md),填写[YOLOv5调查问卷](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 来向我们发送您的经验反馈。真诚感谢我们所有的贡献者! - - - - -##
联系
- -关于YOLOv5的漏洞和功能问题,请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues)。商业咨询或技术支持服务请访问[https://ultralytics.com/contact](https://ultralytics.com/contact)。 - -
-
- - - - - - - - - - - - - - - - - - - - -
-
-[assets]: https://github.com/ultralytics/yolov5/releases
-[tta]: https://github.com/ultralytics/yolov5/issues/303
diff --git a/.github/workflows/translate-readme.yml b/.github/workflows/translate-readme.yml
new file mode 100644
index 000000000000..76f59b83e65f
--- /dev/null
+++ b/.github/workflows/translate-readme.yml
@@ -0,0 +1,27 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# README translation action to translate README.md to Chinese as README.zh-CN.md on any change to README.md
+
+name: Translate README
+
+on:
+  push:
+    branches:
+      - main
+      - master
+    paths:
+      - README.md
+
+jobs:
+  Translate:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Setup Node.js
+        uses: actions/setup-node@v3
+        with:
+          node-version: 16
+      # ISO Language Codes: https://cloud.google.com/translate/docs/languages
+      - name: Adding README - Chinese Simplified
+        uses: dephraiim/translate-readme@main
+        with:
+          LANG: zh-CN
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 72c3cc67e59f..28dbc89223cf 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -50,7 +50,7 @@ repos:
       additional_dependencies:
       - mdformat-gfm
       - mdformat-black
-        exclude: "README.md|README_cn.md"
+        exclude: "README.md|README.zh-CN.md"
diff --git a/README.md b/README.md
index f00cb76c6ce9..9ee97321082e 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
 

- English | [简体中文](.github/README_cn.md) + [English](README.md) | [简体中文](README.zh-CN.md)
YOLOv5 CI @@ -15,15 +15,11 @@ Open In Colab Open In Kaggle
-
-

- YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics - open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. -

- To request an Enterprise License please complete the form at Ultralytics Licensing. -

-

+ +YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. + +To request an Enterprise License please complete the form at Ultralytics Licensing.
@@ -313,7 +309,7 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We | [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | | [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | | [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+ [TTA][TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+ [TTA][tta] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- |
Table Notes @@ -479,5 +475,4 @@ For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github
-[assets]: https://github.com/ultralytics/yolov5/releases [tta]: https://github.com/ultralytics/yolov5/issues/303 diff --git a/README.zh-CN.md b/README.zh-CN.md new file mode 100644 index 000000000000..09cfc9472d9a --- /dev/null +++ b/README.zh-CN.md @@ -0,0 +1,479 @@ +
+

+ + +

+ +[英语](README.md)|[简体中文](README.zh-CN.md)
+ +
+ YOLOv5 CI + YOLOv5 Citation + Docker Pulls +
+ Run on Gradient + Open In Colab + Open In Kaggle +
+
+ +YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表超力对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。 + +要申请企业许可证,请填写表格Ultralytics 许可. + +
+ + + + + + + + + + + + + + + + + + + + +
+
+ +##
Ultralytics 现场会议
+ +
+ +[Ultralytics Live Session Ep。 2个](https://youtu.be/LKpuzZllNpA)✨将直播**欧洲中部时间 12 月 13 日星期二 19:00**和[约瑟夫·纳尔逊](https://github.com/josephofiowa)的[机器人流](https://roboflow.com/?ref=ultralytics)谁将与我们一起讨论全新的 Roboflow x Ultralytics HUB 集成。收听 Glenn 和 Joseph 询问如何通过无缝数据集集成来加快工作流程! 🔥 + + + +
+ +##
细分 ⭐ 新
+ +
+ + +
+ +我们新的 YOLOv5[发布 v7.0](https://github.com/ultralytics/yolov5/releases/v7.0)实例分割模型是世界上最快和最准确的,击败所有当前[SOTA 基准](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco).我们使它们非常易于训练、验证和部署。查看我们的完整详细信息[发行说明](https://github.com/ultralytics/yolov5/releases/v7.0)并访问我们的[YOLOv5 分割 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb)快速入门教程。 + +
+ Segmentation Checkpoints + +
+ +我们使用 A100 GPU 在 COCO 上以 640 图像大小训练了 300 个时期的 YOLOv5 分割模型。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。我们在 Google 上进行了所有速度测试[协作临](https://colab.research.google.com/signup)便于重现的笔记本。 + +| 模型 | 尺寸
(像素) | 地图盒子
50-95 | 地图面具
50-95 | 火车时间
300个纪元
A100(小时) | 速度
ONNX 中央处理器
(小姐) | 速度
同仁堂A100
(小姐) | 参数
(男) | 失败者
@640(二) | +| ------------------------------------------------------------------------------------------ | --------------- | ------------------ | ------------------ | ------------------------------- | ----------------------------- | -------------------------- | -------------- | ------------------- | +| [YOLOv5n-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | +| [YOLOv5s-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | +| [YOLOv5m段](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | +| [YOLOv5l-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 我:43(X) | 857.4 | 2.9 | 47.9 | 147.7 | +| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (zks) | 1579.2 | 4.5 | 88.8 | 265.7 | + +- 使用 SGD 优化器将所有检查点训练到 300 个时期`lr0=0.01`和`weight_decay=5e-5`在图像大小 640 和所有默认设置。
运行记录到[HTTPS://玩豆瓣.爱/Glenn-就ocher/yo lo V5_V70_official](https://wandb.ai/glenn-jocher/YOLOv5_v70_official) +- **准确性**值适用于 COCO 数据集上的单模型单尺度。
重现者`python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` +- **速度**使用 a 对超过 100 个推理图像进行平均[协作临](https://colab.research.google.com/signup)A100 高 RAM 实例。值仅表示推理速度(NMS 每张图像增加约 1 毫秒)。
重现者`python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` +- **出口**到 FP32 的 ONNX 和 FP16 的 TensorRT 完成`export.py`.
重现者`python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` + +
+ +
+ Segmentation Usage Examples  Open In Colab + +### 火车 + +YOLOv5分割训练支持自动下载COCO128-seg分割数据集`--data coco128-seg.yaml`COCO-segments 数据集的参数和手动下载`bash data/scripts/get_coco.sh --train --val --segments`接着`python train.py --data coco.yaml`. + +```bash +# Single-GPU +python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 + +# Multi-GPU DDP +python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 +``` + +### 瓦尔 + +在 COCO 数据集上验证 YOLOv5s-seg mask mAP: + +```bash +bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) +python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate +``` + +### 预测 + +使用预训练的 YOLOv5m-seg.pt 来预测 bus.jpg: + +```bash +python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg +``` + +```python +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5m-seg.pt" +) # load from PyTorch Hub (WARNING: inference not yet supported) +``` + +| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | +| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | + +### 出口 + +将 YOLOv5s-seg 模型导出到 ONNX 和 TensorRT: + +```bash +python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0 +``` + +
+ +##
文档
+ +见[YOLOv5 文档](https://docs.ultralytics.com)有关培训、测试和部署的完整文档。请参阅下面的快速入门示例。 + +
+Install + +克隆回购并安装[要求.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt)在一个[**Python>=3.7.0**](https://www.python.org/)环境,包括[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). + +```bash +git clone https://github.com/ultralytics/yolov5 # clone +cd yolov5 +pip install -r requirements.txt # install +``` + +
+ +
+Inference + +YOLOv5[PyTorch 中心](https://github.com/ultralytics/yolov5/issues/36)推理。[楷模](https://github.com/ultralytics/yolov5/tree/master/models)自动从最新下载 +YOLOv5[发布](https://github.com/ultralytics/yolov5/releases). + +```python +import torch + +# Model +model = torch.hub.load("ultralytics/yolov5", "yolov5s") # or yolov5n - yolov5x6, custom + +# Images +img = "https://ultralytics.com/images/zidane.jpg" # or file, Path, PIL, OpenCV, numpy, list + +# Inference +results = model(img) + +# Results +results.print() # or .show(), .save(), .crop(), .pandas(), etc. +``` + +
+ +
+Inference with detect.py + +`detect.py`在各种来源上运行推理,下载[楷模](https://github.com/ultralytics/yolov5/tree/master/models)自动从 +最新的YOLOv5[发布](https://github.com/ultralytics/yolov5/releases)并将结果保存到`runs/detect`. + +```bash +python detect.py --weights yolov5s.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + screen # screenshot + path/ # directory + list.txt # list of images + list.streams # list of streams + 'path/*.jpg' # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream +``` + +
+ +
+Training + +下面的命令重现 YOLOv5[可可](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh)结果。[楷模](https://github.com/ultralytics/yolov5/tree/master/models)和[数据集](https://github.com/ultralytics/yolov5/tree/master/data)自动从最新下载 +YOLOv5[发布](https://github.com/ultralytics/yolov5/releases). YOLOv5n/s/m/l/x 的训练时间为 +V100 GPU 上 1/2/4/6/8 天([多GPU](https://github.com/ultralytics/yolov5/issues/475)倍快)。使用 +最大的`--batch-size`可能,或通过`--batch-size -1`为了 +YOLOv5[自动批处理](https://github.com/ultralytics/yolov5/pull/5092).显示的批量大小适用于 V100-16GB。 + +```bash +python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 + yolov5s 64 + yolov5m 40 + yolov5l 24 + yolov5x 16 +``` + + + +
+ +
+Tutorials + +- [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)🚀 推荐 +- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ + 推荐的 +- [多 GPU 训练](https://github.com/ultralytics/yolov5/issues/475) +- [PyTorch 中心](https://github.com/ultralytics/yolov5/issues/36)🌟 新 +- [TFLite、ONNX、CoreML、TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251)🚀 +- [NVIDIA Jetson Nano 部署](https://github.com/ultralytics/yolov5/issues/9627)🌟 新 +- [测试时间增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) +- [模型集成](https://github.com/ultralytics/yolov5/issues/318) +- [模型修剪/稀疏度](https://github.com/ultralytics/yolov5/issues/304) +- [超参数进化](https://github.com/ultralytics/yolov5/issues/607) +- [使用冻结层进行迁移学习](https://github.com/ultralytics/yolov5/issues/1314) +- [架构总结](https://github.com/ultralytics/yolov5/issues/6998)🌟 新 +- [用于数据集、标签和主动学习的 Roboflow](https://github.com/ultralytics/yolov5/issues/4975)🌟 新 +- [ClearML 记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml)🌟 新 +- [所以平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 +- [彗星记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新 + +
+ +##
集成
+ +
+ + +
+
+ +
+ + + + + + + + + + + +
+ +| 机器人流 | ClearML ⭐ 新 | 彗星⭐新 | 所以⭐新 | +| :-------------------------------------------------------------------------: | :-----------------------------------------------------------------------: | :----------------------------------------------------------------------------: | :---------------------------------------------------------------: | +| 将您的自定义数据集标记并直接导出到 YOLOv5 以进行训练[机器人流](https://roboflow.com/?ref=ultralytics) | 使用自动跟踪、可视化甚至远程训练 YOLOv5[清除ML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[彗星](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 一键自动编译量化YOLOv5以获得更好的推理性能[所以](https://bit.ly/yolov5-deci-platform) | + +##
Ultralytics 集线器
+ +[Ultralytics 集线器](https://bit.ly/ultralytics_hub)是我们的⭐**新的**用于可视化数据集、训练 YOLOv5 🚀 模型并以无缝体验部署到现实世界的无代码解决方案。开始使用**自由的**现在! + + + + +##
为什么选择 YOLOv5
+ +YOLOv5 被设计为超级容易上手和简单易学。我们优先考虑现实世界的结果。 + +

+
+ YOLOv5-P5 640 Figure + +

+
+
+ Figure Notes + +- **COCO AP 值**表示[map@0.5](mailto:mAP@0.5):0.95 指标在 5000 张图像上测得[COCO val2017](http://cocodataset.org)从 256 到 1536 的各种推理大小的数据集。 +- **显卡速度**测量每张图像的平均推理时间[COCO val2017](http://cocodataset.org)数据集使用[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)批量大小为 32 的 V100 实例。 +- **高效**数据来自[谷歌/汽车](https://github.com/google/automl)批量大小为 8。 +- **复制**经过`python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` + +
+ +### 预训练检查点 + +| 模型 | 尺寸
(像素) | 地图
50-95 | 地图
50 | 速度
处理器b1
(小姐) | 速度
V100 b1
(小姐) | 速度
V100 b32
(小姐) | 参数
(男) | 失败者
@640(二) | +| --------------------------------------------------------------------------------------------------- | --------------- | ----------------- | ---------------- | ------------------------ | -------------------------- | --------------------------- | -------------- | ------------------- | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| | | | | | | | | | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+[电讯局][tta] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | + +
+ Table Notes + +- 所有检查点都使用默认设置训练到 300 个时期。纳米和小型型号使用[hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml)hyps,所有其他人都使用[hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). +- \*\*地图\*\*值适用于单模型单尺度[COCO val2017](http://cocodataset.org)数据集。
重现者`python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` +- **速度**使用 a 对 COCO val 图像进行平均[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (~1 ms/img) 不包括在内。
重现者`python val.py --data coco.yaml --img 640 --task speed --batch 1` +- **电讯局**[测试时间增加](https://github.com/ultralytics/yolov5/issues/303)包括反射和尺度增强。
重现者`python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` + +
+ +##
分类⭐新
+ +YOLOv5[发布 v6.2](https://github.com/ultralytics/yolov5/releases)带来对分类模型训练、验证和部署的支持!查看我们的完整详细信息[发行说明](https://github.com/ultralytics/yolov5/releases/v6.2)并访问我们的[YOLOv5 分类 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb)快速入门教程。 + +
+ Classification Checkpoints + +
+ +我们使用 4xA100 实例在 ImageNet 上训练了 90 个时期的 YOLOv5-cls 分类模型,我们训练了 ResNet 和 EfficientNet 模型以及相同的默认训练设置以进行比较。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。我们在 Google 上进行了所有速度测试[协作临](https://colab.research.google.com/signup)为了便于重现。 + +| 模型 | 尺寸
(像素) | acc
top1 | acc
烹饪 | 训练
90个纪元
4xA100(小时) | 速度
ONNX 中央处理器
(小姐) | 速度
TensorRT V100
(小姐) | 参数
(男) | 失败者
@224(二) | +| ------------------------------------------------------------------------------------------ | --------------- | ---------------- | -------------- | ------------------------------ | ----------------------------- | -------------------------------- | -------------- | ------------------- | +| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | +| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | +| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | +| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | +| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | +| | | | | | | | | | +| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | +| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | +| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | +| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | +| | | | | | | | | | +| [高效网络_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | +| [高效网络 b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | +| [我们将预测](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | +| [高效Netb3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | + +
+ Table Notes (click to expand) + +- 使用 SGD 优化器将所有检查点训练到 90 个时期`lr0=0.001`和`weight_decay=5e-5`在图像大小 224 和所有默认设置。
运行记录到[HTTPS://玩豆瓣.爱/Glenn-就ocher/yo lo V5-classifier-V6-2](https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2) +- **准确性**值适用于单模型单尺度[ImageNet-1k](https://www.image-net.org/index.php)数据集。
重现者`python classify/val.py --data ../datasets/imagenet --img 224` +- **速度**使用谷歌平均超过 100 个推理图像[协作临](https://colab.research.google.com/signup)V100 高 RAM 实例。
重现者`python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` +- **出口**到 FP32 的 ONNX 和 FP16 的 TensorRT 完成`export.py`.
重现者`python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` +
+
+ +
+ Classification Usage Examples  Open In Colab + +### 火车 + +YOLOv5 分类训练支持自动下载 MNIST、Fashion-MNIST、CIFAR10、CIFAR100、Imagenette、Imagewoof 和 ImageNet 数据集`--data`争论。开始使用 MNIST 进行训练`--data mnist`. + +```bash +# Single-GPU +python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128 + +# Multi-GPU DDP +python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 +``` + +### 瓦尔 + +在 ImageNet-1k 数据集上验证 YOLOv5m-cls 的准确性: + +```bash +bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) +python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate +``` + +### 预测 + +使用预训练的 YOLOv5s-cls.pt 来预测 bus.jpg: + +```bash +python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg +``` + +```python +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5s-cls.pt" +) # load from PyTorch Hub +``` + +### 出口 + +将一组经过训练的 YOLOv5s-cls、ResNet 和 EfficientNet 模型导出到 ONNX 和 TensorRT: + +```bash +python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224 +``` + +
+ +##
环境
+ +在几秒钟内开始使用我们经过验证的环境。单击下面的每个图标了解详细信息。 + +
+ + + + + + + + + + + + + + + + + +
+ +##
贡献
+ +我们喜欢您的意见!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的[投稿指南](CONTRIBUTING.md)开始,并填写[YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey)向我们发送您的体验反馈。感谢我们所有的贡献者! + + + + + +##
执照
+ +YOLOv5 在两种不同的许可下可用: + +- **GPL-3.0 许可证**: 看[执照](https://github.com/ultralytics/yolov5/blob/master/LICENSE)文件的详细信息。 +- **企业执照**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证[Ultralytics 许可](https://ultralytics.com/license). + +##
接触
+ +对于 YOLOv5 错误和功能请求,请访问[GitHub 问题](https://github.com/ultralytics/yolov5/issues).如需专业支持,请[联系我们](https://ultralytics.com/contact). + +
+
+ + + + + + + + + + + + + + + + + + + + +
+ +[tta]: https://github.com/ultralytics/yolov5/issues/303 From 342fe05e6c88221750ce7e90b7d2e8baabd397dc Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 9 Dec 2022 01:47:14 +0000 Subject: [PATCH 306/326] docs: Added README."zh-CN".md translation via https://github.com/dephraiim/translate-readme --- README.zh-CN.md | 135 +++++++++++++++++++++++++----------------------- 1 file changed, 69 insertions(+), 66 deletions(-) diff --git a/README.zh-CN.md b/README.zh-CN.md index 09cfc9472d9a..0fc77565c5ef 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -4,9 +4,9 @@

-[英语](README.md)|[简体中文](README.zh-CN.md)
+[英语](README.md)\|[简体中文](README.zh-CN.md)
-
+
YOLOv5 CI YOLOv5 Citation Docker Pulls @@ -21,7 +21,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表Ultralytics 许可. -
+
@@ -79,10 +79,10 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Tutorials -- [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)🚀 推荐 -- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ - 推荐的 -- [多 GPU 训练](https://github.com/ultralytics/yolov5/issues/475) -- [PyTorch 中心](https://github.com/ultralytics/yolov5/issues/36)🌟 新 -- [TFLite、ONNX、CoreML、TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251)🚀 -- [NVIDIA Jetson Nano 部署](https://github.com/ultralytics/yolov5/issues/9627)🌟 新 -- [测试时间增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) -- [模型集成](https://github.com/ultralytics/yolov5/issues/318) -- [模型修剪/稀疏度](https://github.com/ultralytics/yolov5/issues/304) -- [超参数进化](https://github.com/ultralytics/yolov5/issues/607) -- [使用冻结层进行迁移学习](https://github.com/ultralytics/yolov5/issues/1314) -- [架构总结](https://github.com/ultralytics/yolov5/issues/6998)🌟 新 -- [用于数据集、标签和主动学习的 Roboflow](https://github.com/ultralytics/yolov5/issues/4975)🌟 新 -- [ClearML 记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml)🌟 新 -- [所以平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 -- [彗星记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新 +- [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)🚀 推荐 +- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ + 推荐的 +- [多 GPU 训练](https://github.com/ultralytics/yolov5/issues/475) +- [PyTorch 中心](https://github.com/ultralytics/yolov5/issues/36)🌟 新 +- [TFLite、ONNX、CoreML、TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251)🚀 +- [NVIDIA Jetson Nano 部署](https://github.com/ultralytics/yolov5/issues/9627)🌟 新 +- [测试时间增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) +- [模型集成](https://github.com/ultralytics/yolov5/issues/318) +- [模型修剪/稀疏度](https://github.com/ultralytics/yolov5/issues/304) +- [超参数进化](https://github.com/ultralytics/yolov5/issues/607) +- [使用冻结层进行迁移学习](https://github.com/ultralytics/yolov5/issues/1314) +- [架构总结](https://github.com/ultralytics/yolov5/issues/6998)🌟 新 +- [用于数据集、标签和主动学习的 Roboflow](https://github.com/ultralytics/yolov5/issues/4975)🌟 新 +- [ClearML 记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml)🌟 新 +- [所以平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 +- [彗星记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新
@@ -265,7 +263,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
-| 机器人流 | ClearML ⭐ 新 | 彗星⭐新 | 所以⭐新 | +| 机器人流 | ClearML ⭐ 新 | 彗星⭐新 | 所以⭐新 | | :-------------------------------------------------------------------------: | :-----------------------------------------------------------------------: | :----------------------------------------------------------------------------: | :---------------------------------------------------------------: | | 将您的自定义数据集标记并直接导出到 YOLOv5 以进行训练[机器人流](https://roboflow.com/?ref=ultralytics) | 使用自动跟踪、可视化甚至远程训练 YOLOv5[清除ML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[彗星](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 一键自动编译量化YOLOv5以获得更好的推理性能[所以](https://bit.ly/yolov5-deci-platform) | @@ -289,10 +287,10 @@ YOLOv5 被设计为超级容易上手和简单易学。我们优先考虑现实
Figure Notes -- **COCO AP 值**表示[map@0.5](mailto:mAP@0.5):0.95 指标在 5000 张图像上测得[COCO val2017](http://cocodataset.org)从 256 到 1536 的各种推理大小的数据集。 -- **显卡速度**测量每张图像的平均推理时间[COCO val2017](http://cocodataset.org)数据集使用[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)批量大小为 32 的 V100 实例。 -- **高效**数据来自[谷歌/汽车](https://github.com/google/automl)批量大小为 8。 -- **复制**经过`python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` +- **COCO AP 值**表示[map@0.5](mailto:mAP@0.5):0.95 指标在 5000 张图像上测得[COCO val2017](http://cocodataset.org)从 256 到 1536 的各种推理大小的数据集。 +- **显卡速度**测量每张图像的平均推理时间[COCO val2017](http://cocodataset.org)数据集使用[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)批量大小为 32 的 V100 实例。 +- **高效**数据来自[谷歌/汽车](https://github.com/google/automl)批量大小为 8。 +- **复制**经过`python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
@@ -315,10 +313,10 @@ YOLOv5 被设计为超级容易上手和简单易学。我们优先考虑现实
Table Notes -- 所有检查点都使用默认设置训练到 300 个时期。纳米和小型型号使用[hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml)hyps,所有其他人都使用[hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). -- \*\*地图\*\*值适用于单模型单尺度[COCO val2017](http://cocodataset.org)数据集。
重现者`python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` -- **速度**使用 a 对 COCO val 图像进行平均[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (~1 ms/img) 不包括在内。
重现者`python val.py --data coco.yaml --img 640 --task speed --batch 1` -- **电讯局**[测试时间增加](https://github.com/ultralytics/yolov5/issues/303)包括反射和尺度增强。
重现者`python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` +- 所有检查点都使用默认设置训练到 300 个时期。纳米和小型型号使用[hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml)hyps,所有其他人都使用[hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). +- **地图**值适用于单模型单尺度[COCO val2017](http://cocodataset.org)数据集。
重现者`python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` +- **速度**使用 a 对 COCO val 图像进行平均[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (~1 ms/img) 不包括在内。
重现者`python val.py --data coco.yaml --img 640 --task speed --batch 1` +- **电讯局**[测试时间增加](https://github.com/ultralytics/yolov5/issues/303)包括反射和尺度增强。
重现者`python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
@@ -333,33 +331,33 @@ YOLOv5[发布 v6.2](https://github.com/ultralytics/yolov5/releases)带来对分 我们使用 4xA100 实例在 ImageNet 上训练了 90 个时期的 YOLOv5-cls 分类模型,我们训练了 ResNet 和 EfficientNet 模型以及相同的默认训练设置以进行比较。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。我们在 Google 上进行了所有速度测试[协作临](https://colab.research.google.com/signup)为了便于重现。 -| 模型 | 尺寸
(像素) | acc
top1 | acc
烹饪 | 训练
90个纪元
4xA100(小时) | 速度
ONNX 中央处理器
(小姐) | 速度
TensorRT V100
(小姐) | 参数
(男) | 失败者
@224(二) | -| ------------------------------------------------------------------------------------------ | --------------- | ---------------- | -------------- | ------------------------------ | ----------------------------- | -------------------------------- | -------------- | ------------------- | -| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | -| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | -| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | -| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | -| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | -| | | | | | | | | | -| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | -| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | -| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | -| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | -| | | | | | | | | | -| [高效网络_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | -| [高效网络 b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | -| [我们将预测](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | -| [高效Netb3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | +| 模型 | 尺寸
(像素) | acc
top1 | acc
烹饪 | 训练
90个纪元
4xA100(小时) | 速度
ONNX 中央处理器
(小姐) | 速度
TensorRT V100
(小姐) | 参数
(男) | 失败者
@224(乙) | +| ------------------------------------------------------------------------------------------- | --------------- | ---------------- | -------------- | ------------------------------ | ----------------------------- | -------------------------------- | -------------- | ------------------- | +| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | +| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | +| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | +| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | +| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | +| | | | | | | | | | +| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | +| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | +| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | +| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | +| | | | | | | | | | +| [高效网络\_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | +| [高效网络 b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | +| [我们将预测](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | +| [高效Netb3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
Table Notes (click to expand) -- 使用 SGD 优化器将所有检查点训练到 90 个时期`lr0=0.001`和`weight_decay=5e-5`在图像大小 224 和所有默认设置。
运行记录到[HTTPS://玩豆瓣.爱/Glenn-就ocher/yo lo V5-classifier-V6-2](https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2) -- **准确性**值适用于单模型单尺度[ImageNet-1k](https://www.image-net.org/index.php)数据集。
重现者`python classify/val.py --data ../datasets/imagenet --img 224` -- **速度**使用谷歌平均超过 100 个推理图像[协作临](https://colab.research.google.com/signup)V100 高 RAM 实例。
重现者`python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` -- **出口**到 FP32 的 ONNX 和 FP16 的 TensorRT 完成`export.py`.
重现者`python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` -
- +- 使用 SGD 优化器将所有检查点训练到 90 个时期`lr0=0.001`和`weight_decay=5e-5`在图像大小 224 和所有默认设置。
运行记录到[HTTPS://玩豆瓣.爱/Glenn-就ocher/yo lo V5-classifier-V6-2](https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2) +- **准确性**值适用于单模型单尺度[ImageNet-1k](https://www.image-net.org/index.php)数据集。
重现者`python classify/val.py --data ../datasets/imagenet --img 224` +- **速度**使用谷歌平均超过 100 个推理图像[协作临](https://colab.research.google.com/signup)V100 高 RAM 实例。
重现者`python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` +- **出口**到 FP32 的 ONNX 和 FP16 的 TensorRT 完成`export.py`.
重现者`python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` + +
Classification Usage Examples  Open In Colab @@ -394,9 +392,7 @@ python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg ``` ```python -model = torch.hub.load( - "ultralytics/yolov5", "custom", "yolov5s-cls.pt" -) # load from PyTorch Hub +model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load from PyTorch Hub ``` ### 出口 @@ -433,6 +429,13 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu
+##
应用程序
+ +在您的 iOS 或 Android 设备上运行 YOLOv5 模型[Ultralytics 应用程序](https://ultralytics.com/app_install)! + + +Ultralytics mobile app + ##
贡献
我们喜欢您的意见!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的[投稿指南](CONTRIBUTING.md)开始,并填写[YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey)向我们发送您的体验反馈。感谢我们所有的贡献者! @@ -445,8 +448,8 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu YOLOv5 在两种不同的许可下可用: -- **GPL-3.0 许可证**: 看[执照](https://github.com/ultralytics/yolov5/blob/master/LICENSE)文件的详细信息。 -- **企业执照**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证[Ultralytics 许可](https://ultralytics.com/license). +- **GPL-3.0 许可证**: 看[执照](https://github.com/ultralytics/yolov5/blob/master/LICENSE)文件的详细信息。 +- **企业执照**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证[Ultralytics 许可](https://ultralytics.com/license). ##
接触
From 443ef7f33e0943ccc5e5c8ff922c6fe7a0cb7053 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Mon, 12 Dec 2022 12:29:19 +0900 Subject: [PATCH 307/326] Modify a comment for OpenCV File I/O Functions (#10467) Modify comment for OpenCV File I/O Functions Signed-off-by: Yonghye Kwon Signed-off-by: Yonghye Kwon --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 99a96576c3fd..e5a843c4a758 100644 --- a/utils/general.py +++ b/utils/general.py @@ -1115,7 +1115,7 @@ def increment_path(path, exist_ok=False, sep='', mkdir=False): return path -# OpenCV Chinese-friendly functions ------------------------------------------------------------------------------------ +# OpenCV Multilanguage-friendly functions ------------------------------------------------------------------------------------ imshow_ = cv2.imshow # copy to avoid recursion errors From 357cde9ee7da13ba3095995488c5a23631467f1a Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Tue, 13 Dec 2022 05:05:20 +0900 Subject: [PATCH 308/326] add force_reload=True when loading model using torch hub (#10460) Signed-off-by: Yonghye Kwon Signed-off-by: Yonghye Kwon Co-authored-by: Glenn Jocher --- tutorial.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 657dc266da92..6ab0a33366a5 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -963,7 +963,7 @@ "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", "import torch\n", "\n", - "model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # yolov5n - yolov5x6 or custom\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s', force_reload=True) # yolov5n - yolov5x6 or custom\n", "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", "results = model(im) # inference\n", "results.print() # or .show(), .save(), .crop(), .pandas(), etc." @@ -972,4 +972,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From 1752768fb3b3ff4f842eaaecf7eba4808ac124a9 Mon Sep 17 00:00:00 2001 From: Nioolek <40284075+Nioolek@users.noreply.github.com> Date: Wed, 14 Dec 2022 06:48:15 +0800 Subject: [PATCH 309/326] Fix Chinese README (#10465) * Beautify Chinese Documents * Beautify Chinese Documents * Beautify Chinese Documents * Beautify Chinese Documents * add blank * Update translate-readme.yml Disable auto-translation by changing on-push branch to 'translate_readme'. This prevents overwriting of manual fixes. Signed-off-by: Glenn Jocher * Update translate-readme.yml Signed-off-by: Glenn Jocher * fix live doc * Update README.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- .github/workflows/translate-readme.yml | 3 +- README.md | 4 +- README.zh-CN.md | 260 ++++++++++++------------- 3 files changed, 133 insertions(+), 134 deletions(-) diff --git a/.github/workflows/translate-readme.yml b/.github/workflows/translate-readme.yml index 76f59b83e65f..538ff375097e 100644 --- a/.github/workflows/translate-readme.yml +++ b/.github/workflows/translate-readme.yml @@ -6,8 +6,7 @@ name: Translate README on: push: branches: - - main - - master + - translate_readme # replace with 'master' to enable action paths: - README.md diff --git a/README.md b/README.md index 9ee97321082e..21bdc83f349e 100644 --- a/README.md +++ b/README.md @@ -50,9 +50,9 @@ To request an Enterprise License please complete the form at
-[Ultralytics Live Session Ep. 2](https://youtu.be/LKpuzZllNpA) ✨ will be streaming live on **Tuesday, December 13th at 19:00 CET** with [Joseph Nelson](https://github.com/josephofiowa) of [Roboflow](https://roboflow.com/?ref=ultralytics) who will join us to discuss the brand new Roboflow x Ultralytics HUB integration. Tune in to ask Glenn and Joseph about how you can speed up workflows with seamless dataset integration! 🔥
+[Ultralytics Live Session Ep. 2](https://youtu.be/QGRtEG7UjtE) ✨ will be streaming live on **Tuesday, December 13th at 19:00 CET** with [Joseph Nelson](https://github.com/josephofiowa) of [Roboflow](https://roboflow.com/?ref=ultralytics) who will join us to discuss the brand new Roboflow x Ultralytics HUB integration. Tune in to ask Glenn and Joseph about how you can speed up workflows with seamless dataset integration! 🔥
 
- 
+ 
diff --git a/README.zh-CN.md b/README.zh-CN.md index 0fc77565c5ef..15232be3aa4f 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -4,7 +4,7 @@

-[英语](README.md)\|[简体中文](README.zh-CN.md)
+[英文](README.md)\|[简体中文](README.zh-CN.md)
YOLOv5 CI @@ -17,9 +17,9 @@

-YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表超力对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。 +YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Ultralytics 对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。 -要申请企业许可证,请填写表格Ultralytics 许可. +如果要申请企业许可证,请填写表格Ultralytics 许可. +##
Ultralytics 直播会议
-[Ultralytics Live Session Ep。 2个](https://youtu.be/LKpuzZllNpA)✨将直播**欧洲中部时间 12 月 13 日星期二 19:00**和[约瑟夫·纳尔逊](https://github.com/josephofiowa)的[机器人流](https://roboflow.com/?ref=ultralytics)谁将与我们一起讨论全新的 Roboflow x Ultralytics HUB 集成。收听 Glenn 和 Joseph 询问如何通过无缝数据集集成来加快工作流程! 🔥 +[Ultralytics Live Session Ep. 2](https://youtu.be/QGRtEG7UjtE) ✨ 将与 [Roboflow](https://roboflow.com/?ref=ultralytics) 的 [Joseph Nelson](https://github.com/josephofiowa) 在 **欧洲中部时间 12 月 13 日星期二的 19:00** ,他将与我们一起讨论全新的 Roboflow x Ultralytics HUB 集成。欢迎收听 Glenn 和 Joseph ,以了解如何通过无缝数据集集成来加快工作流程! 🔥 - +
-##
细分 ⭐ 新
+##
实例分割模型 ⭐ 新
-我们新的 YOLOv5[发布 v7.0](https://github.com/ultralytics/yolov5/releases/v7.0)实例分割模型是世界上最快和最准确的,击败所有当前[SOTA 基准](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco).我们使它们非常易于训练、验证和部署。查看我们的完整详细信息[发行说明](https://github.com/ultralytics/yolov5/releases/v7.0)并访问我们的[YOLOv5 分割 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb)快速入门教程。 +我们新的 YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) 实例分割模型是世界上最快和最准确的模型,击败所有当前 [SOTA 基准](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco)。我们使它非常易于训练、验证和部署。更多细节请查看 [发行说明](https://github.com/ultralytics/yolov5/releases/v7.0) 或访问我们的 [YOLOv5 分割 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) 以快速入门。
- Segmentation Checkpoints + 实例分割模型列表
-我们使用 A100 GPU 在 COCO 上以 640 图像大小训练了 300 个时期的 YOLOv5 分割模型。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。我们在 Google 上进行了所有速度测试[协作临](https://colab.research.google.com/signup)便于重现的笔记本。 +我们使用 A100 GPU 在 COCO 上以 640 图像大小训练了 300 epochs 得到 YOLOv5 分割模型。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。为了便于再现,我们在 Google [Colab Pro](https://colab.research.google.com/signup) 上进行了所有速度测试。 -| 模型 | 尺寸
(像素) | 地图盒子
50-95 | 地图面具
50-95 | 火车时间
300个纪元
A100(小时) | 速度
ONNX 中央处理器
(小姐) | 速度
同仁堂A100
(小姐) | 参数
(男) | 失败者
@640(二) | -| ------------------------------------------------------------------------------------------ | --------------- | ------------------ | ------------------ | ------------------------------- | ----------------------------- | -------------------------- | -------------- | ------------------- | -| [YOLOv5n-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | -| [YOLOv5s-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | -| [YOLOv5m段](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | -| [YOLOv5l-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 我:43(X) | 857.4 | 2.9 | 47.9 | 147.7 | -| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (zks) | 1579.2 | 4.5 | 88.8 | 265.7 | +| 模型 | 尺寸
(像素) | mAPbox
50-95 | mAPmask
50-95 | 训练时长
300 epochs
A100 GPU(小时) | 推理速度
ONNX CPU
(ms) | 推理速度
TRT A100
(ms) | 参数量
(M) | FLOPs
@640 (B) | +| ------------------------------------------------------------------------------------------ | ------------------- | -------------------- | --------------------- | --------------------------------------------- | --------------------------------- | --------------------------------- | ----------------- | ---------------------- | +| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | +| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | +| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | +| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | +| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | -- 使用 SGD 优化器将所有检查点训练到 300 个时期`lr0=0.01`和`weight_decay=5e-5`在图像大小 640 和所有默认设置。
运行记录到[HTTPS://玩豆瓣.爱/Glenn-就ocher/yo lo V5_V70_official](https://wandb.ai/glenn-jocher/YOLOv5_v70_official) -- **准确性**值适用于 COCO 数据集上的单模型单尺度。
重现者`python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` -- **速度**使用 a 对超过 100 个推理图像进行平均[协作临](https://colab.research.google.com/signup)A100 高 RAM 实例。值仅表示推理速度(NMS 每张图像增加约 1 毫秒)。
重现者`python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` -- **出口**到 FP32 的 ONNX 和 FP16 的 TensorRT 完成`export.py`.
重现者`python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` +- 所有模型使用 SGD 优化器训练, 都使用 `lr0=0.01` 和 `weight_decay=5e-5` 参数, 图像大小为 640 。
训练 log 可以查看 https://wandb.ai/glenn-jocher/YOLOv5_v70_official +- **准确性**结果都在 COCO 数据集上,使用单模型单尺度测试得到。
复现命令 `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` +- **推理速度**是使用 100 张图像推理时间进行平均得到,测试环境使用 [Colab Pro](https://colab.research.google.com/signup) 上 A100 高 RAM 实例。结果仅表示推理速度(NMS 每张图像增加约 1 毫秒)。
复现命令 `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` +- **模型转换**到 FP32 的 ONNX 和 FP16 的 TensorRT 脚本为 `export.py`.
运行命令 `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half`
- Segmentation Usage Examples  Open In Colab + 分割模型使用示例  Open In Colab -### 火车 +### 训练 -YOLOv5分割训练支持自动下载COCO128-seg分割数据集`--data coco128-seg.yaml`COCO-segments 数据集的参数和手动下载`bash data/scripts/get_coco.sh --train --val --segments`接着`python train.py --data coco.yaml`. +YOLOv5分割训练支持自动下载 COCO128-seg 分割数据集,用户仅需在启动指令中包含 `--data coco128-seg.yaml` 参数。 若要手动下载,使用命令 `bash data/scripts/get_coco.sh --train --val --segments`, 在下载完毕后,使用命令 `python train.py --data coco.yaml` 开启训练。 ```bash -# Single-GPU +# 单 GPU python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 -# Multi-GPU DDP +# 多 GPU, DDP 模式 python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 ``` -### 瓦尔 +### 验证 在 COCO 数据集上验证 YOLOv5s-seg mask mAP: ```bash -bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) -python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate +bash data/scripts/get_coco.sh --val --segments # 下载 COCO val segments 数据集 (780MB, 5000 images) +python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # 验证 ``` ### 预测 @@ -119,13 +119,13 @@ python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg ``` ```python -model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m-seg.pt') # load from PyTorch Hub (WARNING: inference not yet supported) +model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m-seg.pt') # 从load from PyTorch Hub 加载模型 (WARNING: 推理暂未支持) ``` | ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | | ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | -### 出口 +### 模型导出 将 YOLOv5s-seg 模型导出到 ONNX 和 TensorRT: @@ -137,12 +137,12 @@ python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --devi ##
文档
-见[YOLOv5 文档](https://docs.ultralytics.com)有关培训、测试和部署的完整文档。请参阅下面的快速入门示例。 +有关训练、测试和部署的完整文档见[YOLOv5 文档](https://docs.ultralytics.com)。请参阅下面的快速入门示例。
-Install +安装 -克隆回购并安装[要求.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt)在一个[**Python>=3.7.0**](https://www.python.org/)环境,包括[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). +克隆 repo,并要求在 [**Python>=3.7.0**](https://www.python.org/) 环境中安装 [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) ,且要求 [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/) 。 ```bash git clone https://github.com/ultralytics/yolov5 # clone @@ -153,10 +153,10 @@ pip install -r requirements.txt # install
-Inference +推理 -YOLOv5[PyTorch 中心](https://github.com/ultralytics/yolov5/issues/36)推理。[楷模](https://github.com/ultralytics/yolov5/tree/master/models)自动从最新下载 -YOLOv5[发布](https://github.com/ultralytics/yolov5/releases). +使用 YOLOv5 [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 推理。最新 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 将自动的从 +YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 ```python import torch @@ -177,10 +177,10 @@ results.print() # or .show(), .save(), .crop(), .pandas(), etc.
-Inference with detect.py +使用 detect.py 推理 -`detect.py`在各种来源上运行推理,下载[楷模](https://github.com/ultralytics/yolov5/tree/master/models)自动从 -最新的YOLOv5[发布](https://github.com/ultralytics/yolov5/releases)并将结果保存到`runs/detect`. +`detect.py` 在各种来源上运行推理, [模型](https://github.com/ultralytics/yolov5/tree/master/models) 自动从 +最新的YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载,并将结果保存到 `runs/detect` 。 ```bash python detect.py --weights yolov5s.pt --source 0 # webcam @@ -198,13 +198,14 @@ python detect.py --weights yolov5s.pt --source 0 #
-Training +训练 -下面的命令重现 YOLOv5[可可](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh)结果。[楷模](https://github.com/ultralytics/yolov5/tree/master/models)和[数据集](https://github.com/ultralytics/yolov5/tree/master/data)自动从最新下载 -YOLOv5[发布](https://github.com/ultralytics/yolov5/releases). YOLOv5n/s/m/l/x 的训练时间为 -V100 GPU 上 1/2/4/6/8 天([多GPU](https://github.com/ultralytics/yolov5/issues/475)倍快)。使用 -最大的`--batch-size`可能,或通过`--batch-size -1`为了 -YOLOv5[自动批处理](https://github.com/ultralytics/yolov5/pull/5092).显示的批量大小适用于 V100-16GB。 +下面的命令重现 YOLOv5 在 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) 数据集上的结果。 +最新的 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 和 [数据集](https://github.com/ultralytics/yolov5/tree/master/data) +将自动的从 YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 +YOLOv5n/s/m/l/x 在 V100 GPU 的训练时间为 1/2/4/6/8 天( [多GPU](https://github.com/ultralytics/yolov5/issues/475) 训练速度更快)。 +尽可能使用更大的 `--batch-size` ,或通过 `--batch-size -1` 实现 +YOLOv5 [自动批处理](https://github.com/ultralytics/yolov5/pull/5092) 。下方显示的 batchsize 适用于 V100-16GB。 ```bash python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 @@ -219,16 +220,15 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
-Tutorials +教程 - [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)🚀 推荐 -- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ - 推荐的 +- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ 推荐 - [多 GPU 训练](https://github.com/ultralytics/yolov5/issues/475) -- [PyTorch 中心](https://github.com/ultralytics/yolov5/issues/36)🌟 新 +- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)🌟 新 - [TFLite、ONNX、CoreML、TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251)🚀 - [NVIDIA Jetson Nano 部署](https://github.com/ultralytics/yolov5/issues/9627)🌟 新 -- [测试时间增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) +- [测试时数据增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) - [模型集成](https://github.com/ultralytics/yolov5/issues/318) - [模型修剪/稀疏度](https://github.com/ultralytics/yolov5/issues/304) - [超参数进化](https://github.com/ultralytics/yolov5/issues/607) @@ -236,12 +236,12 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - [架构总结](https://github.com/ultralytics/yolov5/issues/6998)🌟 新 - [用于数据集、标签和主动学习的 Roboflow](https://github.com/ultralytics/yolov5/issues/4975)🌟 新 - [ClearML 记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml)🌟 新 -- [所以平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 -- [彗星记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新 +- [Deci 平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 +- [Comet Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新
-##
集成
+##
模块集成

@@ -263,118 +263,118 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - -| 机器人流 | ClearML ⭐ 新 | 彗星⭐新 | 所以⭐新 | -| :-------------------------------------------------------------------------: | :-----------------------------------------------------------------------: | :----------------------------------------------------------------------------: | :---------------------------------------------------------------: | -| 将您的自定义数据集标记并直接导出到 YOLOv5 以进行训练[机器人流](https://roboflow.com/?ref=ultralytics) | 使用自动跟踪、可视化甚至远程训练 YOLOv5[清除ML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[彗星](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 一键自动编译量化YOLOv5以获得更好的推理性能[所以](https://bit.ly/yolov5-deci-platform) | +| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Deci ⭐ 新 | +| :-----------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------: | +| 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 一键自动编译量化 YOLOv5 以获得更好的推理性能[Deci](https://bit.ly/yolov5-deci-platform) | -##
Ultralytics 集线器
+##
Ultralytics HUB
-[Ultralytics 集线器](https://bit.ly/ultralytics_hub)是我们的⭐**新的**用于可视化数据集、训练 YOLOv5 🚀 模型并以无缝体验部署到现实世界的无代码解决方案。开始使用**自由的**现在! +[Ultralytics HUB](https://bit.ly/ultralytics_hub) 是我们的⭐**新的**用于可视化数据集、训练 YOLOv5 🚀 模型并以无缝体验部署到现实世界的无代码解决方案。现在开始 **免费** 使用他! ##
为什么选择 YOLOv5
-YOLOv5 被设计为超级容易上手和简单易学。我们优先考虑现实世界的结果。 +YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结果。

- YOLOv5-P5 640 Figure + YOLOv5-P5 640 图

- Figure Notes + 图表笔记 -- **COCO AP 值**表示[map@0.5](mailto:mAP@0.5):0.95 指标在 5000 张图像上测得[COCO val2017](http://cocodataset.org)从 256 到 1536 的各种推理大小的数据集。 -- **显卡速度**测量每张图像的平均推理时间[COCO val2017](http://cocodataset.org)数据集使用[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)批量大小为 32 的 V100 实例。 -- **高效**数据来自[谷歌/汽车](https://github.com/google/automl)批量大小为 8。 -- **复制**经过`python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` +- **COCO AP val** 表示 mAP@0.5:0.95 指标,在 [COCO val2017](http://cocodataset.org) 数据集的 5000 张图像上测得, 图像包含 256 到 1536 各种推理大小。 +- **显卡推理速度** 为在 [COCO val2017](http://cocodataset.org) 数据集上的平均推理时间,使用 [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100实例,batchsize 为 32 。 +- **EfficientDet** 数据来自 [google/automl](https://github.com/google/automl) , batchsize 为32。 +- **复现命令** 为 `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
-### 预训练检查点 - -| 模型 | 尺寸
(像素) | 地图
50-95 | 地图
50 | 速度
处理器b1
(小姐) | 速度
V100 b1
(小姐) | 速度
V100 b32
(小姐) | 参数
(男) | 失败者
@640(二) | -| --------------------------------------------------------------------------------------------------- | --------------- | ----------------- | ---------------- | ------------------------ | -------------------------- | --------------------------- | -------------- | ------------------- | -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | -| | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+[电讯局][tta] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | +### 预训练模型 + +| 模型 | 尺寸
(像素) | mAPval
50-95 | mAPval
50 | 推理速度
CPU b1
(ms) | 推理速度
V100 b1
(ms) | 速度
V100 b32
(ms) | 参数量
(M) | FLOPs
@640 (B) | +| --------------------------------------------------------------------------------------------------- | ------------------- | -------------------- | ------------------- | ------------------------------- | -------------------------------- | ------------------------------ | ----------------- | ---------------------- | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| | | | | | | | | | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+[TTA][tta] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- |
- Table Notes + 笔记 -- 所有检查点都使用默认设置训练到 300 个时期。纳米和小型型号使用[hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml)hyps,所有其他人都使用[hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). -- **地图**值适用于单模型单尺度[COCO val2017](http://cocodataset.org)数据集。
重现者`python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` -- **速度**使用 a 对 COCO val 图像进行平均[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (~1 ms/img) 不包括在内。
重现者`python val.py --data coco.yaml --img 640 --task speed --batch 1` -- **电讯局**[测试时间增加](https://github.com/ultralytics/yolov5/issues/303)包括反射和尺度增强。
重现者`python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` +- 所有模型都使用默认配置,训练 300 epochs。n和s模型使用 [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) ,其他模型都使用 [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml) 。 +- **mAPval**在单模型单尺度上计算,数据集使用 [COCO val2017](http://cocodataset.org) 。
复现命令 `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` +- **推理速度**在 COCO val 图像总体时间上进行平均得到,测试环境使用[AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (大约 1 ms/img) 不包括在内。
复现命令 `python val.py --data coco.yaml --img 640 --task speed --batch 1` +- **TTA** [测试时数据增强](https://github.com/ultralytics/yolov5/issues/303) 包括反射和尺度变换。
复现命令 `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
-##
分类⭐新
+##
分类网络 ⭐ 新
-YOLOv5[发布 v6.2](https://github.com/ultralytics/yolov5/releases)带来对分类模型训练、验证和部署的支持!查看我们的完整详细信息[发行说明](https://github.com/ultralytics/yolov5/releases/v6.2)并访问我们的[YOLOv5 分类 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb)快速入门教程。 +YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) 带来对分类模型训练、验证和部署的支持!详情请查看 [发行说明](https://github.com/ultralytics/yolov5/releases/v6.2) 或访问我们的 [YOLOv5 分类 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) 以快速入门。
- Classification Checkpoints + 分类网络模型
-我们使用 4xA100 实例在 ImageNet 上训练了 90 个时期的 YOLOv5-cls 分类模型,我们训练了 ResNet 和 EfficientNet 模型以及相同的默认训练设置以进行比较。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。我们在 Google 上进行了所有速度测试[协作临](https://colab.research.google.com/signup)为了便于重现。 - -| 模型 | 尺寸
(像素) | acc
top1 | acc
烹饪 | 训练
90个纪元
4xA100(小时) | 速度
ONNX 中央处理器
(小姐) | 速度
TensorRT V100
(小姐) | 参数
(男) | 失败者
@224(乙) | -| ------------------------------------------------------------------------------------------- | --------------- | ---------------- | -------------- | ------------------------------ | ----------------------------- | -------------------------------- | -------------- | ------------------- | -| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | -| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | -| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | -| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | -| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | -| | | | | | | | | | -| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | -| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | -| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | -| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | -| | | | | | | | | | -| [高效网络\_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | -| [高效网络 b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | -| [我们将预测](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | -| [高效Netb3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | +我们使用 4xA100 实例在 ImageNet 上训练了 90 个 epochs 得到 YOLOv5-cls 分类模型,我们训练了 ResNet 和 EfficientNet 模型以及相同的默认训练设置以进行比较。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。为了便于重现,我们在 Google 上进行了所有速度测试 [Colab Pro](https://colab.research.google.com/signup) 。 + +| 模型 | 尺寸
(像素) | acc
top1 | acc
top5 | 训练时长
90 epochs
4xA100(小时) | 推理速度
ONNX CPU
(ms) | 推理速度
TensorRT V100
(ms) | 参数
(M) | FLOPs
@640 (B) | +| -------------------------------------------------------------------------------------------------- | ------------------- | ---------------- | ---------------- | ------------------------------------------ | --------------------------------- | -------------------------------------- | --------------- | -----------------------| +| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | +| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | +| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | +| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | +| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | +| | | | | | | | | | +| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | +| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | +| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | +| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | +| | | | | | | | | | +| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | +| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | +| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | +| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
- Table Notes (click to expand) + Table Notes (点击以展开) -- 使用 SGD 优化器将所有检查点训练到 90 个时期`lr0=0.001`和`weight_decay=5e-5`在图像大小 224 和所有默认设置。
运行记录到[HTTPS://玩豆瓣.爱/Glenn-就ocher/yo lo V5-classifier-V6-2](https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2) -- **准确性**值适用于单模型单尺度[ImageNet-1k](https://www.image-net.org/index.php)数据集。
重现者`python classify/val.py --data ../datasets/imagenet --img 224` -- **速度**使用谷歌平均超过 100 个推理图像[协作临](https://colab.research.google.com/signup)V100 高 RAM 实例。
重现者`python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` -- **出口**到 FP32 的 ONNX 和 FP16 的 TensorRT 完成`export.py`.
重现者`python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` +- 所有模型都使用 SGD 优化器训练 90 个 epochs,都使用 `lr0=0.001` 和 `weight_decay=5e-5` 参数, 图像大小为 224 ,且都使用默认设置。
训练 log 可以查看 https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2 +- **准确性**都在单模型单尺度上计算,数据集使用 [ImageNet-1k](https://www.image-net.org/index.php) 。
复现命令 `python classify/val.py --data ../datasets/imagenet --img 224` +- **推理速度**是使用 100 个推理图像进行平均得到,测试环境使用谷歌 [Colab Pro](https://colab.research.google.com/signup) V100 高 RAM 实例。
复现命令 `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` +- **模型导出**到 FP32 的 ONNX 和 FP16 的 TensorRT 使用 `export.py` 。
复现命令 `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224`
- Classification Usage Examples  Open In Colab + 分类训练示例  Open In Colab -### 火车 +### 训练 -YOLOv5 分类训练支持自动下载 MNIST、Fashion-MNIST、CIFAR10、CIFAR100、Imagenette、Imagewoof 和 ImageNet 数据集`--data`争论。开始使用 MNIST 进行训练`--data mnist`. +YOLOv5 分类训练支持自动下载 MNIST、Fashion-MNIST、CIFAR10、CIFAR100、Imagenette、Imagewoof 和 ImageNet 数据集,命令中使用 `--data` 即可。 MNIST 示例 `--data mnist` 。 ```bash -# Single-GPU +# 单 GPU python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128 -# Multi-GPU DDP +# 多 GPU, DDP 模式 python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 ``` -### 瓦尔 +### 验证 在 ImageNet-1k 数据集上验证 YOLOv5m-cls 的准确性: @@ -395,7 +395,7 @@ python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load from PyTorch Hub ``` -### 出口 +### 模型导出 将一组经过训练的 YOLOv5s-cls、ResNet 和 EfficientNet 模型导出到 ONNX 和 TensorRT: @@ -407,7 +407,7 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu ##
环境
-在几秒钟内开始使用我们经过验证的环境。单击下面的每个图标了解详细信息。 +使用下面我们经过验证的环境,在几秒钟内开始使用 YOLOv5 。单击下面的图标了解详细信息。 -##
应用程序
+##
APP
-在您的 iOS 或 Android 设备上运行 YOLOv5 模型[Ultralytics 应用程序](https://ultralytics.com/app_install)! +通过下载 [Ultralytics APP](https://ultralytics.com/app_install) ,以在您的 iOS 或 Android 设备上运行 YOLOv5 模型! Ultralytics mobile app ##
贡献
-我们喜欢您的意见!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的[投稿指南](CONTRIBUTING.md)开始,并填写[YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey)向我们发送您的体验反馈。感谢我们所有的贡献者! +我们喜欢您的意见或建议!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的 [投稿指南](CONTRIBUTING.md),并填写 [YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 向我们发送您的体验反馈。感谢我们所有的贡献者!
-##
执照
+##
License
-YOLOv5 在两种不同的许可下可用: +YOLOv5 在两种不同的 License 下可用: -- **GPL-3.0 许可证**: 看[执照](https://github.com/ultralytics/yolov5/blob/master/LICENSE)文件的详细信息。 -- **企业执照**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证[Ultralytics 许可](https://ultralytics.com/license). +- **GPL-3.0 License**: 查看 [License](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件的详细信息。 +- **企业License**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证 [Ultralytics 许可](https://ultralytics.com/license) 。 -##
接触
+##
联系我们
-对于 YOLOv5 错误和功能请求,请访问[GitHub 问题](https://github.com/ultralytics/yolov5/issues).如需专业支持,请[联系我们](https://ultralytics.com/contact). +若发现 YOLOv5 的 bug 或有功能需求,请访问 [GitHub 问题](https://github.com/ultralytics/yolov5/issues) 。如需专业支持,请 [联系我们](https://ultralytics.com/contact) 。
From 1ae91940abe9ca3e064784bb18c12271ab3157b4 Mon Sep 17 00:00:00 2001 From: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> Date: Thu, 15 Dec 2022 07:56:42 -0500 Subject: [PATCH 310/326] Update Comet hyperlinks (#10500) * Update README.md Signed-off-by: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> * Update README.md Signed-off-by: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> * Update README.md Signed-off-by: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> * Update README.md Signed-off-by: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- README.md | 2 +- classify/tutorial.ipynb | 4 ++-- segment/tutorial.ipynb | 4 ++-- tutorial.ipynb | 2 +- utils/loggers/comet/README.md | 12 ++++++------ 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 21bdc83f349e..56015b239fc9 100644 --- a/README.md +++ b/README.md @@ -264,7 +264,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - |Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Deci ⭐ NEW| |:-:|:-:|:-:|:-:| -|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| +|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| ##
Ultralytics HUB
diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index c6f5d0d88a2d..94bafba00204 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -1341,7 +1341,7 @@ }, "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", "\n", "Getting started is easy:\n", "```shell\n", @@ -1476,4 +1476,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index 09ca963d4b98..e1179ffc1cc6 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -454,7 +454,7 @@ }, "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", "\n", "Getting started is easy:\n", "```shell\n", @@ -590,4 +590,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/tutorial.ipynb b/tutorial.ipynb index 6ab0a33366a5..cebcee3dfd24 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -860,7 +860,7 @@ "cell_type": "markdown", "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! 
Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", "\n", "Getting started is easy:\n", "```shell\n", diff --git a/utils/loggers/comet/README.md b/utils/loggers/comet/README.md index 8f206cd9830e..8a361e2b211d 100644 --- a/utils/loggers/comet/README.md +++ b/utils/loggers/comet/README.md @@ -2,13 +2,13 @@ # YOLOv5 with Comet -This guide will cover how to use YOLOv5 with [Comet](https://bit.ly/yolov5-readme-comet) +This guide will cover how to use YOLOv5 with [Comet](https://bit.ly/yolov5-readme-comet2) # About Comet Comet builds tools that help data scientists, engineers, and team leaders accelerate and optimize machine learning and deep learning models. -Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! +Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! # Getting Started @@ -54,7 +54,7 @@ That's it! Comet will automatically log your hyperparameters, command line argum yolo-ui # Try out an Example! -Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) +Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) Or better yet, try it out yourself in this Colab Notebook @@ -119,7 +119,7 @@ You can control the frequency of logged predictions and the associated images by **Note:** The YOLOv5 validation dataloader will default to a batch size of 32, so you will have to set the logging frequency accordingly. -Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) +Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) ```shell @@ -161,7 +161,7 @@ env COMET_LOG_PER_CLASS_METRICS=true python train.py \ ## Uploading a Dataset to Comet Artifacts -If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration), you can do so using the `upload_dataset` flag. 
+If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github), you can do so using the `upload_dataset` flag. The dataset be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/tutorials/train-custom-datasets/#3-organize-directories). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file. @@ -251,6 +251,6 @@ comet optimizer -j utils/loggers/comet/hpo.py \ ### Visualizing Results -Comet provides a number of ways to visualize the results of your sweep. Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) +Comet provides a number of ways to visualize the results of your sweep. Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) hyperparameter-yolo From b564c1f3653a9b11038a80e348a34afbf59943be Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Sat, 17 Dec 2022 20:05:00 +0900 Subject: [PATCH 311/326] Check `conf_thres` and `iou_thres` prior to use (#10515) * Checks conf_thres and iou_thres at beign Why checks conf_thres after operation with it? Signed-off-by: Yonghye Kwon * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update general.py Signed-off-by: Yonghye Kwon Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/general.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/utils/general.py b/utils/general.py index e5a843c4a758..6145801ca47f 100644 --- a/utils/general.py +++ b/utils/general.py @@ -898,6 +898,9 @@ def non_max_suppression( list of detections, on (n,6) tensor per image [xyxy, conf, cls] """ + # Checks + assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' + assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' if isinstance(prediction, (list, tuple)): # YOLOv5 model in validation model, output = (inference_out, loss_out) prediction = prediction[0] # select only inference output @@ -909,10 +912,6 @@ def non_max_suppression( nc = prediction.shape[2] - nm - 5 # number of classes xc = prediction[..., 4] > conf_thres # candidates - # Checks - assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' - assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' - # Settings # min_wh = 2 # (pixels) minimum box width and height max_wh = 7680 # (pixels) maximum box width and height From 8d65f9d8ce274f78949ab88b7359580cc8cabacc Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Sat, 17 Dec 2022 20:10:26 +0900 Subject: [PATCH 312/326] Support extensive shape for functions related to bounding box localization (#10516) * support extensive shape for functions related to bounding box localization Signed-off-by: Yonghye Kwon * merge exp branch updates Signed-off-by: Yonghye Kwon Co-authored-by: Glenn Jocher --- utils/general.py | 54 
++++++++++++++++++++++++------------------------ 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/utils/general.py b/utils/general.py index 6145801ca47f..744abb439ed1 100644 --- a/utils/general.py +++ b/utils/general.py @@ -750,30 +750,30 @@ def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) def xyxy2xywh(x): # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center - y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center - y[:, 2] = x[:, 2] - x[:, 0] # width - y[:, 3] = x[:, 3] - x[:, 1] # height + y[..., 0] = (x[..., 0] + x[..., 2]) / 2 # x center + y[..., 1] = (x[..., 1] + x[..., 3]) / 2 # y center + y[..., 2] = x[..., 2] - x[..., 0] # width + y[..., 3] = x[..., 3] - x[..., 1] # height return y def xywh2xyxy(x): # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x - y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y - y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x - y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y + y[..., 0] = x[..., 0] - x[..., 2] / 2 # top left x + y[..., 1] = x[..., 1] - x[..., 3] / 2 # top left y + y[..., 2] = x[..., 0] + x[..., 2] / 2 # bottom right x + y[..., 3] = x[..., 1] + x[..., 3] / 2 # bottom right y return y def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x - y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y - y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x - y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y + y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw # top left x + y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh # top left y + y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw # bottom right x + y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh # bottom right y return y @@ -782,18 +782,18 @@ def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): if clip: clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center - y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center - y[:, 2] = (x[:, 2] - x[:, 0]) / w # width - y[:, 3] = (x[:, 3] - x[:, 1]) / h # height + y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w # x center + y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h # y center + y[..., 2] = (x[..., 2] - x[..., 0]) / w # width + y[..., 3] = (x[..., 3] - x[..., 1]) / h # height return y def xyn2xy(x, w=640, h=640, padw=0, padh=0): # Convert normalized segments into pixel segments, shape (n,2) y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = w * x[:, 0] + padw # top left x - y[:, 1] = h * x[:, 1] + padh # top left y + y[..., 0] = w * x[..., 0] + padw # top left x + y[..., 1] = h * x[..., 1] + padh # top left y return y @@ -833,9 +833,9 @@ def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None): gain = ratio_pad[0][0] pad = ratio_pad[1] - boxes[:, [0, 2]] -= pad[0] # x padding - boxes[:, [1, 3]] -= pad[1] # y padding - boxes[:, :4] /= gain + boxes[..., [0, 2]] -= pad[0] # x padding + boxes[..., [1, 3]] -= pad[1] # y padding + boxes[..., :4] /= 
gain clip_boxes(boxes, img0_shape) return boxes @@ -862,13 +862,13 @@ def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=F def clip_boxes(boxes, shape): # Clip boxes (xyxy) to image shape (height, width) if isinstance(boxes, torch.Tensor): # faster individually - boxes[:, 0].clamp_(0, shape[1]) # x1 - boxes[:, 1].clamp_(0, shape[0]) # y1 - boxes[:, 2].clamp_(0, shape[1]) # x2 - boxes[:, 3].clamp_(0, shape[0]) # y2 + boxes[..., 0].clamp_(0, shape[1]) # x1 + boxes[..., 1].clamp_(0, shape[0]) # y1 + boxes[..., 2].clamp_(0, shape[1]) # x2 + boxes[..., 3].clamp_(0, shape[0]) # y2 else: # np.array (faster grouped) - boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2 - boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 + boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1]) # x1, x2 + boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0]) # y1, y2 def clip_segments(segments, shape): From b2f94e8c356083bb85d76a60ea2b54d5ad9fbe36 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 17 Dec 2022 12:26:57 +0100 Subject: [PATCH 313/326] Update to ONNX opset 17 (#10522) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index 928992903b0b..baf86f1d9297 100644 --- a/export.py +++ b/export.py @@ -624,7 +624,7 @@ def parse_opt(): parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes') parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') - parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version') + parser.add_argument('--opset', type=int, default=17, help='ONNX: opset version') parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') parser.add_argument('--nms', action='store_true', help='TF: add NMS to model') From 43623729cc634d690cece1f1d015e4d59e0b9d98 Mon Sep 17 00:00:00 2001 From: Wang Xin Date: Sat, 17 Dec 2022 19:55:08 +0800 Subject: [PATCH 314/326] Update train.py (#10485) Setting `master_port` to 1 may cause `Permission denied` due to failure to bind the port. So it is better to set it to a port greater than 1024. 
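For background on this change: ports below 1024 are privileged on Unix-like systems, so a non-root process cannot bind them, which is what produces the `Permission denied` error described above. Below is a minimal sketch (not part of the patch) of letting the OS choose a safe rendezvous port instead of hard-coding one — `find_free_port` is an illustrative helper, not a YOLOv5 or PyTorch API:

```python
import socket

def find_free_port() -> int:
    # Binding to port 0 asks the OS for an unused ephemeral port,
    # which always lies above the privileged range (< 1024).
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(("", 0))
        return s.getsockname()[1]

# The result could then be passed to torch.distributed.run as --master_port.
print(find_free_port())
```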
Signed-off-by: Wang Xin Signed-off-by: Wang Xin Co-authored-by: Ayush Chaurasia --- classify/train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/classify/train.py b/classify/train.py index a50845a4f781..4767be77bd61 100644 --- a/classify/train.py +++ b/classify/train.py @@ -6,7 +6,7 @@ $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224 Usage - Multi-GPU DDP training: - $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 + $ python -m torch.distributed.run --nproc_per_node 4 --master_port 2022 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 Datasets: --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/data' YOLOv5-cls models: --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt From 2c35c1b318ecd4856275039220c052a976d2cfe2 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Sun, 18 Dec 2022 21:03:01 +0900 Subject: [PATCH 315/326] Limit detections without explicit if condition (#10502) * limit detections without explicit if condition Signed-off-by: Yonghye Kwon * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * cleanup indexing code for limit detections Signed-off-by: Yonghye Kwon Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/general.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 744abb439ed1..70b6f6446f23 100644 --- a/utils/general.py +++ b/utils/general.py @@ -978,8 +978,7 @@ def non_max_suppression( c = x[:, 5:6] * (0 if agnostic else max_wh) # classes boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS - if i.shape[0] > max_det: # limit detections - i = i[:max_det] + i = i[:max_det] # limit detections if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix From b2a0f1cdc579bd81b3c4543752abaa4a90a53c8b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Dec 2022 20:06:01 +0100 Subject: [PATCH 316/326] Update `onnx>=1.12.0` (#10526) --- export.py | 2 +- requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index baf86f1d9297..7910178b2338 100644 --- a/export.py +++ b/export.py @@ -132,7 +132,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' @try_export def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')): # YOLOv5 ONNX export - check_requirements('onnx') + check_requirements('onnx>=1.12.0') import onnx LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') diff --git a/requirements.txt b/requirements.txt index 85eb839df8a0..4a8649c696a8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -29,7 +29,7 @@ seaborn>=0.11.0 # Export ---------------------------------------------------------------------- # coremltools>=6.0 # CoreML export -# onnx>=1.9.0 # ONNX export +# onnx>=1.12.0 # ONNX export # onnx-simplifier>=0.4.1 # ONNX simplifier # nvidia-pyindex # TensorRT export # nvidia-tensorrt # TensorRT export From 10e93d295fed1459666409751b4a897521c31b90 
Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Mon, 19 Dec 2022 18:27:34 +0900 Subject: [PATCH 317/326] Set a seed of generator with an option for more randomness when training several models with different seeds (#10486) * set seed with parameter Signed-off-by: Yonghye Kwon * make seed to be a large number * set seed with a parameter * set a seed of dataloader with opt for more randomness Signed-off-by: Yonghye Kwon Co-authored-by: Glenn Jocher --- train.py | 3 ++- utils/dataloaders.py | 5 +++-- utils/segment/dataloaders.py | 5 +++-- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/train.py b/train.py index 8b5446e58f2d..5d75f22b6335 100644 --- a/train.py +++ b/train.py @@ -198,7 +198,8 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '), - shuffle=True) + shuffle=True, + seed=opt.seed) labels = np.concatenate(dataset.labels, 0) mlc = int(labels[:, 0].max()) # max label class assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}' diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 6d2b27ea5e60..302cc3300d35 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -115,7 +115,8 @@ def create_dataloader(path, image_weights=False, quad=False, prefix='', - shuffle=False): + shuffle=False, + seed=0): if rect and shuffle: LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') shuffle = False @@ -140,7 +141,7 @@ def create_dataloader(path, sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates generator = torch.Generator() - generator.manual_seed(6148914691236517205 + RANK) + generator.manual_seed(6148914691236517205 + seed + RANK) return loader(dataset, batch_size=batch_size, shuffle=shuffle and sampler is None, diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index 9de6f0fbf903..d66b36115e3f 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -37,7 +37,8 @@ def create_dataloader(path, prefix='', shuffle=False, mask_downsample_ratio=1, - overlap_mask=False): + overlap_mask=False, + seed=0): if rect and shuffle: LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') shuffle = False @@ -64,7 +65,7 @@ def create_dataloader(path, sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates generator = torch.Generator() - generator.manual_seed(6148914691236517205 + RANK) + generator.manual_seed(6148914691236517205 + seed + RANK) return loader( dataset, batch_size=batch_size, From 5545ff3545d886417b4eff12203d1af4d758cc10 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Tue, 20 Dec 2022 01:19:14 +0900 Subject: [PATCH 318/326] Sort by confidence and remove excess boxes without explicit if (#10517) * sort by confidence and remove excess boxes without explicit if Signed-off-by: Yonghye Kwon * cleanup indexing boxes for remove excess boxes it is related to https://github.com/ultralytics/yolov5/pull/10502. 
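The cleanup above works because tensor slicing past the end of a dimension is a no-op, so a single indexing expression both sorts candidates by confidence and caps their count, with no explicit `if n > max_nms` branch. A quick standalone check (tensor sizes are illustrative; `max_nms=30000` matches the cap used in `non_max_suppression`):

```python
import torch

max_nms = 30000  # maximum number of boxes passed into torchvision.ops.nms()

# Many candidates: the slice truncates to max_nms after sorting by confidence.
big = torch.rand(50000, 6)  # rows are [x1, y1, x2, y2, conf, cls]
big = big[big[:, 4].argsort(descending=True)[:max_nms]]

# Few candidates: the same expression is a harmless no-op on the row count.
small = torch.rand(5, 6)
small = small[small[:, 4].argsort(descending=True)[:max_nms]]

print(big.shape, small.shape)  # torch.Size([30000, 6]) torch.Size([5, 6])
```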
From 5545ff3545d886417b4eff12203d1af4d758cc10 Mon Sep 17 00:00:00 2001
From: Yonghye Kwon
Date: Tue, 20 Dec 2022 01:19:14 +0900
Subject: [PATCH 318/326] Sort by confidence and remove excess boxes without
 explicit if (#10517)

* sort by confidence and remove excess boxes without explicit if

Signed-off-by: Yonghye Kwon

* cleanup indexing boxes for remove excess boxes

it is related to https://github.com/ultralytics/yolov5/pull/10502.

Signed-off-by: Yonghye Kwon
Co-authored-by: Glenn Jocher
---
 utils/general.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/utils/general.py b/utils/general.py
index 70b6f6446f23..0bbcb6e7334c 100644
--- a/utils/general.py
+++ b/utils/general.py
@@ -969,10 +969,7 @@ def non_max_suppression(
         n = x.shape[0]  # number of boxes
         if not n:  # no boxes
             continue
-        elif n > max_nms:  # excess boxes
-            x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence
-        else:
-            x = x[x[:, 4].argsort(descending=True)]  # sort by confidence
+        x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence and remove excess boxes

From f72f0fec980b35d7f9575d15b326f529b5a9ac0d Mon Sep 17 00:00:00 2001
From: Amir Pourmand
Date: Tue, 20 Dec 2022 18:37:43 +0330
Subject: [PATCH 319/326] Add Albumentation Default hyperparameter file
 (#10529)

* add albumentation hyps

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* Rename hyp.noAugmentation.yaml to hyp.no-augmentation.yaml

* Update hyp.no-augmentation.yaml

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher
---
 data/hyps/hyp.no-augmentation.yaml | 35 ++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)
 create mode 100644 data/hyps/hyp.no-augmentation.yaml

diff --git a/data/hyps/hyp.no-augmentation.yaml b/data/hyps/hyp.no-augmentation.yaml
new file mode 100644
index 000000000000..8fbd5b262afa
--- /dev/null
+++ b/data/hyps/hyp.no-augmentation.yaml
@@ -0,0 +1,35 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# Hyperparameters when using the Albumentations framework
+# python train.py --hyp hyp.no-augmentation.yaml
+# See https://github.com/ultralytics/yolov5/pull/3882 for YOLOv5 + Albumentations Usage examples
+
+lr0: 0.01  # initial learning rate (SGD=1E-2, Adam=1E-3)
+lrf: 0.1  # final OneCycleLR learning rate (lr0 * lrf)
+momentum: 0.937  # SGD momentum/Adam beta1
+weight_decay: 0.0005  # optimizer weight decay 5e-4
+warmup_epochs: 3.0  # warmup epochs (fractions ok)
+warmup_momentum: 0.8  # warmup initial momentum
+warmup_bias_lr: 0.1  # warmup initial bias lr
+box: 0.05  # box loss gain
+cls: 0.3  # cls loss gain
+cls_pw: 1.0  # cls BCELoss positive_weight
+obj: 0.7  # obj loss gain (scale with pixels)
+obj_pw: 1.0  # obj BCELoss positive_weight
+iou_t: 0.20  # IoU training threshold
+anchor_t: 4.0  # anchor-multiple threshold
+# anchors: 3  # anchors per output layer (0 to ignore)
+# these parameters are all zero since we want to use the Albumentations framework
+fl_gamma: 0.0  # focal loss gamma (efficientDet default gamma=1.5)
+hsv_h: 0  # image HSV-Hue augmentation (fraction)
+hsv_s: 0  # image HSV-Saturation augmentation (fraction)
+hsv_v: 0  # image HSV-Value augmentation (fraction)
+degrees: 0.0  # image rotation (+/- deg)
+translate: 0  # image translation (+/- fraction)
+scale: 0  # image scale (+/- gain)
+shear: 0  # image shear (+/- deg)
+perspective: 0.0  # image perspective (+/- fraction), range 0-0.001
+flipud: 0.0  # image flip up-down (probability)
+fliplr: 0.0  # image flip left-right (probability)
+mosaic: 0.0  # image mosaic (probability)
+mixup: 0.0  # image mixup (probability)
+copy_paste: 0.0  # segment copy-paste (probability)
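Note on PATCH 318/326: the sort and the excess-box cut are merged into one indexing expression; as with PATCH 315, the [:max_nms] slice is harmless when fewer than max_nms boxes are present. A minimal sketch (illustrative only, not part of the patch):

    import torch

    max_nms = 3
    x = torch.tensor([[0., 0., 1., 1., 0.2],
                      [0., 0., 1., 1., 0.9],
                      [0., 0., 1., 1., 0.5],
                      [0., 0., 1., 1., 0.7]])  # last column is confidence
    x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence, keep top max_nms
    print(x[:, 4])  # tensor([0.9000, 0.7000, 0.5000])
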
From 887d95296642b2fdee1cafa80c0c59618ca3c2e7 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 21 Dec 2022 02:17:19 +0100
Subject: [PATCH 320/326] Created using Colaboratory

---
 segment/tutorial.ipynb | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb
index e1179ffc1cc6..dc6599415480 100644
--- a/segment/tutorial.ipynb
+++ b/segment/tutorial.ipynb
@@ -36,7 +36,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/"
@@ -94,7 +94,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/"
@@ -149,7 +149,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": null,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/"
@@ -176,7 +176,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": null,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/"
@@ -264,7 +264,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": null,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/"
@@ -454,7 +454,8 @@
   },
   "source": [
    "## Comet Logging and Visualization 🌟 NEW\n",
-   "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n",
+   "\n",
+   "[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n",
    "\n",
    "Getting started is easy:\n",
    "```shell\n",
@@ -462,11 +463,11 @@
    "export COMET_API_KEY= # 2. paste API key\n",
    "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n",
    "```\n",
-   "\n",
-   "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n",
+   "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n",
    "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n",
    "\n",
-   "\"yolo-ui\""
+   "\n",
+   "\"Comet"
   ]
  },
  {
@@ -590,4 +591,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
+}
\ No newline at end of file

From c765b8c274c78676ae351f159953652152725fcc Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 21 Dec 2022 02:18:09 +0100
Subject: [PATCH 321/326] Created using Colaboratory

---
 classify/tutorial.ipynb | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb
index 94bafba00204..06af62a1b4c1 100644
--- a/classify/tutorial.ipynb
+++ b/classify/tutorial.ipynb
@@ -36,7 +36,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/"
@@ -94,7 +94,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/"
@@ -149,7 +149,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": null,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/"
@@ -183,7 +183,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": null,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/"
@@ -1269,7 +1269,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": null,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/"
@@ -1341,7 +1341,8 @@
   },
   "source": [
    "## Comet Logging and Visualization 🌟 NEW\n",
-   "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n",
+   "\n",
+   "[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n",
    "\n",
    "Getting started is easy:\n",
    "```shell\n",
@@ -1349,11 +1350,11 @@
    "export COMET_API_KEY= # 2. paste API key\n",
    "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n",
    "```\n",
-   "\n",
-   "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n",
+   "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n",
    "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n",
    "\n",
-   "\"yolo-ui\""
+   "\n",
+   "\"Comet"
   ]
  },
  {
@@ -1476,4 +1477,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
+}
\ No newline at end of file

From 96a71b17a276fa0a0b6fbdf68d579ce0603bfa2f Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 21 Dec 2022 02:19:45 +0100
Subject: [PATCH 322/326] Created using Colaboratory

---
 tutorial.ipynb | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/tutorial.ipynb b/tutorial.ipynb
index cebcee3dfd24..e83617e9dce7 100644
--- a/tutorial.ipynb
+++ b/tutorial.ipynb
@@ -412,7 +412,7 @@
         "import utils\n",
         "display = utils.notebook_init()  # checks"
       ],
-      "execution_count": 1,
+      "execution_count": null,
       "outputs": [
         {
           "output_type": "stream",
@@ -465,7 +465,7 @@
         "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n",
         "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)"
       ],
-      "execution_count": 2,
+      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
@@ -535,7 +535,7 @@
         "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip')  # download (780M - 5000 images)\n",
         "!unzip -q tmp.zip -d ../datasets && rm tmp.zip  # unzip"
       ],
-      "execution_count": 3,
+      "execution_count": null,
       "outputs": [
         {
           "output_type": "display_data",
@@ -566,7 +566,7 @@
         "# Validate YOLOv5s on COCO val\n",
         "!python val.py --weights yolov5s.pt --data coco.yaml --img 640 --half"
       ],
-      "execution_count": 4,
+      "execution_count": null,
       "outputs": [
         {
           "output_type": "stream",
@@ -682,7 +682,7 @@
         "# Train YOLOv5s on COCO128 for 3 epochs\n",
         "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache"
       ],
-      "execution_count": 5,
+      "execution_count": null,
       "outputs": [
         {
           "output_type": "stream",
@@ -860,7 +860,8 @@
       "cell_type": "markdown",
       "source": [
         "## Comet Logging and Visualization 🌟 NEW\n",
-        "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n",
+        "\n",
+        "[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n",
         "\n",
         "Getting started is easy:\n",
         "```shell\n",
@@ -868,11 +869,11 @@
         "export COMET_API_KEY= # 2. paste API key\n",
         "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n",
         "```\n",
-        "\n",
-        "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n",
+        "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n",
         "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n",
         "\n",
-        "\"yolo-ui\""
+        "\n",
+        "\"Comet"
       ],
       "metadata": {
         "id": "nWOsI5wJR1o3"
       }
      },
@@ -972,4 +973,4 @@
         "outputs": []
       }
     ]
-}
+}
\ No newline at end of file

From 2370a5513ebf67bd10b8d15fd6353e008380bc43 Mon Sep 17 00:00:00 2001
From: "Mr.Li" <1055271769@qq.com>
Date: Thu, 22 Dec 2022 21:55:09 +0800
Subject: [PATCH 323/326] Bugfix: update dataloaders.py to fix "resize to 0"
 (#10558)

* fix bug "resize to 0"

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* Use math.ceil() for resize to enforce min floor of 1 pixel

Signed-off-by: Glenn Jocher

Signed-off-by: Glenn Jocher
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher
---
 utils/dataloaders.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/dataloaders.py b/utils/dataloaders.py
index 302cc3300d35..cbb3114e94d8 100644
--- a/utils/dataloaders.py
+++ b/utils/dataloaders.py
@@ -737,7 +737,7 @@ def load_image(self, i):
             r = self.img_size / max(h0, w0)  # ratio
             if r != 1:  # if sizes are not equal
                 interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA
-                im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=interp)
+                im = cv2.resize(im, (math.ceil(w0 * r), math.ceil(h0 * r)), interpolation=interp)
             return im, (h0, w0), im.shape[:2]  # im, hw_original, hw_resized
         return self.ims[i], self.im_hw0[i], self.im_hw[i]  # im, hw_original, hw_resized
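Note on PATCH 323/326: with int() truncation, an extreme aspect ratio can round one resized side down to 0 pixels and make cv2.resize fail; math.ceil() enforces a floor of 1 pixel. A minimal sketch of the failure mode (illustrative only, not part of the patch):

    import math

    w0, h0, img_size = 2000, 1, 640  # degenerate 2000x1 image
    r = img_size / max(h0, w0)       # 0.32
    print(int(h0 * r), math.ceil(h0 * r))  # 0 vs 1 -- int() would request a 0-pixel height
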
From 5f8054c47c4938c6df6c3f1344de774f15a18404 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sat, 24 Dec 2022 18:15:33 +0100
Subject: [PATCH 324/326] FROM nvcr.io/nvidia/pytorch:22.12-py3 (#10588)

Signed-off-by: Glenn Jocher
Signed-off-by: Glenn Jocher
---
 utils/docker/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile
index 1ecf4c64f75f..26b3439c1941 100644
--- a/utils/docker/Dockerfile
+++ b/utils/docker/Dockerfile
@@ -3,7 +3,7 @@
 # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference
 
 # Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch
-FROM nvcr.io/nvidia/pytorch:22.11-py3
+FROM nvcr.io/nvidia/pytorch:22.12-py3
 RUN rm -rf /opt/pytorch  # remove 1.2GB dir
 
 # Downloads to user config dir

From 3c1afd9ab69f289f46f6ad291e7be3cae15f6c35 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 26 Dec 2022 14:54:43 +0100
Subject: [PATCH 325/326] ENV OMP_NUM_THREADS=1 (#10593)

@Laughing-q @AyushExel setting to 1 due to recent issues

Signed-off-by: Glenn Jocher
Signed-off-by: Glenn Jocher
---
 utils/docker/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile
index 26b3439c1941..e0d4411118f0 100644
--- a/utils/docker/Dockerfile
+++ b/utils/docker/Dockerfile
@@ -29,7 +29,7 @@
 WORKDIR /usr/src/app
 RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
 
 # Set environment variables
-ENV OMP_NUM_THREADS=8
+ENV OMP_NUM_THREADS=1
 
 # Usage Examples -------------------------------------------------------------------------------------------------------

From 60cb102d0ae096bcc1ba0fa3b1f03654c7c1c0e7 Mon Sep 17 00:00:00 2001
From: triple-Mu
Date: Sun, 4 Sep 2022 22:27:50 +0800
Subject: [PATCH 326/326] This is a combination of 5 commits.

New PR for "https://github.com/ultralytics/yolov5/pull/7736"

Remove not use

Format onnxruntime and tensorrt onnx outputs

fix unified outputs
---
 export.py        |  83 ++++++++++++++++++++--
 models/common.py | 179 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 257 insertions(+), 5 deletions(-)

diff --git a/export.py b/export.py
index 7910178b2338..7a080886ead1 100644
--- a/export.py
+++ b/export.py
@@ -185,6 +185,74 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX
     return f, model_onnx
 
 
+@try_export
+def export_onnx_with_nms(model, im, file, opset, nms_cfg, dynamic, simplify, prefix=colorstr('ONNX:')):
+    # YOLOv5 ONNX export
+    check_requirements('onnx>=1.12.0')
+    import onnx
+
+    LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
+    f = file.with_suffix('.onnx')
+
+    from models.common import End2End
+    model = End2End(model, *nms_cfg, device=im.device)
+    b, topk, backend = 'batch', nms_cfg[0], nms_cfg[-1]
+    output_names = ['num_dets', 'boxes', 'scores', 'labels']
+    output_shapes = {n: {0: b} for n in output_names}
+    if dynamic == 'batch':
+        dynamic_cfg = {'images': {0: b}, **output_shapes}
+    elif dynamic == 'all':
+        dynamic_cfg = {'images': {0: b, 2: 'height', 3: 'width'}, **output_shapes}
+    else:
+        dynamic_cfg, b = {}, im.shape[0]
+
+    torch.onnx.export(
+        model.cpu() if dynamic else model,  # --dynamic only compatible with cpu
+        im.cpu() if dynamic else im,
+        f,
+        verbose=False,
+        opset_version=opset,
+        do_constant_folding=True,  # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False
+        input_names=['images'],
+        output_names=output_names,
+        dynamic_axes=dynamic_cfg)
+
+    # Checks
+    model_onnx = onnx.load(f)  # load onnx model
+    onnx.checker.check_model(model_onnx)  # check onnx model
+
+    # Metadata
+    d = {'stride': int(max(model.stride)), 'names': model.names}
+    for k, v in d.items():
+        meta = model_onnx.metadata_props.add()
+        meta.key, meta.value = k, str(v)
+
+    # Fix output shape info in the ONNX graph used by TensorRT
+    if backend == 'trt':
+        shapes = [b, 1, b, topk, 4, b, topk, b, topk]
+    else:
+        shapes = [b, 1, b, 'topk', 4, b, 'topk', b, 'topk']
+    for i in model_onnx.graph.output:
+        for j in i.type.tensor_type.shape.dim:
+            j.dim_param = str(shapes.pop(0))
+    onnx.save(model_onnx, f)
+
+    # Simplify
+    if simplify:
+        try:
+            cuda = torch.cuda.is_available()
+            check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1'))
+            import onnxsim
+
+            LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
+            model_onnx, check = onnxsim.simplify(model_onnx)
+            assert check, 'assert check failed'
+            onnx.save(model_onnx, f)
+        except Exception as e:
+            LOGGER.info(f'{prefix} simplifier failure: {e}')
+    return f, model_onnx
+
+
 @try_export
 def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')):
     # YOLOv5 OpenVINO export
@@ -505,7 +573,7 @@ def run(
         opset=12,  # ONNX: opset version
         verbose=False,  # TensorRT: verbose log
         workspace=4,  # TensorRT: workspace size (GB)
-        nms=False,  # TF: add NMS to model
+        nms=False,  # ONNX/TF/TensorRT: NMS config for model
         agnostic_nms=False,  # TF: add agnostic NMS to model
         topk_per_class=100,  # TF.js NMS: topk per class to keep
         topk_all=100,  # TF.js NMS: topk for all classes to keep
@@ -560,9 +628,9 @@ def run(
         f[0], _ = export_torchscript(model, im, file, optimize)
     if engine:  # TensorRT required before ONNX
         f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose)
-    if onnx or xml:  # OpenVINO requires ONNX
+    if not nms and (onnx or xml):  # OpenVINO requires ONNX
         f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify)
-    if xml:  # OpenVINO
+    if not nms and xml:  # OpenVINO
         f[3], _ = export_openvino(file, metadata, half)
     if coreml:  # CoreML
         f[4], _ = export_coreml(model, im, file, int8, half)
@@ -592,6 +660,11 @@ def run(
     if paddle:  # PaddlePaddle
         f[10], _ = export_paddle(model, im, file, metadata)
+    if nms and (onnx or xml):
+        nms_cfg = [topk_all, iou_thres, conf_thres, nms]
+        f.append(export_onnx_with_nms(model, im, file, opset, nms_cfg, dynamic, simplify)[0])
+        if xml:
+            f.append(export_openvino(file.with_suffix('.pt'), metadata, half)[0])
 
     # Finish
     f = [str(x) for x in f if x]  # filter out '' and None
     if any(f):
@@ -622,12 +695,12 @@ def parse_opt():
     parser.add_argument('--keras', action='store_true', help='TF: use Keras')
     parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile')
     parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization')
-    parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes')
+    parser.add_argument('--dynamic', nargs='?', const='all', default=False, help='ONNX/TF/TensorRT: dynamic axes')
     parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model')
     parser.add_argument('--opset', type=int, default=17, help='ONNX: opset version')
     parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log')
     parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)')
-    parser.add_argument('--nms', action='store_true', help='TF: add NMS to model')
+    parser.add_argument('--nms', nargs='?', const=True, default=False, help='ONNX/TF/TensorRT: NMS config for model')
     parser.add_argument('--agnostic-nms', action='store_true', help='TF: add agnostic NMS to model')
     parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep')
     parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep')
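Note on PATCH 326/326 (export.py): --dynamic and --nms change from boolean flags to optional-value arguments via argparse's nargs='?', so each flag can be omitted (default), given bare (const), or given with a value such as a backend name. A minimal sketch of the pattern (illustrative only, not part of the patch):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--nms', nargs='?', const=True, default=False)
    print(parser.parse_args([]).nms)                # False: flag absent
    print(parser.parse_args(['--nms']).nms)         # True: bare flag takes const
    print(parser.parse_args(['--nms', 'trt']).nms)  # 'trt': value selects the NMS backend
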
diff --git a/models/common.py b/models/common.py
index 8b5ec1c786d8..3fac2f364156 100644
--- a/models/common.py
+++ b/models/common.py
@@ -8,6 +8,7 @@
 import json
 import math
 import platform
+import random
 import warnings
 import zipfile
 from collections import OrderedDict, namedtuple
@@ -858,3 +859,181 @@ def forward(self, x):
         if isinstance(x, list):
             x = torch.cat(x, 1)
         return self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
+
+
+class ORT_NMS(torch.autograd.Function):
+
+    @staticmethod
+    def forward(ctx,
+                boxes,
+                scores,
+                max_output_boxes_per_class=torch.tensor([100]),
+                iou_threshold=torch.tensor([0.45]),
+                score_threshold=torch.tensor([0.25])):
+        device = boxes.device
+        batch = scores.shape[0]
+        num_det = random.randint(0, 100)
+        batches = torch.randint(0, batch, (num_det,)).sort()[0].to(device)
+        idxs = torch.arange(100, 100 + num_det).to(device)
+        zeros = torch.zeros((num_det,), dtype=torch.int64).to(device)
+        selected_indices = torch.cat([batches[None], zeros[None], idxs[None]], 0).T.contiguous()
+        selected_indices = selected_indices.to(torch.int64)
+        return selected_indices
+
+    @staticmethod
+    def symbolic(g, boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold):
+        return g.op("NonMaxSuppression", boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold)
+
+
+class TRT_NMS(torch.autograd.Function):
+
+    @staticmethod
+    def forward(
+            ctx,
+            boxes,
+            scores,
+            background_class=-1,
+            box_coding=1,
+            iou_threshold=0.45,
+            max_output_boxes=100,
+            plugin_version="1",
+            score_activation=0,
+            score_threshold=0.25,
+    ):
+        batch_size, num_boxes, num_classes = scores.shape
+        num_det = torch.randint(0, max_output_boxes, (batch_size, 1), dtype=torch.int32)
+        det_boxes = torch.randn(batch_size, max_output_boxes, 4)
+        det_scores = torch.randn(batch_size, max_output_boxes)
+        det_classes = torch.randint(0, num_classes, (batch_size, max_output_boxes), dtype=torch.int32)
+
+        return num_det, det_boxes, det_scores, det_classes
+
+    @staticmethod
+    def symbolic(g,
+                 boxes,
+                 scores,
+                 background_class=-1,
+                 box_coding=1,
+                 iou_threshold=0.45,
+                 max_output_boxes=100,
+                 plugin_version="1",
+                 score_activation=0,
+                 score_threshold=0.25):
+        out = g.op("TRT::EfficientNMS_TRT",
+                   boxes,
+                   scores,
+                   background_class_i=background_class,
+                   box_coding_i=box_coding,
+                   iou_threshold_f=iou_threshold,
+                   max_output_boxes_i=max_output_boxes,
+                   plugin_version_s=plugin_version,
+                   score_activation_i=score_activation,
+                   score_threshold_f=score_threshold,
+                   outputs=4)
+        nums, boxes, scores, classes = out
+        return nums, boxes, scores, classes
+
+
+class ONNX_ORT(nn.Module):
+
+    def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, device=None):
+        super().__init__()
+        self.device = device if device else torch.device("cpu")
+        self.max_obj = torch.tensor([max_obj]).to(device)
+        self.iou_threshold = torch.tensor([iou_thres]).to(device)
+        self.score_threshold = torch.tensor([score_thres]).to(device)
+        self.max_wh = 7680
+        self.convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]],
+                                           dtype=torch.float32,
+                                           device=self.device)
+
+    def forward(self, x):
+        batch, anchors, _ = x.shape
+        boxes = x[:, :, :4]
+        conf = x[:, :, 4:5]
+        scores = x[:, :, 5:]
+        scores *= conf
+
+        nms_box = boxes @ self.convert_matrix
+        nms_score = scores.transpose(1, 2).contiguous()
+
+        selected_indices = ORT_NMS.apply(nms_box, nms_score, self.max_obj, self.iou_threshold, self.score_threshold)
+        batch_inds, cls_inds, box_inds = selected_indices.unbind(1)
+        selected_score = nms_score[batch_inds, cls_inds, box_inds].unsqueeze(1)
+        selected_box = nms_box[batch_inds, box_inds, ...]
+
+        dets = torch.cat([selected_box, selected_score], dim=1)
+
+        batched_dets = dets.unsqueeze(0).repeat(batch, 1, 1)
+        batch_template = torch.arange(0, batch, dtype=batch_inds.dtype, device=batch_inds.device)
+        batched_dets = batched_dets.where((batch_inds == batch_template.unsqueeze(1)).unsqueeze(-1),
+                                          batched_dets.new_zeros(1))
+
+        batched_labels = cls_inds.unsqueeze(0).repeat(batch, 1)
+        batched_labels = batched_labels.where((batch_inds == batch_template.unsqueeze(1)),
+                                              batched_labels.new_ones(1) * -1)
+
+        N = batched_dets.shape[0]
+
+        batched_dets = torch.cat((batched_dets, batched_dets.new_zeros((N, 1, 5))), 1)
+        batched_labels = torch.cat((batched_labels, -batched_labels.new_ones((N, 1))), 1)
+
+        _, topk_inds = batched_dets[:, :, -1].sort(dim=1, descending=True)
+
+        topk_batch_inds = torch.arange(batch, dtype=topk_inds.dtype, device=topk_inds.device).view(-1, 1)
+        batched_dets = batched_dets[topk_batch_inds, topk_inds, ...]
+        labels = batched_labels[topk_batch_inds, topk_inds, ...]
+        boxes, scores = batched_dets.split((4, 1), -1)
+        scores = scores.squeeze(-1)
+        num_dets = (scores > 0).sum(1, keepdim=True)
+        return num_dets, boxes, scores, labels
+
+
+class ONNX_TRT(nn.Module):
+
+    def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, device=None):
+        super().__init__()
+        self.device = device if device else torch.device('cpu')
+        self.background_class = -1
+        self.box_coding = 1
+        self.iou_threshold = iou_thres
+        self.max_obj = max_obj
+        self.plugin_version = '1'
+        self.score_activation = 0
+        self.score_threshold = score_thres
+
+    def forward(self, x):
+        boxes = x[:, :, :4]
+        conf = x[:, :, 4:5]
+        scores = x[:, :, 5:]
+        scores *= conf
+        num_dets, boxes, scores, labels = TRT_NMS.apply(boxes, scores, self.background_class, self.box_coding,
+                                                        self.iou_threshold, self.max_obj, self.plugin_version,
+                                                        self.score_activation, self.score_threshold)
+        return num_dets, boxes, scores, labels
+
+
+class End2End(nn.Module):
+
+    def __init__(self, model, max_obj=100, iou_thres=0.45, score_thres=0.25, backend='ort', device=None):
+        super().__init__()
+        device = device if device else torch.device('cpu')
+        self.model = model.to(device)
+
+        if backend == 'trt':
+            self.patch_model = ONNX_TRT
+        elif backend == 'ort':
+            self.patch_model = ONNX_ORT
+        elif backend == 'ovo':
+            self.patch_model = ONNX_ORT
+        else:
+            raise NotImplementedError
+        self.end2end = self.patch_model(max_obj, iou_thres, score_thres, device)
+        self.end2end.eval()
+        self.stride = self.model.stride
+        self.names = self.model.names
+
+    def forward(self, x):
+        x = self.model(x)[0]
+        x = self.end2end(x)
+        return x
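
Note on PATCH 326/326 (models/common.py): a model exported through export_onnx_with_nms with the 'ort' backend carries NMS inside the graph, so onnxruntime returns final detections directly, in the num_dets/boxes/scores/labels order defined by output_names. A minimal usage sketch, assuming onnxruntime is installed and a yolov5s.onnx produced by this export path exists (file name and input shape are placeholders):

    import numpy as np
    import onnxruntime as ort

    session = ort.InferenceSession('yolov5s.onnx', providers=['CPUExecutionProvider'])
    im = np.zeros((1, 3, 640, 640), dtype=np.float32)  # dummy preprocessed batch
    num_dets, boxes, scores, labels = session.run(None, {'images': im})
    n = int(num_dets[0])  # valid detections for image 0; rows beyond n are padding
    print(boxes[0, :n], scores[0, :n], labels[0, :n])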