diff --git a/torchvision/datasets/_stereo_matching.py b/torchvision/datasets/_stereo_matching.py
index 1deaab7e2f3..09961211cc2 100644
--- a/torchvision/datasets/_stereo_matching.py
+++ b/torchvision/datasets/_stereo_matching.py
@@ -588,7 +588,6 @@ def _download_dataset(self, root: Union[str, Path]) -> None:
                 for calibration in ["perfect", "imperfect"]:
                     scene_name = f"{split_scene}-{calibration}"
                     scene_url = f"{base_url}/{scene_name}.zip"
-                    print(f"Downloading {scene_url}")
                     # download the scene only if it doesn't exist
                     if not (split_root / scene_name).exists():
                         download_and_extract_archive(
diff --git a/torchvision/datasets/celeba.py b/torchvision/datasets/celeba.py
index 7ff0595166b..c15120af5a5 100644
--- a/torchvision/datasets/celeba.py
+++ b/torchvision/datasets/celeba.py
@@ -105,7 +105,7 @@ def __init__(
         if mask == slice(None):  # if split == "all"
             self.filename = splits.index
         else:
-            self.filename = [splits.index[i] for i in torch.squeeze(torch.nonzero(mask))]
+            self.filename = [splits.index[i] for i in torch.squeeze(torch.nonzero(mask))]  # type: ignore[arg-type]
         self.identity = identity.data[mask]
         self.bbox = bbox.data[mask]
         self.landmarks_align = landmarks_align.data[mask]
diff --git a/torchvision/datasets/cityscapes.py b/torchvision/datasets/cityscapes.py
index 969642553a1..97a47c07beb 100644
--- a/torchvision/datasets/cityscapes.py
+++ b/torchvision/datasets/cityscapes.py
@@ -192,7 +192,7 @@ def __getitem__(self, index: int) -> Tuple[Any, Any]:
 
             targets.append(target)
 
-        target = tuple(targets) if len(targets) > 1 else targets[0]
+        target = tuple(targets) if len(targets) > 1 else targets[0]  # type: ignore[assignment]
 
         if self.transforms is not None:
             image, target = self.transforms(image, target)
diff --git a/torchvision/datasets/inaturalist.py b/torchvision/datasets/inaturalist.py
index 68f9a77f56a..0379f3509c9 100644
--- a/torchvision/datasets/inaturalist.py
+++ b/torchvision/datasets/inaturalist.py
@@ -239,4 +239,3 @@ def download(self) -> None:
         if not os.path.exists(orig_dir_name):
             raise RuntimeError(f"Unable to find downloaded files at {orig_dir_name}")
         os.rename(orig_dir_name, self.root)
-        print(f"Dataset version '{self.version}' has been downloaded and prepared for use")
diff --git a/torchvision/datasets/kinetics.py b/torchvision/datasets/kinetics.py
index 868c08e2c30..7845e91b5b2 100644
--- a/torchvision/datasets/kinetics.py
+++ b/torchvision/datasets/kinetics.py
@@ -1,6 +1,5 @@
 import csv
 import os
-import time
 import urllib
 from functools import partial
 from multiprocessing import Pool
@@ -121,7 +120,6 @@ def __init__(
         self._legacy = _legacy
 
         if _legacy:
-            print("Using legacy structure")
             self.split_folder = root
             self.split = "unknown"
             output_format = "THWC"
@@ -157,14 +155,8 @@ def __init__(
 
     def download_and_process_videos(self) -> None:
         """Downloads all the videos to the _root_ folder in the expected format."""
-        tic = time.time()
         self._download_videos()
-        toc = time.time()
-        print("Elapsed time for downloading in mins ", (toc - tic) / 60)
         self._make_ds_structure()
-        toc2 = time.time()
-        print("Elapsed time for processing in mins ", (toc2 - toc) / 60)
-        print("Elapsed time overall in mins ", (toc2 - tic) / 60)
 
     def _download_videos(self) -> None:
         """download tarballs containing the video to "tars" folder and extract them into the _split_ folder where
diff --git a/torchvision/datasets/mnist.py b/torchvision/datasets/mnist.py
index b2bbcc6fbae..fd145553529 100644
--- a/torchvision/datasets/mnist.py
+++ b/torchvision/datasets/mnist.py
@@ -181,19 +181,20 @@ def download(self) -> None:
 
         # download files
         for filename, md5 in self.resources:
+            errors = []
             for mirror in self.mirrors:
                 url = f"{mirror}{filename}"
                 try:
-                    print(f"Downloading {url}")
                     download_and_extract_archive(url, download_root=self.raw_folder, filename=filename, md5=md5)
-                except URLError as error:
-                    print(f"Failed to download (trying next):\n{error}")
+                except URLError as e:
+                    errors.append(e)
                     continue
-                finally:
-                    print()
                 break
             else:
-                raise RuntimeError(f"Error downloading {filename}")
+                s = f"Error downloading {filename}:\n"
+                for mirror, err in zip(self.mirrors, errors):
+                    s += f"Tried {mirror}, got:\n{str(err)}\n"
+                raise RuntimeError(s)
 
     def extra_repr(self) -> str:
         split = "Train" if self.train is True else "Test"
diff --git a/torchvision/datasets/phototour.py b/torchvision/datasets/phototour.py
index fd2466a3d36..9511f0626b4 100644
--- a/torchvision/datasets/phototour.py
+++ b/torchvision/datasets/phototour.py
@@ -145,7 +145,6 @@ def _check_downloaded(self) -> bool:
 
     def download(self) -> None:
         if self._check_datafile_exists():
-            print(f"# Found cached data {self.data_file}")
             return
 
         if not self._check_downloaded():
@@ -157,8 +156,6 @@ def download(self) -> None:
 
             download_url(url, self.root, filename, md5)
 
-            print(f"# Extracting data {self.data_down}\n")
-
             import zipfile
 
             with zipfile.ZipFile(fpath, "r") as z:
@@ -168,7 +165,6 @@ def download(self) -> None:
 
     def cache(self) -> None:
         # process and save as torch files
-        print(f"# Caching data {self.data_file}")
 
         dataset = (
             read_image_file(self.data_dir, self.image_ext, self.lens[self.name]),
diff --git a/torchvision/datasets/utils.py b/torchvision/datasets/utils.py
index f65eb535459..8bf310896c7 100644
--- a/torchvision/datasets/utils.py
+++ b/torchvision/datasets/utils.py
@@ -112,7 +112,6 @@ def download_url(
 
     # check if file is already present locally
     if check_integrity(fpath, md5):
-        print("Using downloaded and verified file: " + fpath)
         return
 
     if _is_remote_location_available():
@@ -128,12 +127,10 @@ def download_url(
 
     # download the file
    try:
-        print("Downloading " + url + " to " + fpath)
         _urlretrieve(url, fpath)
     except (urllib.error.URLError, OSError) as e:  # type: ignore[attr-defined]
         if url[:5] == "https":
             url = url.replace("https:", "http:")
-            print("Failed download. Trying https -> http instead. Downloading " + url + " to " + fpath)
             _urlretrieve(url, fpath)
         else:
             raise e
@@ -204,7 +201,6 @@ def download_file_from_google_drive(
     os.makedirs(root, exist_ok=True)
 
     if check_integrity(fpath, md5):
-        print(f"Using downloaded {'and verified ' if md5 else ''}file: {fpath}")
         return
 
     gdown.download(id=file_id, output=fpath, quiet=False, user_agent=USER_AGENT)
@@ -395,7 +391,6 @@ def download_and_extract_archive(
         download_url(url, download_root, filename, md5)
 
     archive = os.path.join(download_root, filename)
-    print(f"Extracting {archive} to {extract_root}")
     extract_archive(archive, extract_root, remove_finished)
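Note: with the per-mirror prints removed, a failed MNIST download now surfaces only through the aggregated RuntimeError raised after every mirror has been tried. A minimal sketch of how a caller could log that error themselves, assuming the MNIST constructor from torchvision.datasets; the root path and logger name are illustrative, not part of this change:

import logging

from torchvision.datasets import MNIST

logger = logging.getLogger("mnist_download")

try:
    # download=True triggers MNIST.download(), which now tries each mirror
    # silently and only raises once all mirrors have failed.
    dataset = MNIST(root="./data", train=True, download=True)
except RuntimeError as exc:
    # The exception message lists each mirror that was tried and the URLError
    # it produced, replacing the old per-attempt print statements.
    logger.error("MNIST download failed:\n%s", exc)
    raise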