Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

remove printing info in datasets #8683

Merged
merged 3 commits on Oct 15, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion torchvision/datasets/_stereo_matching.py
Original file line number Diff line number Diff line change
Expand Up @@ -588,7 +588,6 @@ def _download_dataset(self, root: Union[str, Path]) -> None:
for calibration in ["perfect", "imperfect"]:
scene_name = f"{split_scene}-{calibration}"
scene_url = f"{base_url}/{scene_name}.zip"
print(f"Downloading {scene_url}")
# download the scene only if it doesn't exist
if not (split_root / scene_name).exists():
download_and_extract_archive(
Expand Down
2 changes: 1 addition & 1 deletion torchvision/datasets/celeba.py
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,7 @@ def __init__(
if mask == slice(None): # if split == "all"
self.filename = splits.index
else:
self.filename = [splits.index[i] for i in torch.squeeze(torch.nonzero(mask))]
self.filename = [splits.index[i] for i in torch.squeeze(torch.nonzero(mask))] # type: ignore[arg-type]
self.identity = identity.data[mask]
self.bbox = bbox.data[mask]
self.landmarks_align = landmarks_align.data[mask]
Expand Down
2 changes: 1 addition & 1 deletion torchvision/datasets/cityscapes.py
Original file line number Diff line number Diff line change
Expand Up @@ -192,7 +192,7 @@ def __getitem__(self, index: int) -> Tuple[Any, Any]:

targets.append(target)

target = tuple(targets) if len(targets) > 1 else targets[0]
target = tuple(targets) if len(targets) > 1 else targets[0] # type: ignore[assignment]

if self.transforms is not None:
image, target = self.transforms(image, target)
Expand Down
1 change: 0 additions & 1 deletion torchvision/datasets/inaturalist.py
Original file line number Diff line number Diff line change
Expand Up @@ -239,4 +239,3 @@ def download(self) -> None:
if not os.path.exists(orig_dir_name):
raise RuntimeError(f"Unable to find downloaded files at {orig_dir_name}")
os.rename(orig_dir_name, self.root)
print(f"Dataset version '{self.version}' has been downloaded and prepared for use")
8 changes: 0 additions & 8 deletions torchvision/datasets/kinetics.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
import csv
import os
import time
import urllib
from functools import partial
from multiprocessing import Pool
Expand Down Expand Up @@ -121,7 +120,6 @@ def __init__(
self._legacy = _legacy

if _legacy:
print("Using legacy structure")
self.split_folder = root
self.split = "unknown"
output_format = "THWC"
Expand Down Expand Up @@ -157,14 +155,8 @@ def __init__(

def download_and_process_videos(self) -> None:
"""Downloads all the videos to the _root_ folder in the expected format."""
tic = time.time()
self._download_videos()
toc = time.time()
print("Elapsed time for downloading in mins ", (toc - tic) / 60)
self._make_ds_structure()
toc2 = time.time()
print("Elapsed time for processing in mins ", (toc2 - toc) / 60)
print("Elapsed time overall in mins ", (toc2 - tic) / 60)

def _download_videos(self) -> None:
"""download tarballs containing the video to "tars" folder and extract them into the _split_ folder where
Expand Down
13 changes: 7 additions & 6 deletions torchvision/datasets/mnist.py
Original file line number Diff line number Diff line change
Expand Up @@ -181,19 +181,20 @@ def download(self) -> None:

# download files
for filename, md5 in self.resources:
errors = []
for mirror in self.mirrors:
url = f"{mirror}{filename}"
try:
print(f"Downloading {url}")
download_and_extract_archive(url, download_root=self.raw_folder, filename=filename, md5=md5)
except URLError as error:
print(f"Failed to download (trying next):\n{error}")
except URLError as e:
errors.append(e)
continue
finally:
print()
break
else:
raise RuntimeError(f"Error downloading {filename}")
s = f"Error downloading {filename}:\n"
for mirror, err in zip(self.mirrors, errors):
s += f"Tried {mirror}, got:\n{str(err)}\n"
raise RuntimeError(s)

def extra_repr(self) -> str:
split = "Train" if self.train is True else "Test"
Expand Down
4 changes: 0 additions & 4 deletions torchvision/datasets/phototour.py
Original file line number Diff line number Diff line change
Expand Up @@ -145,7 +145,6 @@ def _check_downloaded(self) -> bool:

def download(self) -> None:
if self._check_datafile_exists():
print(f"# Found cached data {self.data_file}")
return

if not self._check_downloaded():
Expand All @@ -157,8 +156,6 @@ def download(self) -> None:

download_url(url, self.root, filename, md5)

print(f"# Extracting data {self.data_down}\n")

import zipfile

with zipfile.ZipFile(fpath, "r") as z:
Expand All @@ -168,7 +165,6 @@ def download(self) -> None:

def cache(self) -> None:
# process and save as torch files
print(f"# Caching data {self.data_file}")

dataset = (
read_image_file(self.data_dir, self.image_ext, self.lens[self.name]),
Expand Down
5 changes: 0 additions & 5 deletions torchvision/datasets/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,6 @@ def download_url(

# check if file is already present locally
if check_integrity(fpath, md5):
print("Using downloaded and verified file: " + fpath)
return

if _is_remote_location_available():
Expand All @@ -128,12 +127,10 @@ def download_url(

# download the file
try:
print("Downloading " + url + " to " + fpath)
_urlretrieve(url, fpath)
except (urllib.error.URLError, OSError) as e: # type: ignore[attr-defined]
if url[:5] == "https":
url = url.replace("https:", "http:")
print("Failed download. Trying https -> http instead. Downloading " + url + " to " + fpath)
_urlretrieve(url, fpath)
else:
raise e
Expand Down Expand Up @@ -204,7 +201,6 @@ def download_file_from_google_drive(
os.makedirs(root, exist_ok=True)

if check_integrity(fpath, md5):
print(f"Using downloaded {'and verified ' if md5 else ''}file: {fpath}")
return

gdown.download(id=file_id, output=fpath, quiet=False, user_agent=USER_AGENT)
Expand Down Expand Up @@ -395,7 +391,6 @@ def download_and_extract_archive(
download_url(url, download_root, filename, md5)

archive = os.path.join(download_root, filename)
print(f"Extracting {archive} to {extract_root}")
extract_archive(archive, extract_root, remove_finished)


Expand Down
Loading