data.py
import torch
from torchvision import models, transforms
from torch.utils.data import DataLoader, Dataset
from pathlib import Path
import os
from PIL import Image
import torchvision.transforms.functional as TF
import re
# images dataset

def expand_greyscale(t):
    # repeat a single-channel tensor across 3 channels so greyscale
    # images match the expected RGB input shape
    return t.expand(3, -1, -1)


class BYOLImagesDataset(Dataset):
    def __init__(self, folder, image_size, exts):
        super().__init__()
        self.folder = folder
        self.exts = exts
        self.paths = []
        self.labels = []
        self.angles = []

        # collect every .png under the folder once per rotation angle
        # (0 to 315 degrees in 15-degree steps), so each image yields
        # multiple rotated views
        for path in self.folder.glob('**/*'):
            _, ext = os.path.splitext(path)
            if ext.lower() in ['.png']:
                for angle in range(0, 330, 15):
                    self.paths.append(path)
                    self.angles.append(angle)

        print(f'{len(self.paths)} images found')

        self.transform = transforms.Compose([
            transforms.Resize(image_size),
            transforms.CenterCrop(image_size),
            transforms.ToTensor(),
            transforms.Lambda(expand_greyscale)
        ])

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, index):
        path = self.paths[index]
        angle = self.angles[index]
        img = Image.open(path)
        img = TF.rotate(img, angle, expand=True)
        return self.transform(img)
class ImagesDataset(Dataset):
    def __init__(self, folder, image_size, exts):
        super().__init__()
        self.folder = folder
        self.exts = exts
        self.paths = []
        self.labels = []

        # collect every .png under the folder; the label is the name of
        # the image's parent directory (one class per sub-folder), so
        # paths and labels stay aligned index for index
        for path in self.folder.glob('**/*'):
            label, ext = os.path.splitext(path)
            if ext.lower() in ['.png']:
                self.paths.append(path)
                self.labels.append(re.split("\\\\|/", label)[-2])

        print(f'{len(self.paths)} images found')

        self.transform = transforms.Compose([
            transforms.Resize(image_size),
            transforms.CenterCrop(image_size),
            transforms.ToTensor(),
            transforms.Lambda(expand_greyscale)
        ])

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, index):
        path = self.paths[index]
        label = self.labels[index]
        img = Image.open(path)
        img = img.convert('RGB')
        return self.transform(img), label
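

# A minimal usage sketch, assuming the images live under ./data (a
# placeholder path) and that a batch size of 32 is wanted: it wraps
# ImagesDataset in the DataLoader imported above and pulls one batch.
if __name__ == '__main__':
    dataset = ImagesDataset(Path('./data'), image_size=256, exts=['.png'])
    loader = DataLoader(dataset, batch_size=32, shuffle=True)

    # images: float tensor of shape [batch, 3, 256, 256];
    # labels: list of parent-directory names collated by the default collate_fn
    images, labels = next(iter(loader))
    print(images.shape, labels[:4])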