imgutils.py
import cv2
import numpy as np
import torch


# Map each class index in a 2-D label map to its RGB colour from the colormap.
def decode_segmap(image, colormap, nc):
    r = np.zeros_like(image).astype(np.uint8)
    g = np.zeros_like(image).astype(np.uint8)
    b = np.zeros_like(image).astype(np.uint8)
    for l in range(0, nc):
        idx = image == l
        r[idx] = colormap[l][0]
        g[idx] = colormap[l][1]
        b[idx] = colormap[l][2]
    rgb = np.stack([r, g, b], axis=2)
    return rgb


# Blend the coloured segmentation map onto the original image.
def image_overlay(image, segmented_image):
    alpha = 1     # blending weight for the original image
    beta = 0.75   # blending weight for the segmentation map
    gamma = 0     # scalar added to each sum
    image = np.array(image)
    segmented_image = cv2.cvtColor(segmented_image, cv2.COLOR_RGB2BGR)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    cv2.addWeighted(image, alpha, segmented_image, beta, gamma, image)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    return image


# Return the names of the classes present in the predicted label map.
def detect_classes(img, cats, nb_class):
    detected = []
    for lp in range(0, nb_class):
        idx = img == lp
        if idx.any():
            detected.append(lp)
    return [cats[cnb] for cnb in detected]


# Build the coloured segmentation map from the model output, overlay it on
# the input image, and report which classes were detected.
def segment_map(output, img, colormap, cats, nb_class):
    om = torch.argmax(output.squeeze(), dim=0).detach().cpu().numpy()
    cnames = detect_classes(om, cats, nb_class)
    segmented_image = decode_segmap(om, colormap, nb_class)
    # Resize to the original image size: cv2.resize expects (width, height)
    # and the interpolation flag must be passed as a keyword argument.
    np_img = np.array(img * 255, dtype=np.uint8)
    segmented_image = cv2.resize(
        segmented_image, (np_img.shape[1], np_img.shape[0]),
        interpolation=cv2.INTER_CUBIC)
    overlayed_image = image_overlay(np_img, segmented_image)
    return segmented_image, overlayed_image, cnames
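

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original utilities): how these
# helpers might be driven with a torchvision segmentation model. The model
# choice (deeplabv3_resnet50), the image path "example.jpg", and the
# placeholder colormap / category names below are assumptions for the demo.
if __name__ == "__main__":
    from PIL import Image
    from torchvision import models, transforms

    nb_class = 21                                   # e.g. Pascal VOC class count
    cats = [f"class_{i}" for i in range(nb_class)]  # placeholder category names
    colormap = [((i * 53) % 256, (i * 101) % 256, (i * 197) % 256)
                for i in range(nb_class)]           # placeholder RGB per class

    # Newer torchvision versions prefer the `weights=` argument instead.
    model = models.segmentation.deeplabv3_resnet50(pretrained=True).eval()

    pil_img = Image.open("example.jpg").convert("RGB")
    img = np.array(pil_img, dtype=np.float32) / 255.0   # float RGB in [0, 1]

    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    inp = preprocess(pil_img).unsqueeze(0)

    with torch.no_grad():
        output = model(inp)["out"]                  # shape (1, nb_class, H, W)

    seg, overlay, names = segment_map(output, img, colormap, cats, nb_class)
    print("Detected classes:", names)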