auto_blur_video.py
# author: Asmaa Mirkhan ~ 2019
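# Example invocation (a sketch: the file names below are placeholders, the
# flags match the argparse definitions at the bottom of this script):
#   python auto_blur_video.py -i input.mp4 -m face_model.pb -o blurred.mp4 -t 0.7
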
import os
import argparse
import cv2
from DetectorAPI import Detector
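
# DetectorAPI is the face-detection wrapper bundled with this repo. As used
# below, it is expected to expose a Detector(model_path, name) class whose
# detect_objects(image, threshold) method returns a list of box dictionaries
# with [id, score, x1, y1, x2, y2] keys (the format blurBoxes consumes).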


def blurBoxes(image, boxes):
    """
    Blur the given regions of an image.

    Arguments:
        image -- the image that will be edited, as a matrix
        boxes -- list of boxes that will be blurred; each element must be a
                 dictionary that has [id, score, x1, y1, x2, y2] keys

    Returns:
        image -- the blurred image as a matrix
    """
    for box in boxes:
        # unpack the coordinates of the current box
        x1, y1 = box["x1"], box["y1"]
        x2, y2 = box["x2"], box["y2"]

        # crop the image to the current box
        sub = image[y1:y2, x1:x2]

        # apply a box blur on the cropped area
        blur = cv2.blur(sub, (25, 25))

        # paste the blurred patch back onto the original image
        image[y1:y2, x1:x2] = blur

    return image
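
# Example call with a single hypothetical detection box:
#   boxes = [{"id": 1, "score": 0.91, "x1": 40, "y1": 60, "x2": 180, "y2": 220}]
#   frame = blurBoxes(frame, boxes)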


def main(args):
    # assign the model path and detection threshold
    model_path = args.model_path
    threshold = args.threshold

    # create the detection object
    detector = Detector(model_path=model_path, name="detection")

    # open the input video
    capture = cv2.VideoCapture(args.input_video)
    # video width  = capture.get(3)
    # video height = capture.get(4)
    # video fps    = capture.get(5)

    # if an output path was given, prepare a writer with the input's dimensions
    if args.output_video:
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        output = cv2.VideoWriter(args.output_video, fourcc,
                                 20.0, (int(capture.get(3)), int(capture.get(4))))

    frame_counter = 0
    while True:
        # read the video frame by frame
        _, frame = capture.read()
        frame_counter += 1

        # stop at the end of the video
        if frame is None:
            break

        # stop early if 'q' is pressed
        key = cv2.waitKey(1)
        if key & 0xFF == ord('q'):
            break

        # run face detection on the current frame
        faces = detector.detect_objects(frame, threshold=threshold)

        # apply blurring to the detected boxes
        frame = blurBoxes(frame, faces)

        # show the blurred frame
        cv2.imshow('blurred', frame)

        # if an output video was requested, write the frame to it
        if args.output_video:
            output.write(frame)

    # release the video handles and close the preview window
    capture.release()
    if args.output_video:
        output.release()
        print('Blurred video has been saved successfully at',
              args.output_video, 'path')
    cv2.destroyAllWindows()


if __name__ == "__main__":
    # creating the argument parser
    parser = argparse.ArgumentParser(description='Video blurring parameters')

    # adding the arguments
    parser.add_argument('-i',
                        '--input_video',
                        help='Path to your video',
                        type=str,
                        required=True)
    parser.add_argument('-m',
                        '--model_path',
                        help='Path to .pb model',
                        type=str,
                        required=True)
    parser.add_argument('-o',
                        '--output_video',
                        help='Output file path',
                        type=str)
    parser.add_argument('-t',
                        '--threshold',
                        help='Face detection confidence',
                        default=0.7,
                        type=float)
    args = parser.parse_args()

    # if the input video path is invalid then stop
    assert os.path.isfile(args.input_video), 'Invalid input file'

    # if an output directory was given then it must exist
    if args.output_video:
        out_dir = os.path.dirname(args.output_video)
        assert out_dir == '' or os.path.isdir(out_dir), 'No such directory'

    main(args)