From 1b81609b0d669cf2bf415d515d0948a43eb2cac0 Mon Sep 17 00:00:00 2001
From: Yu-Han Liu
Date: Wed, 23 Aug 2017 21:07:06 -0700
Subject: [PATCH 01/16] update analyze_safe_search

---
 video/cloud-client/analyze/analyze.py | 32 ++++++++++++---------------
 1 file changed, 14 insertions(+), 18 deletions(-)

diff --git a/video/cloud-client/analyze/analyze.py b/video/cloud-client/analyze/analyze.py
index c85f9d78a280..1de4669daa0a 100644
--- a/video/cloud-client/analyze/analyze.py
+++ b/video/cloud-client/analyze/analyze.py
@@ -33,18 +33,17 @@
 import sys
 import time
 
-from google.cloud.gapic.videointelligence.v1beta1 import enums
-from google.cloud.gapic.videointelligence.v1beta1 import (
-    video_intelligence_service_client)
-
+from google.cloud import videointelligence_v1beta2
+from google.cloud.videointelligence_v1beta2 import enums
+from google.cloud.videointelligence_v1beta2 import types
 
 def analyze_safe_search(path):
-    """ Detects safe search features the GCS path to a video. """
-    video_client = (video_intelligence_service_client.
-                    VideoIntelligenceServiceClient())
-    features = [enums.Feature.SAFE_SEARCH_DETECTION]
+    """ Detects explicit content from the GCS path to a video. """
+    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
+    features = [enums.Feature.EXPLICIT_CONTENT_DETECTION]
+
     operation = video_client.annotate_video(path, features)
-    print('\nProcessing video for safe search annotations:')
+    print('\nProcessing video for explicit content annotations:')
 
     while not operation.done():
         sys.stdout.write('.')
@@ -54,19 +53,16 @@ def analyze_safe_search(path):
     print('\nFinished processing.')
 
     # first result is retrieved because a single video was processed
-    safe_annotations = (operation.result().annotation_results[0].
-                        safe_search_annotations)
+    explicit_annotation = (operation.result().annotation_results[0].
+                           explicit_annotation)
 
     likely_string = ("Unknown", "Very unlikely", "Unlikely", "Possible",
                      "Likely", "Very likely")
 
-    for note in safe_annotations:
-        print('Time: {}s'.format(note.time_offset / 1000000.0))
-        print('\tadult: {}'.format(likely_string[note.adult]))
-        print('\tspoof: {}'.format(likely_string[note.spoof]))
-        print('\tmedical: {}'.format(likely_string[note.medical]))
-        print('\tracy: {}'.format(likely_string[note.racy]))
-        print('\tviolent: {}\n'.format(likely_string[note.violent]))
+    for frame in explicit_annotation.frames:
+        frame_time = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
+        print('Time: {}s'.format(frame_time))
+        print('\tadult: {}'.format(likely_string[frame.pornography_likelihood]))
 
 
 def analyze_faces(path):

From 23ce5088d2f5f246941bf1916514a04844aefc4a Mon Sep 17 00:00:00 2001
From: Yu-Han Liu
Date: Wed, 23 Aug 2017 21:18:28 -0700
Subject: [PATCH 02/16] update analyze_shots

---
 video/cloud-client/analyze/analyze.py | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/video/cloud-client/analyze/analyze.py b/video/cloud-client/analyze/analyze.py
index 1de4669daa0a..03ef867d8afa 100644
--- a/video/cloud-client/analyze/analyze.py
+++ b/video/cloud-client/analyze/analyze.py
@@ -175,8 +175,7 @@ def analyze_labels_file(path):
 
 def analyze_shots(path):
     """ Detects camera shot changes. """
-    video_client = (video_intelligence_service_client.
-                    VideoIntelligenceServiceClient())
+    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
     features = [enums.Feature.SHOT_CHANGE_DETECTION]
     operation = video_client.annotate_video(path, features)
     print('\nProcessing video for shot change annotations:')
@@ -189,13 +188,12 @@ def analyze_shots(path):
     print('\nFinished processing.')
 
     # first result is retrieved because a single video was processed
-    shots = operation.result().annotation_results[0]
+    shots = operation.result().annotation_results[0].shot_annotations
 
-    for note, shot in enumerate(shots.shot_annotations):
-        print('\tScene {}: {} to {}'.format(
-            note,
-            shot.start_time_offset / 1000000.0,
-            shot.end_time_offset / 1000000.0))
+    for i, shot in enumerate(shots):
+        start_time = shot.start_time_offset.seconds + shot.end_time_offset.nanos / 1e9
+        end_time = shot.end_time_offset.seconds + shot.end_time_offset.nanos / 1e9
+        print('\tScene {}: {} to {}'.format(i, start_time, end_time))
 
 
 if __name__ == '__main__':

From 4295e5e0e3f882d2172f8687b7c4ed17a48af92d Mon Sep 17 00:00:00 2001
From: Yu-Han Liu
Date: Fri, 25 Aug 2017 11:34:12 -0700
Subject: [PATCH 03/16] update explicit_content_detection and test

---
 video/cloud-client/analyze/analyze.py      | 16 ++++++++--------
 video/cloud-client/analyze/analyze_test.py | 10 +++++-----
 2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/video/cloud-client/analyze/analyze.py b/video/cloud-client/analyze/analyze.py
index 03ef867d8afa..417a14ea5b9c 100644
--- a/video/cloud-client/analyze/analyze.py
+++ b/video/cloud-client/analyze/analyze.py
@@ -23,7 +23,7 @@
     python analyze.py labels gs://cloud-ml-sandbox/video/chicago.mp4
     python analyze.py labels_file resources/cat.mp4
     python analyze.py shots gs://demomaker/gbikes_dinosaur.mp4
-    python analyze.py safe_search gs://demomaker/gbikes_dinosaur.mp4
+    python analyze.py explicit_content gs://demomaker/gbikes_dinosaur.mp4
 
 """
 
@@ -37,7 +37,7 @@
 from google.cloud.videointelligence_v1beta2 import enums
 from google.cloud.videointelligence_v1beta2 import types
 
-def analyze_safe_search(path):
+def analyze_explicit_content(path):
     """ Detects explicit content from the GCS path to a video. """
     video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
     features = [enums.Feature.EXPLICIT_CONTENT_DETECTION]
@@ -62,7 +62,7 @@ def analyze_safe_search(path):
     for frame in explicit_annotation.frames:
         frame_time = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
         print('Time: {}s'.format(frame_time))
-        print('\tadult: {}'.format(likely_string[frame.pornography_likelihood]))
+        print('\tpornography: {}'.format(likely_string[frame.pornography_likelihood]))
 
 
 def analyze_faces(path):
@@ -210,9 +210,9 @@ def analyze_shots(path):
     analyze_labels_file_parser = subparsers.add_parser(
         'labels_file', help=analyze_labels_file.__doc__)
     analyze_labels_file_parser.add_argument('path')
-    analyze_safe_search_parser = subparsers.add_parser(
-        'safe_search', help=analyze_safe_search.__doc__)
-    analyze_safe_search_parser.add_argument('path')
+    analyze_explicit_content_parser = subparsers.add_parser(
+        'explicit_content', help=analyze_explicit_content.__doc__)
+    analyze_explicit_content_parser.add_argument('path')
     analyze_shots_parser = subparsers.add_parser(
         'shots', help=analyze_shots.__doc__)
     analyze_shots_parser.add_argument('path')
@@ -227,5 +227,5 @@ def analyze_shots(path):
         analyze_labels(args.path)
     if args.command == 'labels_file':
         analyze_labels_file(args.path)
     if args.command == 'shots':
         analyze_shots(args.path)
-    if args.command == 'safe_search':
-        analyze_safe_search(args.path)
+    if args.command == 'explicit_content':
+        analyze_explicit_content(args.path)
diff --git a/video/cloud-client/analyze/analyze_test.py b/video/cloud-client/analyze/analyze_test.py
index 2aac755f0735..138f0e4d2baa 100644
--- a/video/cloud-client/analyze/analyze_test.py
+++ b/video/cloud-client/analyze/analyze_test.py
@@ -24,7 +24,7 @@
 BUCKET = os.environ['CLOUD_STORAGE_BUCKET']
 LABELS_FILE_PATH = '/video/cat.mp4'
 FACES_FILE_PATH = '/video/googlework.mp4'
-SAFE_SEARCH_FILE_PATH = '/video/cat.mp4'
+EXPLiCIT_CONTENT_FILE_PATH = '/video/cat.mp4'
 SHOTS_FILE_PATH = '/video/gbikes_dinosaur.mp4'
 
 
@@ -53,8 +53,8 @@ def test_dino_video_labels(capsys):
 
 
 @pytest.mark.slow
-def test_cat_safe_search(capsys):
-    analyze.analyze_safe_search(
-        'gs://{}{}'.format(BUCKET, SAFE_SEARCH_FILE_PATH))
+def test_cat_explicit_content(capsys):
+    analyze.analyze_explicit_content(
+        'gs://{}{}'.format(BUCKET, EXPLiCIT_CONTENT_FILE_PATH))
     out, _ = capsys.readouterr()
-    assert 'medical' in out
+    assert 'pornography' in out

From 5ff54fa51a8c1b8b2e8538ef39b03c621fc77940 Mon Sep 17 00:00:00 2001
From: Yu-Han Liu
Date: Mon, 28 Aug 2017 10:12:01 -0700
Subject: [PATCH 04/16] update face detection

---
 video/cloud-client/analyze/analyze.py | 50 +++++++++++++++++----------
 1 file changed, 32 insertions(+), 18 deletions(-)

diff --git a/video/cloud-client/analyze/analyze.py b/video/cloud-client/analyze/analyze.py
index 417a14ea5b9c..53ebec5b321a 100644
--- a/video/cloud-client/analyze/analyze.py
+++ b/video/cloud-client/analyze/analyze.py
@@ -67,10 +67,13 @@ def analyze_explicit_content(path):
 
 def analyze_faces(path):
     """ Detects faces given a GCS path. """
-    video_client = (video_intelligence_service_client.
-                    VideoIntelligenceServiceClient())
+    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
     features = [enums.Feature.FACE_DETECTION]
-    operation = video_client.annotate_video(path, features)
+
+    config = types.FaceDetectionConfig(include_bounding_boxes=True)
+    context = types.VideoContext(face_detection_config=config)
+
+    operation = video_client.annotate_video(path, features, video_context=context)
     print('\nProcessing video for face annotations:')
 
     while not operation.done():
@@ -85,27 +88,39 @@ def analyze_faces(path):
         face_annotations)
 
     for face_id, face in enumerate(face_annotations):
+        print('Face {}'.format(face_id))
         print('Thumbnail size: {}'.format(len(face.thumbnail)))
 
         for segment_id, segment in enumerate(face.segments):
-            positions = 'Entire video'
-            if (segment.start_time_offset != -1 or
-                    segment.end_time_offset != -1):
-                positions = '{}s to {}s'.format(
-                    segment.start_time_offset / 1000000.0,
-                    segment.end_time_offset / 1000000.0)
-
-            print('\tTrack {}: {}'.format(segment_id, positions))
-
+            start_time = segment.segment.start_time_offset.seconds + segment.segment.end_time_offset.nanos / 1e9
+            end_time = segment.segment.end_time_offset.seconds + segment.segment.end_time_offset.nanos / 1e9
+            positions = '{}s to {}s'.format(start_time, end_time)
+            print('\tSegment {}: {}'.format(segment_id, positions))
+
+        # There are typically many frames for each face,
+        # here we print information on only the first frame.
+        frame = face.frames[0]
+        time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
+        box = frame.normalized_bounding_boxes[0]
+        print('First frame time offset: {}s'.format(time_offset))
+        print('First frame normalized bounding box:')
+        print('\tleft: {}'.format(box.left))
+        print('\ttop: {}'.format(box.top))
+        print('\tright: {}'.format(box.right))
+        print('\tbottom: {}'.format(box.bottom))
         print('\n')
 
 
 def analyze_labels(path):
     """ Detects labels given a GCS path. """
-    video_client = (video_intelligence_service_client.
-                    VideoIntelligenceServiceClient())
+    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
     features = [enums.Feature.LABEL_DETECTION]
-    operation = video_client.annotate_video(path, features)
+
+    config = types.LabelDetectionConfig(
+        label_detection_mode=enums.LabelDetectionMode.FRAME_MODE)
+    context = types.VideoContext(label_detection_config=config)
+
+    operation = video_client.annotate_video(path, features, video_context=context)
     print('\nProcessing video for label annotations:')
 
     while not operation.done():
@@ -136,8 +151,7 @@ def analyze_labels(path):
 
 def analyze_labels_file(path):
     """ Detects labels given a file path. """
-    video_client = (video_intelligence_service_client.
-                    VideoIntelligenceServiceClient())
+    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
     features = [enums.Feature.LABEL_DETECTION]
 
     with io.open(path, "rb") as movie:
@@ -193,7 +207,7 @@ def analyze_shots(path):
     for i, shot in enumerate(shots):
         start_time = shot.start_time_offset.seconds + shot.end_time_offset.nanos / 1e9
         end_time = shot.end_time_offset.seconds + shot.end_time_offset.nanos / 1e9
-        print('\tScene {}: {} to {}'.format(i, start_time, end_time))
+        print('\tShot {}: {} to {}'.format(i, start_time, end_time))
 
 
 if __name__ == '__main__':

From 4ed1aceb4846c8fa8c9123233e6d67867aa60598 Mon Sep 17 00:00:00 2001
From: Yu-Han Liu
Date: Mon, 28 Aug 2017 10:57:00 -0700
Subject: [PATCH 05/16] update label detection (path)

---
 video/cloud-client/analyze/analyze.py | 50 ++++++++++++++++++++-------
 1 file changed, 38 insertions(+), 12 deletions(-)

diff --git a/video/cloud-client/analyze/analyze.py b/video/cloud-client/analyze/analyze.py
index 53ebec5b321a..cac7b686cf33 100644
--- a/video/cloud-client/analyze/analyze.py
+++ b/video/cloud-client/analyze/analyze.py
@@ -117,7 +117,7 @@ def analyze_labels(path):
     features = [enums.Feature.LABEL_DETECTION]
 
     config = types.LabelDetectionConfig(
-        label_detection_mode=enums.LabelDetectionMode.FRAME_MODE)
+        label_detection_mode=enums.LabelDetectionMode.SHOT_AND_FRAME_MODE)
     context = types.VideoContext(label_detection_config=config)
 
     operation = video_client.annotate_video(path, features, video_context=context)
@@ -133,19 +133,45 @@ def analyze_labels(path):
     # first result is retrieved because a single video was processed
     results = operation.result().annotation_results[0]
 
-    for i, label in enumerate(results.label_annotations):
-        print('Label description: {}'.format(label.description))
-        print('Locations:')
+    for i, segment_label in enumerate(results.segment_label_annotations):
+        print('Segment label description: {}'.format(segment_label.entity.description))
+        for category_entity in segment_label.category_entities:
+            print('\tLabel category description: {}'.format(category_entity.description))
 
-        for l, location in enumerate(label.locations):
-            positions = 'Entire video'
-            if (location.segment.start_time_offset != -1 or
-                    location.segment.end_time_offset != -1):
-                positions = '{}s to {}s'.format(
-                    location.segment.start_time_offset / 1000000.0,
-                    location.segment.end_time_offset / 1000000.0)
-            print('\t{}: {}'.format(l, positions))
+        for segment in segment_label.segments:
+            start_time = segment.segment.start_time_offset.seconds + segment.segment.end_time_offset.nanos / 1e9
+            end_time = segment.segment.end_time_offset.seconds + segment.segment.end_time_offset.nanos / 1e9
+            positions = '{}s to {}s'.format(start_time, end_time)
+            confidence = segment.confidence
+            print('\tSegment: {}'.format(positions))
+            print('\tConfidence: {}'.format(confidence))
+        print('\n')
+
+    for i, shot_label in enumerate(results.shot_label_annotations):
+        print('Shot label description: {}'.format(shot_label.entity.description))
+        for category_entity in shot_label.category_entities:
+            print('\tLabel category description: {}'.format(category_entity.description))
+
+        for shot in shot_label.segments:
+            start_time = shot.segment.start_time_offset.seconds + shot.segment.end_time_offset.nanos / 1e9
+            end_time = shot.segment.end_time_offset.seconds + shot.segment.end_time_offset.nanos / 1e9
+            positions = '{}s to {}s'.format(start_time, end_time)
+            confidence = shot.confidence
+            print('\tSegment: {}'.format(positions))
+            print('\tConfidence: {}'.format(confidence))
+        print('\n')
+
+    for i, frame_label in enumerate(results.frame_label_annotations):
+        print('Frame label description: {}'.format(frame_label.entity.description))
+        for category_entity in frame_label.category_entities:
+            print('\tLabel category description: {}'.format(category_entity.description))
+
+        # Each frame_label_annotation has many frames,
+        # here we print information only about the first frame.
+        frame = frame_label.frames[0]
+        time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
+        print('\tFirst frame time offset: {}s'.format(time_offset))
+        print('\tFirst frame confidence: {}'.format(frame.confidence))
         print('\n')

From a1d680e907140e36f7dbd1f0b5c30b27d629d6da Mon Sep 17 00:00:00 2001
From: Yu-Han Liu
Date: Mon, 28 Aug 2017 13:26:54 -0700
Subject: [PATCH 06/16] update label detection (file)

---
 video/cloud-client/analyze/analyze.py      | 64 ++++++++++++++++------
 video/cloud-client/analyze/analyze_test.py | 12 ++--
 2 files changed, 54 insertions(+), 22 deletions(-)

diff --git a/video/cloud-client/analyze/analyze.py b/video/cloud-client/analyze/analyze.py
index cac7b686cf33..aff1dd366b18 100644
--- a/video/cloud-client/analyze/analyze.py
+++ b/video/cloud-client/analyze/analyze.py
@@ -133,34 +133,37 @@ def analyze_labels(path):
     # first result is retrieved because a single video was processed
     results = operation.result().annotation_results[0]
 
+    # Process video/segment level label annotations
     for i, segment_label in enumerate(results.segment_label_annotations):
-        print('Segment label description: {}'.format(segment_label.entity.description))
+        print('Video label description: {}'.format(segment_label.entity.description))
         for category_entity in segment_label.category_entities:
             print('\tLabel category description: {}'.format(category_entity.description))
 
-        for segment in segment_label.segments:
+        for i, segment in enumerate(segment_label.segments):
             start_time = segment.segment.start_time_offset.seconds + segment.segment.end_time_offset.nanos / 1e9
             end_time = segment.segment.end_time_offset.seconds + segment.segment.end_time_offset.nanos / 1e9
             positions = '{}s to {}s'.format(start_time, end_time)
             confidence = segment.confidence
-            print('\tSegment: {}'.format(positions))
+            print('\tSegment {}: {}'.format(i, positions))
             print('\tConfidence: {}'.format(confidence))
         print('\n')
 
+    # Process shot level label annotations
     for i, shot_label in enumerate(results.shot_label_annotations):
         print('Shot label description: {}'.format(shot_label.entity.description))
         for category_entity in shot_label.category_entities:
             print('\tLabel category description: {}'.format(category_entity.description))
 
-        for shot in shot_label.segments:
+        for i, shot in enumerate(shot_label.segments):
             start_time = shot.segment.start_time_offset.seconds + shot.segment.end_time_offset.nanos / 1e9
             end_time = shot.segment.end_time_offset.seconds + shot.segment.end_time_offset.nanos / 1e9
             positions = '{}s to {}s'.format(start_time, end_time)
             confidence = shot.confidence
-            print('\tSegment: {}'.format(positions))
+            print('\tSegment {}: {}'.format(i, positions))
             print('\tConfidence: {}'.format(confidence))
         print('\n')
 
+    # Process frame level label annotations
     for i, frame_label in enumerate(results.frame_label_annotations):
         print('Frame label description: {}'.format(frame_label.entity.description))
@@ -197,19 +200,48 @@ def analyze_labels_file(path):
     # first result is retrieved because a single video was processed
     results = operation.result().annotation_results[0]
 
-    for i, label in enumerate(results.label_annotations):
-        print('Label description: {}'.format(label.description))
-        print('Locations:')
+    # Process video/segment level label annotations
+    for i, segment_label in enumerate(results.segment_label_annotations):
+        print('Video label description: {}'.format(segment_label.entity.description))
+        for category_entity in segment_label.category_entities:
+            print('\tLabel category description: {}'.format(category_entity.description))
+
+        for i, segment in enumerate(segment_label.segments):
+            start_time = segment.segment.start_time_offset.seconds + segment.segment.end_time_offset.nanos / 1e9
+            end_time = segment.segment.end_time_offset.seconds + segment.segment.end_time_offset.nanos / 1e9
+            positions = '{}s to {}s'.format(start_time, end_time)
+            confidence = segment.confidence
+            print('\tSegment {}: {}'.format(i, positions))
+            print('\tConfidence: {}'.format(confidence))
+        print('\n')
+
+    # Process shot level label annotations
+    for i, shot_label in enumerate(results.shot_label_annotations):
+        print('Shot label description: {}'.format(shot_label.entity.description))
+        for category_entity in shot_label.category_entities:
+            print('\tLabel category description: {}'.format(category_entity.description))
+
+        for i, shot in enumerate(shot_label.segments):
+            start_time = shot.segment.start_time_offset.seconds + shot.segment.end_time_offset.nanos / 1e9
+            end_time = shot.segment.end_time_offset.seconds + shot.segment.end_time_offset.nanos / 1e9
+            positions = '{}s to {}s'.format(start_time, end_time)
+            confidence = shot.confidence
+            print('\tSegment {}: {}'.format(i, positions))
+            print('\tConfidence: {}'.format(confidence))
+        print('\n')
 
-        for l, location in enumerate(label.locations):
-            positions = 'Entire video'
-            if (location.segment.start_time_offset != -1 or
-                    location.segment.end_time_offset != -1):
-                positions = '{} to {}'.format(
-                    location.segment.start_time_offset / 1000000.0,
-                    location.segment.end_time_offset / 1000000.0)
-            print('\t{}: {}'.format(l, positions))
+    # Process frame level label annotations
+    for i, frame_label in enumerate(results.frame_label_annotations):
+        print('Frame label description: {}'.format(frame_label.entity.description))
+        for category_entity in frame_label.category_entities:
+            print('\tLabel category description: {}'.format(category_entity.description))
 
+        # Each frame_label_annotation has many frames,
+        # here we print information only about the first frame.
+        frame = frame_label.frames[0]
+        time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
+        print('\tFirst frame time offset: {}s'.format(time_offset))
+        print('\tFirst frame confidence: {}'.format(frame.confidence))
         print('\n')
diff --git a/video/cloud-client/analyze/analyze_test.py b/video/cloud-client/analyze/analyze_test.py
index 138f0e4d2baa..bc9750c40a30 100644
--- a/video/cloud-client/analyze/analyze_test.py
+++ b/video/cloud-client/analyze/analyze_test.py
@@ -29,15 +29,15 @@
 
 
 @pytest.mark.slow
-def test_cat_video_shots(capsys):
+def test_analyze_shots(capsys):
     analyze.analyze_shots(
         'gs://{}{}'.format(BUCKET, SHOTS_FILE_PATH))
     out, _ = capsys.readouterr()
-    assert 'Scene 1:' in out
+    assert 'Shot 1:' in out
 
 
 @pytest.mark.slow
-def test_work_video_faces(capsys):
+def test_analyze_faces(capsys):
     analyze.analyze_faces(
         'gs://{}{}'.format(BUCKET, FACES_FILE_PATH))
     out, _ = capsys.readouterr()
@@ -45,15 +45,15 @@
 
 
 @pytest.mark.slow
-def test_dino_video_labels(capsys):
+def test_analyze_labels(capsys):
     analyze.analyze_labels(
         'gs://{}{}'.format(BUCKET, LABELS_FILE_PATH))
     out, _ = capsys.readouterr()
-    assert 'Whiskers' in out
+    assert 'label description: cat' in out
 
 
 @pytest.mark.slow
-def test_cat_explicit_content(capsys):
+def test_analyze_explicit_content(capsys):
     analyze.analyze_explicit_content(
         'gs://{}{}'.format(BUCKET, EXPLiCIT_CONTENT_FILE_PATH))
     out, _ = capsys.readouterr()
     assert 'pornography' in out

From 82c75367e4d79e301bbe7bdc6305f7c83710d670 Mon Sep 17 00:00:00 2001
From: Yu-Han Liu
Date: Mon, 28 Aug 2017 13:39:03 -0700
Subject: [PATCH 07/16] flake

---
 video/cloud-client/analyze/analyze.py      | 90 ++++++++++++++--------
 video/cloud-client/analyze/analyze_test.py |  4 +-
 2 files changed, 61 insertions(+), 33 deletions(-)

diff --git a/video/cloud-client/analyze/analyze.py b/video/cloud-client/analyze/analyze.py
index aff1dd366b18..c5f1bab560cb 100644
--- a/video/cloud-client/analyze/analyze.py
+++ b/video/cloud-client/analyze/analyze.py
@@ -37,6 +37,7 @@
 from google.cloud.videointelligence_v1beta2 import enums
 from google.cloud.videointelligence_v1beta2 import types
 
+
 def analyze_explicit_content(path):
     """ Detects explicit content from the GCS path to a video. """
     video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
@@ -54,7 +55,7 @@ def analyze_explicit_content(path):
 
     # first result is retrieved because a single video was processed
     explicit_annotation = (operation.result().annotation_results[0].
-                        explicit_annotation)
+                           explicit_annotation)
 
     likely_string = ("Unknown", "Very unlikely", "Unlikely", "Possible",
                      "Likely", "Very likely")
@@ -62,7 +63,8 @@ def analyze_explicit_content(path):
     for frame in explicit_annotation.frames:
         frame_time = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
         print('Time: {}s'.format(frame_time))
-        print('\tpornography: {}'.format(likely_string[frame.pornography_likelihood]))
+        print('\tpornography: {}'.format(
+            likely_string[frame.pornography_likelihood]))
 
 
 def analyze_faces(path):
@@ -73,7 +75,8 @@ def analyze_faces(path):
     config = types.FaceDetectionConfig(include_bounding_boxes=True)
     context = types.VideoContext(face_detection_config=config)
 
-    operation = video_client.annotate_video(path, features, video_context=context)
+    operation = video_client.annotate_video(
+        path, features, video_context=context)
 
     print('\nProcessing video for face annotations:')
     while not operation.done():
@@ -92,15 +95,18 @@ def analyze_faces(path):
         print('Thumbnail size: {}'.format(len(face.thumbnail)))
 
         for segment_id, segment in enumerate(face.segments):
-            start_time = segment.segment.start_time_offset.seconds + segment.segment.end_time_offset.nanos / 1e9
-            end_time = segment.segment.end_time_offset.seconds + segment.segment.end_time_offset.nanos / 1e9
+            start_time = (segment.segment.start_time_offset.seconds +
+                          segment.segment.end_time_offset.nanos / 1e9)
+            end_time = (segment.segment.end_time_offset.seconds +
+                        segment.segment.end_time_offset.nanos / 1e9)
             positions = '{}s to {}s'.format(start_time, end_time)
             print('\tSegment {}: {}'.format(segment_id, positions))
 
         # There are typically many frames for each face,
         # here we print information on only the first frame.
         frame = face.frames[0]
-        time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
+        time_offset = (frame.time_offset.seconds +
+                       frame.time_offset.nanos / 1e9)
         box = frame.normalized_bounding_boxes[0]
         print('First frame time offset: {}s'.format(time_offset))
@@ -126,7 +132,8 @@ def analyze_labels(path):
         label_detection_mode=enums.LabelDetectionMode.SHOT_AND_FRAME_MODE)
     context = types.VideoContext(label_detection_config=config)
 
-    operation = video_client.annotate_video(path, features, video_context=context)
+    operation = video_client.annotate_video(
+        path, features, video_context=context)
 
     print('\nProcessing video for label annotations:')
     while not operation.done():
@@ -142,38 +149,46 @@ def analyze_labels(path):
     # Process video/segment level label annotations
     for i, segment_label in enumerate(results.segment_label_annotations):
-        print('Video label description: {}'.format(segment_label.entity.description))
+        print('Video label description: {}'.format(
+            segment_label.entity.description))
         for category_entity in segment_label.category_entities:
-            print('\tLabel category description: {}'.format(category_entity.description))
+            print('\tLabel category description: {}'.format(
+                category_entity.description))
 
         for i, segment in enumerate(segment_label.segments):
-            start_time = segment.segment.start_time_offset.seconds + segment.segment.end_time_offset.nanos / 1e9
-            end_time = segment.segment.end_time_offset.seconds + segment.segment.end_time_offset.nanos / 1e9
+            start_time = (segment.segment.start_time_offset.seconds +
+                          segment.segment.end_time_offset.nanos / 1e9)
+            end_time = (segment.segment.end_time_offset.seconds +
+                        segment.segment.end_time_offset.nanos / 1e9)
             positions = '{}s to {}s'.format(start_time, end_time)
             confidence = segment.confidence
             print('\tSegment {}: {}'.format(i, positions))
             print('\tConfidence: {}'.format(confidence))
         print('\n')
 
     # Process shot level label annotations
     for i, shot_label in enumerate(results.shot_label_annotations):
-        print('Shot label description: {}'.format(shot_label.entity.description))
+        print('Shot label description: {}'.format(
+            shot_label.entity.description))
         for category_entity in shot_label.category_entities:
-            print('\tLabel category description: {}'.format(category_entity.description))
+            print('\tLabel category description: {}'.format(
+                category_entity.description))
 
         for i, shot in enumerate(shot_label.segments):
-            start_time = shot.segment.start_time_offset.seconds + shot.segment.end_time_offset.nanos / 1e9
-            end_time = shot.segment.end_time_offset.seconds + shot.segment.end_time_offset.nanos / 1e9
+            start_time = (shot.segment.start_time_offset.seconds +
+                          shot.segment.end_time_offset.nanos / 1e9)
+            end_time = (shot.segment.end_time_offset.seconds +
+                        shot.segment.end_time_offset.nanos / 1e9)
            positions = '{}s to {}s'.format(start_time, end_time)
             confidence = shot.confidence
             print('\tSegment {}: {}'.format(i, positions))
             print('\tConfidence: {}'.format(confidence))
         print('\n')
 
     # Process frame level label annotations
     for i, frame_label in enumerate(results.frame_label_annotations):
-        print('Frame label description: {}'.format(frame_label.entity.description))
+        print('Frame label description: {}'.format(
+            frame_label.entity.description))
         for category_entity in frame_label.category_entities:
-            print('\tLabel category description: {}'.format(category_entity.description))
+            print('\tLabel category description: {}'.format(
+                category_entity.description))
 
         # Each frame_label_annotation has many frames,
         # here we print information only about the first frame.
         frame = frame_label.frames[0]
-        time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
+        time_offset = (frame.time_offset.seconds +
+                       frame.time_offset.nanos / 1e9)
         print('\tFirst frame time offset: {}s'.format(time_offset))
         print('\tFirst frame confidence: {}'.format(frame.confidence))
         print('\n')
@@ -205,38 +220,46 @@ def analyze_labels_file(path):
     # Process video/segment level label annotations
     for i, segment_label in enumerate(results.segment_label_annotations):
-        print('Video label description: {}'.format(segment_label.entity.description))
+        print('Video label description: {}'.format(
+            segment_label.entity.description))
         for category_entity in segment_label.category_entities:
-            print('\tLabel category description: {}'.format(category_entity.description))
+            print('\tLabel category description: {}'.format(
+                category_entity.description))
 
         for i, segment in enumerate(segment_label.segments):
-            start_time = segment.segment.start_time_offset.seconds + segment.segment.end_time_offset.nanos / 1e9
-            end_time = segment.segment.end_time_offset.seconds + segment.segment.end_time_offset.nanos / 1e9
+            start_time = (segment.segment.start_time_offset.seconds +
+                          segment.segment.end_time_offset.nanos / 1e9)
+            end_time = (segment.segment.end_time_offset.seconds +
+                        segment.segment.end_time_offset.nanos / 1e9)
             positions = '{}s to {}s'.format(start_time, end_time)
             confidence = segment.confidence
             print('\tSegment {}: {}'.format(i, positions))
             print('\tConfidence: {}'.format(confidence))
         print('\n')
 
     # Process shot level label annotations
     for i, shot_label in enumerate(results.shot_label_annotations):
-        print('Shot label description: {}'.format(shot_label.entity.description))
+        print('Shot label description: {}'.format(
+            shot_label.entity.description))
         for category_entity in shot_label.category_entities:
-            print('\tLabel category description: {}'.format(category_entity.description))
+            print('\tLabel category description: {}'.format(
+                category_entity.description))
 
         for i, shot in enumerate(shot_label.segments):
-            start_time = shot.segment.start_time_offset.seconds + shot.segment.end_time_offset.nanos / 1e9
-            end_time = shot.segment.end_time_offset.seconds + shot.segment.end_time_offset.nanos / 1e9
+            start_time = (shot.segment.start_time_offset.seconds +
+                          shot.segment.end_time_offset.nanos / 1e9)
+            end_time = (shot.segment.end_time_offset.seconds +
+                        shot.segment.end_time_offset.nanos / 1e9)
             positions = '{}s to {}s'.format(start_time, end_time)
             confidence = shot.confidence
             print('\tSegment {}: {}'.format(i, positions))
             print('\tConfidence: {}'.format(confidence))
         print('\n')
 
     # Process frame level label annotations
     for i, frame_label in enumerate(results.frame_label_annotations):
-        print('Frame label description: {}'.format(frame_label.entity.description))
+        print('Frame label description: {}'.format(
+            frame_label.entity.description))
         for category_entity in frame_label.category_entities:
-            print('\tLabel category description: {}'.format(category_entity.description))
+            print('\tLabel category description: {}'.format(
+                category_entity.description))
 
         # Each frame_label_annotation has many frames,
         # here we print information only about the first frame.
         frame = frame_label.frames[0]
-        time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
+        time_offset = (frame.time_offset.seconds +
+                       frame.time_offset.nanos / 1e9)
         print('\tFirst frame time offset: {}s'.format(time_offset))
         print('\tFirst frame confidence: {}'.format(frame.confidence))
         print('\n')
@@ -268,8 +291,10 @@ def analyze_shots(path):
     shots = operation.result().annotation_results[0].shot_annotations
 
     for i, shot in enumerate(shots):
-        start_time = shot.start_time_offset.seconds + shot.end_time_offset.nanos / 1e9
-        end_time = shot.end_time_offset.seconds + shot.end_time_offset.nanos / 1e9
+        start_time = (shot.start_time_offset.seconds +
+                      shot.end_time_offset.nanos / 1e9)
+        end_time = (shot.end_time_offset.seconds +
+                    shot.end_time_offset.nanos / 1e9)
         print('\tShot {}: {} to {}'.format(i, start_time, end_time))
diff --git a/video/cloud-client/analyze/analyze_test.py b/video/cloud-client/analyze/analyze_test.py
index bc9750c40a30..12ee0b5cd89d 100644
--- a/video/cloud-client/analyze/analyze_test.py
+++ b/video/cloud-client/analyze/analyze_test.py
@@ -15,10 +15,8 @@
 # limitations under the License.
 
 import os
-
-import pytest
-
 import analyze
+import pytest
 
 
 BUCKET = os.environ['CLOUD_STORAGE_BUCKET']

From b4073fa4a2fd546976585d0e8a9594477bcc28be Mon Sep 17 00:00:00 2001
From: Yu-Han Liu
Date: Mon, 28 Aug 2017 15:12:23 -0700
Subject: [PATCH 08/16] safe search --> explicit content

---
 video/cloud-client/analyze/README.rst      | 12 ++++++------
 video/cloud-client/analyze/analyze.py      |  4 ++--
 video/cloud-client/analyze/analyze_test.py |  4 +++-
 3 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/video/cloud-client/analyze/README.rst b/video/cloud-client/analyze/README.rst
index f5ac39139e22..d68790fdf354 100644
--- a/video/cloud-client/analyze/README.rst
+++ b/video/cloud-client/analyze/README.rst
@@ -59,10 +59,10 @@ To run this sample:
 
     $ python analyze.py
 
-    usage: analyze.py [-h] {faces,labels,labels_file,safe_search,shots} ...
+    usage: analyze.py [-h] {faces,labels,labels_file,explicit_content,shots} ...
 
-    This application demonstrates face detection, label detection, safe search,
-    and shot change detection using the Google Cloud API.
+    This application demonstrates face detection, label detection,
+    explicit content, and shot change detection using the Google Cloud API.
 
     Usage Examples:
 
        python analyze.py faces gs://demomaker/google_gmail.mp4
        python analyze.py labels gs://cloud-ml-sandbox/video/chicago.mp4
        python analyze.py labels_file resources/cat.mp4
        python analyze.py shots gs://demomaker/gbikes_dinosaur.mp4
-       python analyze.py safe_search gs://demomaker/gbikes_dinosaur.mp4
+       python analyze.py explicit_content gs://demomaker/gbikes_dinosaur.mp4
 
     positional arguments:
       {faces,labels,labels_file,explicit_content,shots}
       faces               Detects faces given a GCS path.
       labels              Detects labels given a GCS path.
       labels_file         Detects labels given a file path.
-      safe_search         Detects safe search features the GCS path to a video.
+      explicit_content    Detects explicit content from the GCS path to a video.
       shots               Detects camera shot changes.
 
     optional arguments:
diff --git a/video/cloud-client/analyze/analyze.py b/video/cloud-client/analyze/analyze.py
index c5f1bab560cb..dd39eda6258e 100644
--- a/video/cloud-client/analyze/analyze.py
+++ b/video/cloud-client/analyze/analyze.py
@@ -14,8 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""This application demonstrates face detection, label detection, safe search,
-and shot change detection using the Google Cloud API.
+"""This application demonstrates face detection, label detection,
+explicit content, and shot change detection using the Google Cloud API.
 
 Usage Examples:
diff --git a/video/cloud-client/analyze/analyze_test.py b/video/cloud-client/analyze/analyze_test.py
index 12ee0b5cd89d..bc9750c40a30 100644
--- a/video/cloud-client/analyze/analyze_test.py
+++ b/video/cloud-client/analyze/analyze_test.py
@@ -15,9 +15,11 @@
 # limitations under the License.
 
 import os
+
 import pytest
+
 import analyze
+
 
 BUCKET = os.environ['CLOUD_STORAGE_BUCKET']
 LABELS_FILE_PATH = '/video/cat.mp4'

From 331965f3a5b13a17989e371f5930c2aafec8dd8d Mon Sep 17 00:00:00 2001
From: Yu-Han Liu
Date: Tue, 29 Aug 2017 12:34:32 -0700
Subject: [PATCH 09/16] update faces tutorial

---
 video/cloud-client/faces/faces.py | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/video/cloud-client/faces/faces.py b/video/cloud-client/faces/faces.py
index b6221fa2e6fb..f407158ac4fd 100644
--- a/video/cloud-client/faces/faces.py
+++ b/video/cloud-client/faces/faces.py
@@ -32,17 +32,16 @@
 import sys
 import time
 
-from google.cloud.gapic.videointelligence.v1beta1 import enums
-from google.cloud.gapic.videointelligence.v1beta1 import (
-    video_intelligence_service_client)
+from google.cloud import videointelligence_v1beta2
+from google.cloud.videointelligence_v1beta2 import enums
+from google.cloud.videointelligence_v1beta2 import types
 # [END imports]
 
 
 def analyze_faces(path):
     # [START construct_request]
     """ Detects faces given a GCS path. """
-    video_client = (video_intelligence_service_client.
-                    VideoIntelligenceServiceClient())
+    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
     features = [enums.Feature.FACE_DETECTION]
     operation = video_client.annotate_video(path, features)
     # [END construct_request]
@@ -66,10 +65,12 @@ def analyze_faces(path):
         print('Thumbnail size: {}'.format(len(face.thumbnail)))
 
         for segment_id, segment in enumerate(face.segments):
-            print('Track {}: {} to {}'.format(
-                segment_id,
-                segment.start_time_offset,
-                segment.end_time_offset))
+            start_time = (segment.segment.start_time_offset.seconds +
+                          segment.segment.end_time_offset.nanos / 1e9)
+            end_time = (segment.segment.end_time_offset.seconds +
+                        segment.segment.end_time_offset.nanos / 1e9)
+            positions = '{}s to {}s'.format(start_time, end_time)
+            print('\tSegment {}: {}'.format(segment_id, positions))
     # [END parse_response]

From 0768a7b996d5cd8bc9110440fcc2b5f4a737461a Mon Sep 17 00:00:00 2001
From: Yu-Han Liu
Date: Tue, 29 Aug 2017 12:39:14 -0700
Subject: [PATCH 10/16] update client library quickstart

---
 video/cloud-client/faces/faces.py           |  1 -
 video/cloud-client/quickstart/quickstart.py | 35 ++++++++++---------
 .../quickstart/quickstart_test.py           |  2 +-
 3 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/video/cloud-client/faces/faces.py b/video/cloud-client/faces/faces.py
index f407158ac4fd..a365d6a6bb65 100644
--- a/video/cloud-client/faces/faces.py
+++ b/video/cloud-client/faces/faces.py
@@ -34,7 +34,6 @@
 
 from google.cloud import videointelligence_v1beta2
 from google.cloud.videointelligence_v1beta2 import enums
-from google.cloud.videointelligence_v1beta2 import types
 # [END imports]
diff --git a/video/cloud-client/quickstart/quickstart.py b/video/cloud-client/quickstart/quickstart.py
index bfb5bca91161..be84b7402610 100644
--- a/video/cloud-client/quickstart/quickstart.py
+++ b/video/cloud-client/quickstart/quickstart.py
@@ -26,12 +26,10 @@ def run_quickstart():
     import sys
     import time
 
-    from google.cloud.gapic.videointelligence.v1beta1 import enums
-    from google.cloud.gapic.videointelligence.v1beta1 import (
-        video_intelligence_service_client)
+    from google.cloud import videointelligence_v1beta2
+    from google.cloud.videointelligence_v1beta2 import enums
 
-    video_client = (video_intelligence_service_client.
-                    VideoIntelligenceServiceClient())
+    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
     features = [enums.Feature.LABEL_DETECTION]
     operation = video_client.annotate_video('gs://demomaker/cat.mp4', features)
     print('\nProcessing video for label annotations:')
@@ -46,19 +44,22 @@ def run_quickstart():
     # first result is retrieved because a single video was processed
     results = operation.result().annotation_results[0]
 
-    for label in results.label_annotations:
-        print('Label description: {}'.format(label.description))
-        print('Locations:')
-
-        for l, location in enumerate(label.locations):
-            positions = 'Entire video'
-            if (location.segment.start_time_offset != -1 or
-                    location.segment.end_time_offset != -1):
-                positions = '{} to {}'.format(
-                    location.segment.start_time_offset / 1000000.0,
-                    location.segment.end_time_offset / 1000000.0)
-            print('\t{}: {}'.format(l, positions))
+    for i, segment_label in enumerate(results.segment_label_annotations):
+        print('Video label description: {}'.format(
+            segment_label.entity.description))
+        for category_entity in segment_label.category_entities:
+            print('\tLabel category description: {}'.format(
+                category_entity.description))
 
+        for i, segment in enumerate(segment_label.segments):
+            start_time = (segment.segment.start_time_offset.seconds +
+                          segment.segment.end_time_offset.nanos / 1e9)
+            end_time = (segment.segment.end_time_offset.seconds +
+                        segment.segment.end_time_offset.nanos / 1e9)
+            positions = '{}s to {}s'.format(start_time, end_time)
+            confidence = segment.confidence
+            print('\tSegment {}: {}'.format(i, positions))
+            print('\tConfidence: {}'.format(confidence))
         print('\n')
     # [END videointelligence_quickstart]
diff --git a/video/cloud-client/quickstart/quickstart_test.py b/video/cloud-client/quickstart/quickstart_test.py
index 9712f4107621..1d1534c46cfe 100644
--- a/video/cloud-client/quickstart/quickstart_test.py
+++ b/video/cloud-client/quickstart/quickstart_test.py
@@ -23,4 +23,4 @@ def test_quickstart(capsys):
     quickstart.run_quickstart()
     out, _ = capsys.readouterr()
-    assert 'Whiskers' in out
+    assert 'Video label description: cat' in out

From d79fc86791277b5e21c52f493c05d895484b1f4b Mon Sep 17 00:00:00 2001
From: Yu-Han Liu
Date: Tue, 29 Aug 2017 12:54:15 -0700
Subject: [PATCH 11/16] update shotchange tutorial

---
 video/cloud-client/shotchange/shotchange.py | 23 +++++++++----------
 .../shotchange/shotchange_test.py           |  2 +-
 2 files changed, 12 insertions(+), 13 deletions(-)

diff --git a/video/cloud-client/shotchange/shotchange.py b/video/cloud-client/shotchange/shotchange.py
index 418b89fe2800..bad449f5852a 100644
--- a/video/cloud-client/shotchange/shotchange.py
+++ b/video/cloud-client/shotchange/shotchange.py
@@ -32,17 +32,15 @@
 import sys
 import time
 
-from google.cloud.gapic.videointelligence.v1beta1 import enums
-from google.cloud.gapic.videointelligence.v1beta1 import (
-    video_intelligence_service_client)
+from google.cloud import videointelligence_v1beta2
+from google.cloud.videointelligence_v1beta2 import enums
 # [END imports]
 
 
 def analyze_shots(path):
     """ Detects camera shot changes. """
     # [START construct_request]
-    video_client = (video_intelligence_service_client.
-                    VideoIntelligenceServiceClient())
+    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
     features = [enums.Feature.SHOT_CHANGE_DETECTION]
     operation = video_client.annotate_video(path, features)
     # [END construct_request]
@@ -58,13 +56,14 @@ def analyze_shots(path):
     # [END check_operation]
 
     # [START parse_response]
-    shots = operation.result().annotation_results[0]
-
-    for note, shot in enumerate(shots.shot_annotations):
-        print('Scene {}: {} to {}'.format(
-            note,
-            shot.start_time_offset,
-            shot.end_time_offset))
+    shots = operation.result().annotation_results[0].shot_annotations
+
+    for i, shot in enumerate(shots):
+        start_time = (shot.start_time_offset.seconds +
+                      shot.end_time_offset.nanos / 1e9)
+        end_time = (shot.end_time_offset.seconds +
+                    shot.end_time_offset.nanos / 1e9)
+        print('\tShot {}: {} to {}'.format(i, start_time, end_time))
     # [END parse_response]
diff --git a/video/cloud-client/shotchange/shotchange_test.py b/video/cloud-client/shotchange/shotchange_test.py
index 2c637036fcfa..a004f56d5cd0 100644
--- a/video/cloud-client/shotchange/shotchange_test.py
+++ b/video/cloud-client/shotchange/shotchange_test.py
@@ -29,4 +29,4 @@ def test_shots_dino(capsys):
     shotchange.analyze_shots(
         'gs://{}{}'.format(BUCKET, SHOTS_FILE_PATH))
     out, _ = capsys.readouterr()
-    assert 'Scene 1:' in out
+    assert 'Shot 1:' in out

From ac39985e584a1e4386b17437bedbd050fe57a765 Mon Sep 17 00:00:00 2001
From: Yu-Han Liu
Date: Tue, 29 Aug 2017 14:12:25 -0700
Subject: [PATCH 12/16] update labels tutorial

---
 video/cloud-client/labels/labels.py      | 34 ++++++++++++++----------
 video/cloud-client/labels/labels_test.py |  2 +-
 2 files changed, 21 insertions(+), 15 deletions(-)

diff --git a/video/cloud-client/labels/labels.py b/video/cloud-client/labels/labels.py
index 7e0f9a0e2d42..699b3930f737 100644
--- a/video/cloud-client/labels/labels.py
+++ b/video/cloud-client/labels/labels.py
@@ -32,17 +32,15 @@
 import sys
 import time
 
-from google.cloud.gapic.videointelligence.v1beta1 import enums
-from google.cloud.gapic.videointelligence.v1beta1 import (
-    video_intelligence_service_client)
+from google.cloud import videointelligence_v1beta2
+from google.cloud.videointelligence_v1beta2 import enums
 # [END imports]
 
 
 def analyze_labels(path):
     """ Detects labels given a GCS path. """
     # [START construct_request]
-    video_client = (video_intelligence_service_client.
-                    VideoIntelligenceServiceClient())
+    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
     features = [enums.Feature.LABEL_DETECTION]
     operation = video_client.annotate_video(path, features)
     # [END construct_request]
@@ -60,15 +58,23 @@ def analyze_labels(path):
     # [START parse_response]
     results = operation.result().annotation_results[0]
 
-    for label in results.label_annotations:
-        print('Label description: {}'.format(label.description))
-        print('Locations:')
-
-        for l, location in enumerate(label.locations):
-            print('\t{}: {} to {}'.format(
-                l,
-                location.segment.start_time_offset,
-                location.segment.end_time_offset))
+    for i, segment_label in enumerate(results.segment_label_annotations):
+        print('Video label description: {}'.format(
+            segment_label.entity.description))
+        for category_entity in segment_label.category_entities:
+            print('\tLabel category description: {}'.format(
+                category_entity.description))
 
+        for i, segment in enumerate(segment_label.segments):
+            start_time = (segment.segment.start_time_offset.seconds +
+                          segment.segment.end_time_offset.nanos / 1e9)
+            end_time = (segment.segment.end_time_offset.seconds +
+                        segment.segment.end_time_offset.nanos / 1e9)
+            positions = '{}s to {}s'.format(start_time, end_time)
+            confidence = segment.confidence
+            print('\tSegment {}: {}'.format(i, positions))
+            print('\tConfidence: {}'.format(confidence))
+        print('\n')
     # [END parse_response]
diff --git a/video/cloud-client/labels/labels_test.py b/video/cloud-client/labels/labels_test.py
index cd571b087f1e..e1c751e99b8c 100644
--- a/video/cloud-client/labels/labels_test.py
+++ b/video/cloud-client/labels/labels_test.py
@@ -29,4 +29,4 @@ def test_feline_video_labels(capsys):
     labels.analyze_labels(
         'gs://{}{}'.format(BUCKET, LABELS_FILE_PATH))
     out, _ = capsys.readouterr()
-    assert 'Whiskers' in out
+    assert 'Video label description: cat' in out

From 146686f6e6924133b8a5762823df48d289437f63 Mon Sep 17 00:00:00 2001
From: Yu-Han Liu
Date: Fri, 15 Sep 2017 15:53:50 -0700
Subject: [PATCH 13/16] correct spelling

---
 video/cloud-client/analyze/analyze_test.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/video/cloud-client/analyze/analyze_test.py b/video/cloud-client/analyze/analyze_test.py
index bc9750c40a30..f1f777384211 100644
--- a/video/cloud-client/analyze/analyze_test.py
+++ b/video/cloud-client/analyze/analyze_test.py
@@ -24,7 +24,7 @@
 BUCKET = os.environ['CLOUD_STORAGE_BUCKET']
 LABELS_FILE_PATH = '/video/cat.mp4'
 FACES_FILE_PATH = '/video/googlework.mp4'
-EXPLiCIT_CONTENT_FILE_PATH = '/video/cat.mp4'
+EXPLICIT_CONTENT_FILE_PATH = '/video/cat.mp4'
 SHOTS_FILE_PATH = '/video/gbikes_dinosaur.mp4'
 
 
@@ -55,6 +55,6 @@ def test_analyze_labels(capsys):
 @pytest.mark.slow
 def test_analyze_explicit_content(capsys):
     analyze.analyze_explicit_content(
-        'gs://{}{}'.format(BUCKET, EXPLiCIT_CONTENT_FILE_PATH))
+        'gs://{}{}'.format(BUCKET, EXPLICIT_CONTENT_FILE_PATH))
     out, _ = capsys.readouterr()
     assert 'pornography' in out

From 2f81d1de3fe211590d1188347b47f94ff059a741 Mon Sep 17 00:00:00 2001
From: Yu-Han Liu
Date: Mon, 18 Sep 2017 15:01:10 -0700
Subject: [PATCH 14/16] correction start_time_offset

---
 video/cloud-client/analyze/analyze.py       | 12 ++++++------
 video/cloud-client/faces/faces.py           |  2 +-
 video/cloud-client/labels/labels.py         |  2 +-
 video/cloud-client/quickstart/quickstart.py |  2 +-
 video/cloud-client/shotchange/shotchange.py |  2 +-
 5 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/video/cloud-client/analyze/analyze.py b/video/cloud-client/analyze/analyze.py
index dd39eda6258e..738f436257f8 100644
--- a/video/cloud-client/analyze/analyze.py
+++ b/video/cloud-client/analyze/analyze.py
@@ -96,7 +96,7 @@ def analyze_faces(path):
 
         for segment_id, segment in enumerate(face.segments):
             start_time = (segment.segment.start_time_offset.seconds +
-                          segment.segment.end_time_offset.nanos / 1e9)
+                          segment.segment.start_time_offset.nanos / 1e9)
             end_time = (segment.segment.end_time_offset.seconds +
                         segment.segment.end_time_offset.nanos / 1e9)
             positions = '{}s to {}s'.format(start_time, end_time)
@@ -150,7 +150,7 @@ def analyze_labels(path):
 
         for i, segment in enumerate(segment_label.segments):
             start_time = (segment.segment.start_time_offset.seconds +
-                          segment.segment.end_time_offset.nanos / 1e9)
+                          segment.segment.start_time_offset.nanos / 1e9)
             end_time = (segment.segment.end_time_offset.seconds +
                         segment.segment.end_time_offset.nanos / 1e9)
             positions = '{}s to {}s'.format(start_time, end_time)
@@ -169,7 +169,7 @@ def analyze_labels(path):
 
         for i, shot in enumerate(shot_label.segments):
             start_time = (shot.segment.start_time_offset.seconds +
-                          shot.segment.end_time_offset.nanos / 1e9)
+                          shot.segment.start_time_offset.nanos / 1e9)
             end_time = (shot.segment.end_time_offset.seconds +
                         shot.segment.end_time_offset.nanos / 1e9)
             positions = '{}s to {}s'.format(start_time, end_time)
@@ -228,7 +228,7 @@ def analyze_labels_file(path):
 
         for i, segment in enumerate(segment_label.segments):
             start_time = (segment.segment.start_time_offset.seconds +
-                          segment.segment.end_time_offset.nanos / 1e9)
+                          segment.segment.start_time_offset.nanos / 1e9)
             end_time = (segment.segment.end_time_offset.seconds +
                         segment.segment.end_time_offset.nanos / 1e9)
             positions = '{}s to {}s'.format(start_time, end_time)
@@ -247,7 +247,7 @@ def analyze_labels_file(path):
 
         for i, shot in enumerate(shot_label.segments):
             start_time = (shot.segment.start_time_offset.seconds +
-                          shot.segment.end_time_offset.nanos / 1e9)
+                          shot.segment.start_time_offset.nanos / 1e9)
             end_time = (shot.segment.end_time_offset.seconds +
                         shot.segment.end_time_offset.nanos / 1e9)
             positions = '{}s to {}s'.format(start_time, end_time)
@@ -292,7 +292,7 @@ def analyze_shots(path):
 
     for i, shot in enumerate(shots):
         start_time = (shot.start_time_offset.seconds +
-                      shot.end_time_offset.nanos / 1e9)
+                      shot.start_time_offset.nanos / 1e9)
         end_time = (shot.end_time_offset.seconds +
                     shot.end_time_offset.nanos / 1e9)
         print('\tShot {}: {} to {}'.format(i, start_time, end_time))
diff --git a/video/cloud-client/faces/faces.py b/video/cloud-client/faces/faces.py
index a365d6a6bb65..3bca1510f9f4 100644
--- a/video/cloud-client/faces/faces.py
+++ b/video/cloud-client/faces/faces.py
@@ -65,7 +65,7 @@ def analyze_faces(path):
 
         for segment_id, segment in enumerate(face.segments):
             start_time = (segment.segment.start_time_offset.seconds +
-                          segment.segment.end_time_offset.nanos / 1e9)
+                          segment.segment.start_time_offset.nanos / 1e9)
             end_time = (segment.segment.end_time_offset.seconds +
                         segment.segment.end_time_offset.nanos / 1e9)
             positions = '{}s to {}s'.format(start_time, end_time)
diff --git a/video/cloud-client/labels/labels.py b/video/cloud-client/labels/labels.py
index 699b3930f737..5f45b8313747 100644
--- a/video/cloud-client/labels/labels.py
+++ b/video/cloud-client/labels/labels.py
@@ -67,7 +67,7 @@ def analyze_labels(path):
 
        for i, segment in enumerate(segment_label.segments):
            start_time = (segment.segment.start_time_offset.seconds +
-                          segment.segment.end_time_offset.nanos / 1e9)
+                          segment.segment.start_time_offset.nanos / 1e9)
            end_time = (segment.segment.end_time_offset.seconds +
                        segment.segment.end_time_offset.nanos / 1e9)
            positions = '{}s to {}s'.format(start_time, end_time)
diff --git a/video/cloud-client/quickstart/quickstart.py b/video/cloud-client/quickstart/quickstart.py
index be84b7402610..1f31d46657a9 100644
--- a/video/cloud-client/quickstart/quickstart.py
+++ b/video/cloud-client/quickstart/quickstart.py
@@ -53,7 +53,7 @@ def run_quickstart():
 
         for i, segment in enumerate(segment_label.segments):
             start_time = (segment.segment.start_time_offset.seconds +
-                          segment.segment.end_time_offset.nanos / 1e9)
+                          segment.segment.start_time_offset.nanos / 1e9)
             end_time = (segment.segment.end_time_offset.seconds +
                         segment.segment.end_time_offset.nanos / 1e9)
             positions = '{}s to {}s'.format(start_time, end_time)
diff --git a/video/cloud-client/shotchange/shotchange.py b/video/cloud-client/shotchange/shotchange.py
index bad449f5852a..4db4ca3bc0ac 100644
--- a/video/cloud-client/shotchange/shotchange.py
+++ b/video/cloud-client/shotchange/shotchange.py
@@ -60,7 +60,7 @@ def analyze_shots(path):
 
     for i, shot in enumerate(shots):
         start_time = (shot.start_time_offset.seconds +
-                      shot.end_time_offset.nanos / 1e9)
+                      shot.start_time_offset.nanos / 1e9)
         end_time = (shot.end_time_offset.seconds +
                     shot.end_time_offset.nanos / 1e9)
         print('\tShot {}: {} to {}'.format(i, start_time, end_time))

From 34f5001c8a3822c3b58a3fa635cfe35387a0be0f Mon Sep 17 00:00:00 2001
From: Yu-Han Liu
Date: Mon, 18 Sep 2017 15:04:58 -0700
Subject: [PATCH 15/16] import order

---
 video/cloud-client/analyze/analyze_test.py | 4 +---
 video/cloud-client/faces/faces_test.py     | 3 +--
 video/cloud-client/labels/labels_test.py   | 3 +--
 3 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/video/cloud-client/analyze/analyze_test.py b/video/cloud-client/analyze/analyze_test.py
index f1f777384211..487b466dde83 100644
--- a/video/cloud-client/analyze/analyze_test.py
+++ b/video/cloud-client/analyze/analyze_test.py
@@ -15,10 +15,8 @@
 # limitations under the License.
 
 import os
-
-import pytest
-
 import analyze
+import pytest
 
 
 BUCKET = os.environ['CLOUD_STORAGE_BUCKET']
diff --git a/video/cloud-client/faces/faces_test.py b/video/cloud-client/faces/faces_test.py
index 5eb4075dedb3..9ca80920fa80 100644
--- a/video/cloud-client/faces/faces_test.py
+++ b/video/cloud-client/faces/faces_test.py
@@ -15,10 +15,9 @@
 # limitations under the License.
 
 import os
-
+import faces
 import pytest
 
-import faces
 
 BUCKET = os.environ['CLOUD_STORAGE_BUCKET']
 FACES_FILE_PATH = '/video/googlework.mp4'
diff --git a/video/cloud-client/labels/labels_test.py b/video/cloud-client/labels/labels_test.py
index e1c751e99b8c..31a68358f6fa 100644
--- a/video/cloud-client/labels/labels_test.py
+++ b/video/cloud-client/labels/labels_test.py
@@ -15,10 +15,9 @@
 # limitations under the License.
 
 import os
-
+import labels
 import pytest
 
-import labels
 
 BUCKET = os.environ['CLOUD_STORAGE_BUCKET']
 LABELS_FILE_PATH = '/video/cat.mp4'

From 64fcf6d8117c110fb67c55f922865da499fc9ec1 Mon Sep 17 00:00:00 2001
From: Yu-Han Liu
Date: Mon, 18 Sep 2017 16:00:46 -0700
Subject: [PATCH 16/16] rebased

---
 video/cloud-client/analyze/requirements.txt    | 2 +-
 video/cloud-client/faces/requirements.txt      | 2 +-
 video/cloud-client/labels/requirements.txt     | 2 +-
 video/cloud-client/quickstart/requirements.txt | 2 +-
 video/cloud-client/shotchange/requirements.txt | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/video/cloud-client/analyze/requirements.txt b/video/cloud-client/analyze/requirements.txt
index 28c02728605f..d3b18758344b 100644
--- a/video/cloud-client/analyze/requirements.txt
+++ b/video/cloud-client/analyze/requirements.txt
@@ -1 +1 @@
-google-cloud-videointelligence==0.26.0
+google-cloud-videointelligence==0.27.2
diff --git a/video/cloud-client/faces/requirements.txt b/video/cloud-client/faces/requirements.txt
index 28c02728605f..d3b18758344b 100644
--- a/video/cloud-client/faces/requirements.txt
+++ b/video/cloud-client/faces/requirements.txt
@@ -1 +1 @@
-google-cloud-videointelligence==0.26.0
+google-cloud-videointelligence==0.27.2
diff --git a/video/cloud-client/labels/requirements.txt b/video/cloud-client/labels/requirements.txt
index 28c02728605f..d3b18758344b 100644
--- a/video/cloud-client/labels/requirements.txt
+++ b/video/cloud-client/labels/requirements.txt
@@ -1 +1 @@
-google-cloud-videointelligence==0.26.0
+google-cloud-videointelligence==0.27.2
diff --git a/video/cloud-client/quickstart/requirements.txt b/video/cloud-client/quickstart/requirements.txt
index 28c02728605f..d3b18758344b 100644
--- a/video/cloud-client/quickstart/requirements.txt
+++ b/video/cloud-client/quickstart/requirements.txt
@@ -1 +1 @@
-google-cloud-videointelligence==0.26.0
+google-cloud-videointelligence==0.27.2
diff --git a/video/cloud-client/shotchange/requirements.txt b/video/cloud-client/shotchange/requirements.txt
index 28c02728605f..d3b18758344b 100644
--- a/video/cloud-client/shotchange/requirements.txt
+++ b/video/cloud-client/shotchange/requirements.txt
@@ -1 +1 @@
-google-cloud-videointelligence==0.26.0
+google-cloud-videointelligence==0.27.2
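
For reference, the minimal calling pattern that every sample in this series converges on looks like the sketch below. It is assembled only from calls that appear in the diffs above (the v1beta2 client constructor, the features list, annotate_video, the polling loop, and annotation_results[0]); the gs://your-bucket/your-video.mp4 URI is a placeholder, not a file used by the samples.

    import sys
    import time

    from google.cloud import videointelligence_v1beta2
    from google.cloud.videointelligence_v1beta2 import enums

    # Build the v1beta2 client and request label detection, as in the patches.
    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
    features = [enums.Feature.LABEL_DETECTION]
    operation = video_client.annotate_video(
        'gs://your-bucket/your-video.mp4', features)  # placeholder URI

    # annotate_video returns a long-running operation; poll until it is done.
    while not operation.done():
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(15)

    # A single video was processed, so only the first result is read.
    results = operation.result().annotation_results[0]
    for segment_label in results.segment_label_annotations:
        print('Video label description: {}'.format(
            segment_label.entity.description))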