From 4491167d9a5982feef4ef028c82afe219a2d0141 Mon Sep 17 00:00:00 2001
From: Gus Class
Date: Wed, 8 Mar 2017 09:44:40 -0800
Subject: [PATCH 1/5] Adds video sample

---
 video/cloud-client/README.md        |  32 +++++++
 video/cloud-client/analyze.py       | 135 ++++++++++++++++++++++++++++
 video/cloud-client/analyze_test.py  |  47 ++++++++++
 video/cloud-client/requirements.txt |   1 +
 4 files changed, 215 insertions(+)
 create mode 100644 video/cloud-client/README.md
 create mode 100644 video/cloud-client/analyze.py
 create mode 100644 video/cloud-client/analyze_test.py
 create mode 100644 video/cloud-client/requirements.txt

diff --git a/video/cloud-client/README.md b/video/cloud-client/README.md
new file mode 100644
index 00000000000..52fdd058d6b
--- /dev/null
+++ b/video/cloud-client/README.md
@@ -0,0 +1,32 @@
+# Google Cloud Video Intelligence Sample
+
+Demonstrates face detection, label detection, and shot change detection using
+the Google Cloud Video Intelligence API.
+
+## Setup
+Please follow the [Set Up Your Project](https://cloud.google.com/video-intelligence/docs/getting-started#set_up_your_project)
+steps in the Quickstart doc to create a project and enable the Google Cloud
+Video Intelligence API. Following those steps, make sure that you
+[Set Up a Service Account](https://cloud.google.com/video-intelligence/docs/common/auth#set_up_a_service_account),
+and export the following environment variable:
+
+```
+export GOOGLE_APPLICATION_CREDENTIALS=/path/to/your-project-credentials.json
+```
+
+## Run the sample
+
+Install [pip](https://pip.pypa.io/en/stable/installing) if not already installed.
+
+Install the necessary libraries using pip:
+
+```sh
+$ pip install -r requirements.txt
+```
+
+Run the sample, for example:
+```
+python analyze.py faces gs://demomaker/google_gmail.mp4
+python analyze.py labels gs://demomaker/cat.mp4
+python analyze.py shots gs://demomaker/gbikes_dinosaur.mp4
+```
diff --git a/video/cloud-client/analyze.py b/video/cloud-client/analyze.py
new file mode 100644
index 00000000000..8061999be80
--- /dev/null
+++ b/video/cloud-client/analyze.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python
+
+# Copyright 2017 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This application demonstrates how to perform basic operations with the
+Google Cloud Video Intelligence API.
+
+For more information, check out the documentation at
+https://cloud.google.com/videointelligence/docs.
+"""
+
+import argparse
+import sys
+import time
+
+from google.cloud.gapic.videointelligence.v1beta1 import enums
+from google.cloud.gapic.videointelligence.v1beta1 import (
+    video_intelligence_service_client)
+
+
+def analyze_faces(path):
+    """ Detects faces given a GCS path. """
+    video_client = (video_intelligence_service_client.
+                    VideoIntelligenceServiceClient())
+    features = [enums.Feature.FACE_DETECTION]
+    operation = video_client.annotate_video(path, features)
+    print('\nProcessing video for face annotations:')
+
+    while not operation.done():
+        sys.stdout.write('.')
+        sys.stdout.flush()
+        time.sleep(1)
+
+    print('\nFinished processing.')
+
+    # first result is retrieved because a single video was processed
+    face_annotations = (operation.result().annotation_results[0].
+                        face_annotations)
+
+    for face_id, face in enumerate(face_annotations):
+        print('Thumbnail size: {}'.format(len(face.thumbnail)))
+
+        for segment_id, segment in enumerate(face.segments):
+            print('Track {}: {} to {}'.format(segment_id,
+                                              segment.start_time_offset,
+                                              segment.end_time_offset))
+
+
+def analyze_labels(path):
+    """ Detects labels given a GCS path. """
+    video_client = (video_intelligence_service_client.
+                    VideoIntelligenceServiceClient())
+    features = [enums.Feature.LABEL_DETECTION]
+    operation = video_client.annotate_video(path, features)
+    print('\nProcessing video for label annotations:')
+
+    while not operation.done():
+        sys.stdout.write('.')
+        sys.stdout.flush()
+        time.sleep(1)
+
+    print('\nFinished processing.')
+
+    # first result is retrieved because a single video was processed
+    results = operation.result().annotation_results[0]
+
+    for i, label in enumerate(results.label_annotations):
+        print('Label description: {}'.format(label.description))
+        print('Locations:')
+
+        for l, location in enumerate(label.locations):
+            print('\t{}: {} to {}'.format(l,
+                                          location.segment.start_time_offset,
+                                          location.segment.end_time_offset))
+
+
+def analyze_shots(path):
+    """ Detects camera shot changes. """
+    video_client = (video_intelligence_service_client.
+                    VideoIntelligenceServiceClient())
+    features = [enums.Feature.SHOT_CHANGE_DETECTION]
+    operation = video_client.annotate_video(path, features)
+    print('\nProcessing video for shot change annotations:')
+
+    while not operation.done():
+        sys.stdout.write('.')
+        sys.stdout.flush()
+        time.sleep(1)
+
+    print('\nFinished processing.')
+
+    # first result is retrieved because a single video was processed
+    shots = operation.result().annotation_results[0]
+
+    for note, shot in enumerate(shots.shot_annotations):
+        print('Scene {}: {} to {}'.format(note,
+                                          shot.start_time_offset,
+                                          shot.end_time_offset))
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description=__doc__,
+        formatter_class=argparse.RawDescriptionHelpFormatter)
+    subparsers = parser.add_subparsers(dest='command')
+    analyze_faces_parser = subparsers.add_parser(
+        'faces', help=analyze_faces.__doc__)
+    analyze_faces_parser.add_argument('path')
+    analyze_labels_parser = subparsers.add_parser(
+        'labels', help=analyze_labels.__doc__)
+    analyze_labels_parser.add_argument('path')
+    analyze_shots_parser = subparsers.add_parser(
+        'shots', help=analyze_shots.__doc__)
+    analyze_shots_parser.add_argument('path')
+
+    args = parser.parse_args()
+
+    if args.command == 'faces':
+        analyze_faces(args.path)
+    if args.command == 'labels':
+        analyze_labels(args.path)
+    if args.command == 'shots':
+        analyze_shots(args.path)
diff --git a/video/cloud-client/analyze_test.py b/video/cloud-client/analyze_test.py
new file mode 100644
index 00000000000..dde0af354c2
--- /dev/null
+++ b/video/cloud-client/analyze_test.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+# Copyright 2017 Google, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+import analyze
+
+
+# TODO: use `cloud_config` fixture instead of hardcoding
+LABEL_FILE_LOCATION = 'gs://python-docs-samples-tests/video/cat.mp4'
+FACES_FILE_LOCATION = 'gs://python-docs-samples-tests/video/gbike.mp4'
+SHOTS_FILE_LOCATION = (
+    'gs://python-docs-samples-tests/video/gbikes_dinosaur.mp4')
+
+
+@pytest.mark.slow
+def test_cat_video_shots(capsys):
+    analyze.analyze_shots(SHOTS_FILE_LOCATION)
+    out, _ = capsys.readouterr()
+    assert 'Scene 1:' in out
+
+
+@pytest.mark.slow
+def test_cat_video_faces(capsys):
+    analyze.analyze_faces(FACES_FILE_LOCATION)
+    out, _ = capsys.readouterr()
+    assert 'Thumbnail' in out
+
+
+@pytest.mark.slow
+def test_cat_video_labels(capsys):
+    analyze.analyze_labels(LABEL_FILE_LOCATION)
+    out, _ = capsys.readouterr()
+    assert 'Whiskers' in out
diff --git a/video/cloud-client/requirements.txt b/video/cloud-client/requirements.txt
new file mode 100644
index 00000000000..ba92ac973b7
--- /dev/null
+++ b/video/cloud-client/requirements.txt
@@ -0,0 +1 @@
+https://storage.googleapis.com/videointelligence-alpha/videointelligence-python.zip

From df90050c71dd03fac672c578d41383119cb61783 Mon Sep 17 00:00:00 2001
From: Jon Wayne Parrott
Date: Wed, 8 Mar 2017 09:48:53 -0800
Subject: [PATCH 2/5] Fix gross hanging indents

---
 video/cloud-client/analyze.py | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/video/cloud-client/analyze.py b/video/cloud-client/analyze.py
index 8061999be80..419e3388f05 100644
--- a/video/cloud-client/analyze.py
+++ b/video/cloud-client/analyze.py
@@ -53,9 +53,10 @@ def analyze_faces(path):
         print('Thumbnail size: {}'.format(len(face.thumbnail)))
 
         for segment_id, segment in enumerate(face.segments):
-            print('Track {}: {} to {}'.format(segment_id,
-                                              segment.start_time_offset,
-                                              segment.end_time_offset))
+            print('Track {}: {} to {}'.format(
+                segment_id,
+                segment.start_time_offset,
+                segment.end_time_offset))
 
 
 def analyze_labels(path):
@@ -81,9 +82,10 @@ def analyze_labels(path):
         print('Locations:')
 
         for l, location in enumerate(label.locations):
-            print('\t{}: {} to {}'.format(l,
-                                          location.segment.start_time_offset,
-                                          location.segment.end_time_offset))
+            print('\t{}: {} to {}'.format(
+                l,
+                location.segment.start_time_offset,
+                location.segment.end_time_offset))
 
 
 def analyze_shots(path):
@@ -105,9 +107,10 @@ def analyze_shots(path):
     shots = operation.result().annotation_results[0]
 
     for note, shot in enumerate(shots.shot_annotations):
-        print('Scene {}: {} to {}'.format(note,
-                                          shot.start_time_offset,
-                                          shot.end_time_offset))
+        print('Scene {}: {} to {}'.format(
+            note,
+            shot.start_time_offset,
+            shot.end_time_offset))
 
 
 if __name__ == '__main__':

From f681d8cec4745a1f7df45ce590d66c60e0ad8a45 Mon Sep 17 00:00:00 2001
From: Jon Wayne Parrott
Date: Wed, 8 Mar 2017 09:51:28 -0800
Subject: [PATCH 3/5] Use cloud_config

---
 video/cloud-client/analyze_test.py | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

diff --git a/video/cloud-client/analyze_test.py b/video/cloud-client/analyze_test.py
index dde0af354c2..6f20643640a 100644
--- a/video/cloud-client/analyze_test.py
+++ b/video/cloud-client/analyze_test.py
@@ -19,29 +19,30 @@
 import analyze
 
 
-# TODO: use `cloud_config` fixture instead of hardcoding
-LABEL_FILE_LOCATION = 'gs://python-docs-samples-tests/video/cat.mp4'
-FACES_FILE_LOCATION = 'gs://python-docs-samples-tests/video/gbike.mp4'
-SHOTS_FILE_LOCATION = (
-    'gs://python-docs-samples-tests/video/gbikes_dinosaur.mp4')
+LABEL_FILE_PATH = '/video/cat.mp4'
+FACES_FILE_PATH = '/video/gbike.mp4'
+SHOTS_FILE_PATH = '/video/gbikes_dinosaur.mp4'
 
 
 @pytest.mark.slow
-def test_cat_video_shots(capsys):
-    analyze.analyze_shots(SHOTS_FILE_LOCATION)
+def test_cat_video_shots(capsys, cloud_config):
+    analyze.analyze_shots(
+        'gs://{}{}'.format(cloud_config.bucket, SHOTS_FILE_PATH))
     out, _ = capsys.readouterr()
     assert 'Scene 1:' in out
 
 
 @pytest.mark.slow
-def test_cat_video_faces(capsys):
-    analyze.analyze_faces(FACES_FILE_LOCATION)
+def test_cat_video_faces(capsys, cloud_config):
+    analyze.analyze_faces(
+        'gs://{}{}'.format(cloud_config.bucket, FACES_FILE_PATH))
     out, _ = capsys.readouterr()
     assert 'Thumbnail' in out
 
 
 @pytest.mark.slow
-def test_cat_video_labels(capsys):
-    analyze.analyze_labels(LABEL_FILE_LOCATION)
+def test_cat_video_labels(capsys, cloud_config):
+    analyze.analyze_labels(
+        'gs://{}{}'.format(cloud_config.bucket, LABELS_FILE_PATH)
     out, _ = capsys.readouterr()
     assert 'Whiskers' in out

From d7856282c1c8af4697c3e58846f4cfd4f54dd1b1 Mon Sep 17 00:00:00 2001
From: Jon Wayne Parrott
Date: Wed, 8 Mar 2017 10:14:29 -0800
Subject: [PATCH 4/5] Fix syntax error

---
 video/cloud-client/analyze_test.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/video/cloud-client/analyze_test.py b/video/cloud-client/analyze_test.py
index 6f20643640a..fbd65f43de4 100644
--- a/video/cloud-client/analyze_test.py
+++ b/video/cloud-client/analyze_test.py
@@ -43,6 +43,6 @@ def test_cat_video_faces(capsys, cloud_config):
 @pytest.mark.slow
 def test_cat_video_labels(capsys, cloud_config):
     analyze.analyze_labels(
-        'gs://{}{}'.format(cloud_config.bucket, LABELS_FILE_PATH)
+        'gs://{}{}'.format(cloud_config.bucket, LABELS_FILE_PATH))
     out, _ = capsys.readouterr()
     assert 'Whiskers' in out

From 8b14d3207948d9d4038be760021d0a1aa9e5fef7 Mon Sep 17 00:00:00 2001
From: Jon Wayne Parrott
Date: Wed, 8 Mar 2017 10:17:53 -0800
Subject: [PATCH 5/5] Update analyze_test.py

---
 video/cloud-client/analyze_test.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/video/cloud-client/analyze_test.py b/video/cloud-client/analyze_test.py
index fbd65f43de4..90b510ffd2e 100644
--- a/video/cloud-client/analyze_test.py
+++ b/video/cloud-client/analyze_test.py
@@ -19,7 +19,7 @@
 import analyze
 
 
-LABEL_FILE_PATH = '/video/cat.mp4'
+LABELS_FILE_PATH = '/video/cat.mp4'
 FACES_FILE_PATH = '/video/gbike.mp4'
 SHOTS_FILE_PATH = '/video/gbikes_dinosaur.mp4'
 
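The helpers added in PATCH 1 (`analyze_faces`, `analyze_labels`, `analyze_shots`) can also be called without the argparse CLI. A minimal sketch, assuming `analyze.py` from `video/cloud-client` is importable and `GOOGLE_APPLICATION_CREDENTIALS` is exported as described in the README; the `gs://demomaker` URIs are the same ones the README examples use:

```python
# Minimal usage sketch: drive the sample's helpers directly instead of the CLI.
# Assumes analyze.py from video/cloud-client is on the Python path and that
# GOOGLE_APPLICATION_CREDENTIALS points at a service account key (see README).
import analyze

# Each helper blocks while polling the long-running annotation operation,
# printing progress dots and then the annotations, just like `python analyze.py ...`.
analyze.analyze_labels('gs://demomaker/cat.mp4')
analyze.analyze_shots('gs://demomaker/gbikes_dinosaur.mp4')
```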