Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions video/beta/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,11 @@
<version>1.0</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.google.api.grpc</groupId>
<artifactId>proto-google-cloud-video-intelligence-v1p3beta1</artifactId>
<version>0.83.0</version>
</dependency>
<!-- [START video_java_dependencies_beta] -->
</dependencies>
<!-- [END video_java_dependencies_beta] -->
Expand Down
114 changes: 114 additions & 0 deletions video/beta/src/main/java/com/example/video/DetectFaces.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,114 @@
/*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package com.example.video;

// [START video_detect_faces_beta]

import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoProgress;
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest;
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse;
import com.google.cloud.videointelligence.v1p3beta1.DetectedAttribute;
import com.google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation;
import com.google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig;
import com.google.cloud.videointelligence.v1p3beta1.Feature;
import com.google.cloud.videointelligence.v1p3beta1.TimestampedObject;
import com.google.cloud.videointelligence.v1p3beta1.Track;
import com.google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults;
import com.google.cloud.videointelligence.v1p3beta1.VideoContext;
import com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceClient;
import com.google.cloud.videointelligence.v1p3beta1.VideoSegment;
import com.google.protobuf.ByteString;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.concurrent.ExecutionException;

/**
 * Sample that detects faces in a local video file using the Cloud Video Intelligence API
 * (v1p3beta1), printing each detected face track's time segment and facial attributes.
 */
public class DetectFaces {

  public static void detectFaces() throws Exception {
    // TODO(developer): Replace these variables before running the sample.
    String localFilePath = "resources/googlework_short.mp4";
    detectFaces(localFilePath);
  }

  // Detects faces in a video stored in a local file using the Cloud Video Intelligence API.
  public static void detectFaces(String localFilePath) throws Exception {
    try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
        VideoIntelligenceServiceClient.create()) {
      // Reads a local video file into memory so it can be sent inline with the request.
      Path path = Paths.get(localFilePath);
      byte[] data = Files.readAllBytes(path);
      ByteString inputContent = ByteString.copyFrom(data);

      FaceDetectionConfig faceDetectionConfig =
          FaceDetectionConfig.newBuilder()
              // Must set includeBoundingBoxes to true to get facial attributes.
              .setIncludeBoundingBoxes(true)
              .setIncludeAttributes(true)
              .build();
      VideoContext videoContext =
          VideoContext.newBuilder().setFaceDetectionConfig(faceDetectionConfig).build();

      AnnotateVideoRequest request =
          AnnotateVideoRequest.newBuilder()
              .setInputContent(inputContent)
              .addFeatures(Feature.FACE_DETECTION)
              .setVideoContext(videoContext)
              .build();

      // Detects faces in a video; annotateVideoAsync returns a long-running operation.
      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
          videoIntelligenceServiceClient.annotateVideoAsync(request);

      System.out.println("Waiting for operation to complete...");
      AnnotateVideoResponse response = future.get();

      // Gets annotations for video. Only one video was sent, so only the first result is used.
      VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);

      // Annotations for list of faces detected, tracked and recognized in video.
      for (FaceDetectionAnnotation faceDetectionAnnotation :
          annotationResult.getFaceDetectionAnnotationsList()) {
        System.out.print("Face detected:\n");
        for (Track track : faceDetectionAnnotation.getTracksList()) {
          VideoSegment segment = track.getSegment();
          // Print offsets as seconds.milliseconds. The fractional part must be
          // zero-padded ("%03d"); formatting it with "%.0f" would render 1.005s as "1.5s".
          System.out.printf(
              "\tStart: %d.%03ds\n",
              segment.getStartTimeOffset().getSeconds(),
              segment.getStartTimeOffset().getNanos() / 1000000);
          System.out.printf(
              "\tEnd: %d.%03ds\n",
              segment.getEndTimeOffset().getSeconds(),
              segment.getEndTimeOffset().getNanos() / 1000000);

          // Each segment includes timestamped objects that
          // include characteristics of the face detected.
          TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);

          for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
            // Attributes include unique pieces of clothing, like glasses, poses, or hair color.
            System.out.printf("\tAttribute: %s;\n", attribute.getName());
          }
        }
      }
    }
  }
}
// [END video_detect_faces_beta]
104 changes: 104 additions & 0 deletions video/beta/src/main/java/com/example/video/DetectFacesGcs.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,104 @@
/*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package com.example.video;

// [START video_detect_faces_gcs_beta]

import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoProgress;
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest;
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse;
import com.google.cloud.videointelligence.v1p3beta1.DetectedAttribute;
import com.google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation;
import com.google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig;
import com.google.cloud.videointelligence.v1p3beta1.Feature;
import com.google.cloud.videointelligence.v1p3beta1.TimestampedObject;
import com.google.cloud.videointelligence.v1p3beta1.Track;
import com.google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults;
import com.google.cloud.videointelligence.v1p3beta1.VideoContext;
import com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceClient;
import com.google.cloud.videointelligence.v1p3beta1.VideoSegment;

/**
 * Sample that detects faces in a video stored in Google Cloud Storage using the Cloud Video
 * Intelligence API (v1p3beta1), printing each face track's time segment and facial attributes.
 */
public class DetectFacesGcs {

  public static void detectFacesGcs() throws Exception {
    // TODO(developer): Replace these variables before running the sample.
    String gcsUri = "gs://cloud-samples-data/video/googlework_short.mp4";
    detectFacesGcs(gcsUri);
  }

  // Detects faces in a video stored in Google Cloud Storage using the Cloud Video Intelligence API.
  public static void detectFacesGcs(String gcsUri) throws Exception {
    try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
        VideoIntelligenceServiceClient.create()) {

      FaceDetectionConfig faceDetectionConfig =
          FaceDetectionConfig.newBuilder()
              // Must set includeBoundingBoxes to true to get facial attributes.
              .setIncludeBoundingBoxes(true)
              .setIncludeAttributes(true)
              .build();
      VideoContext videoContext =
          VideoContext.newBuilder().setFaceDetectionConfig(faceDetectionConfig).build();

      AnnotateVideoRequest request =
          AnnotateVideoRequest.newBuilder()
              .setInputUri(gcsUri)
              .addFeatures(Feature.FACE_DETECTION)
              .setVideoContext(videoContext)
              .build();

      // Detects faces in a video; annotateVideoAsync returns a long-running operation.
      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
          videoIntelligenceServiceClient.annotateVideoAsync(request);

      System.out.println("Waiting for operation to complete...");
      AnnotateVideoResponse response = future.get();

      // Gets annotations for video. Only one video was sent, so only the first result is used.
      VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);

      // Annotations for list of people detected, tracked and recognized in video.
      for (FaceDetectionAnnotation faceDetectionAnnotation :
          annotationResult.getFaceDetectionAnnotationsList()) {
        System.out.print("Face detected:\n");
        for (Track track : faceDetectionAnnotation.getTracksList()) {
          VideoSegment segment = track.getSegment();
          // Print offsets as seconds.milliseconds. The fractional part must be
          // zero-padded ("%03d"); formatting it with "%.0f" would render 1.005s as "1.5s".
          System.out.printf(
              "\tStart: %d.%03ds\n",
              segment.getStartTimeOffset().getSeconds(),
              segment.getStartTimeOffset().getNanos() / 1000000);
          System.out.printf(
              "\tEnd: %d.%03ds\n",
              segment.getEndTimeOffset().getSeconds(),
              segment.getEndTimeOffset().getNanos() / 1000000);

          // Each segment includes timestamped objects that
          // include characteristics of the face detected.
          TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);

          for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
            // Attributes include unique pieces of clothing, like glasses,
            // poses, or hair color.
            System.out.printf("\tAttribute: %s;\n", attribute.getName());
          }
        }
      }
    }
  }
}
// [END video_detect_faces_gcs_beta]
124 changes: 124 additions & 0 deletions video/beta/src/main/java/com/example/video/DetectPerson.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,124 @@
/*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package com.example.video;

// [START video_detect_person_beta]

import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoProgress;
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest;
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse;
import com.google.cloud.videointelligence.v1p3beta1.DetectedAttribute;
import com.google.cloud.videointelligence.v1p3beta1.DetectedLandmark;
import com.google.cloud.videointelligence.v1p3beta1.Feature;
import com.google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation;
import com.google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig;
import com.google.cloud.videointelligence.v1p3beta1.TimestampedObject;
import com.google.cloud.videointelligence.v1p3beta1.Track;
import com.google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults;
import com.google.cloud.videointelligence.v1p3beta1.VideoContext;
import com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceClient;
import com.google.cloud.videointelligence.v1p3beta1.VideoSegment;
import com.google.protobuf.ByteString;

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

/**
 * Sample that detects people in a local video file using the Cloud Video Intelligence API
 * (v1p3beta1), printing each person track's time segment, attributes, and pose landmarks.
 */
public class DetectPerson {

  public static void detectPerson() throws Exception {
    // TODO(developer): Replace these variables before running the sample.
    String localFilePath = "resources/googlework_short.mp4";
    detectPerson(localFilePath);
  }


  // Detects people in a video stored in a local file using the Cloud Video Intelligence API.
  public static void detectPerson(String localFilePath) throws Exception {
    try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
        VideoIntelligenceServiceClient.create()) {
      // Reads a local video file into memory so it can be sent inline with the request.
      Path path = Paths.get(localFilePath);
      byte[] data = Files.readAllBytes(path);
      ByteString inputContent = ByteString.copyFrom(data);

      PersonDetectionConfig personDetectionConfig =
          PersonDetectionConfig.newBuilder()
              // Must set includeBoundingBoxes to true to get poses and attributes.
              .setIncludeBoundingBoxes(true)
              .setIncludePoseLandmarks(true)
              .setIncludeAttributes(true)
              .build();
      VideoContext videoContext =
          VideoContext.newBuilder().setPersonDetectionConfig(personDetectionConfig).build();

      AnnotateVideoRequest request =
          AnnotateVideoRequest.newBuilder()
              .setInputContent(inputContent)
              .addFeatures(Feature.PERSON_DETECTION)
              .setVideoContext(videoContext)
              .build();

      // Detects people in a video; annotateVideoAsync returns a long-running operation.
      // We get the first result because only one video is processed.
      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
          videoIntelligenceServiceClient.annotateVideoAsync(request);

      System.out.println("Waiting for operation to complete...");
      AnnotateVideoResponse response = future.get();

      // Gets annotations for video
      VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);

      // Annotations for list of people detected, tracked and recognized in video.
      for (PersonDetectionAnnotation personDetectionAnnotation :
          annotationResult.getPersonDetectionAnnotationsList()) {
        System.out.print("Person detected:\n");
        for (Track track : personDetectionAnnotation.getTracksList()) {
          VideoSegment segment = track.getSegment();
          // Print offsets as seconds.milliseconds. The fractional part must be
          // zero-padded ("%03d"); formatting it with "%.0f" would render 1.005s as "1.5s".
          System.out.printf(
              "\tStart: %d.%03ds\n",
              segment.getStartTimeOffset().getSeconds(),
              segment.getStartTimeOffset().getNanos() / 1000000);
          System.out.printf(
              "\tEnd: %d.%03ds\n",
              segment.getEndTimeOffset().getSeconds(),
              segment.getEndTimeOffset().getNanos() / 1000000);

          // Each segment includes timestamped objects that include characteristic--e.g. clothes,
          // posture of the person detected.
          TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);

          // Attributes include unique pieces of clothing, poses, or hair color.
          for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
            System.out.printf(
                "\tAttribute: %s; Value: %s\n", attribute.getName(), attribute.getValue());
          }

          // Landmarks in person detection include body parts.
          for (DetectedLandmark attribute : firstTimestampedObject.getLandmarksList()) {
            System.out.printf(
                "\tLandmark: %s; Vertex: %f, %f\n",
                attribute.getName(), attribute.getPoint().getX(), attribute.getPoint().getY());
          }
        }
      }
    }
  }
}
// [END video_detect_person_beta]
Loading