From bac9326f1fce42b4ff25be1da9ab09f2fcd3bfbd Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Wed, 5 Jun 2019 08:59:53 -0700 Subject: [PATCH] feat: add ObjectTrackingConfig and support apiEndpoint (#249) --- .../v1/video_intelligence.proto | 32 ++ .../v1p3beta1/video_intelligence.proto | 253 +++++++++++- .../v1/doc_video_intelligence.js | 42 ++ src/v1/video_intelligence_service_client.js | 14 +- .../video_intelligence_service_client.js | 14 +- .../video_intelligence_service_client.js | 14 +- .../video_intelligence_service_client.js | 14 +- .../video_intelligence_service_client.js | 14 +- .../v1p3beta1/doc_video_intelligence.js | 373 +++++++++++++++++- ...aming_video_intelligence_service_client.js | 14 +- .../video_intelligence_service_client.js | 14 +- synth.metadata | 10 +- test/gapic-v1.js | 23 ++ test/gapic-v1beta1.js | 26 ++ test/gapic-v1beta2.js | 26 ++ test/gapic-v1p1beta1.js | 26 ++ test/gapic-v1p2beta1.js | 26 ++ test/gapic-v1p3beta1.js | 53 +++ 18 files changed, 968 insertions(+), 20 deletions(-) diff --git a/protos/google/cloud/videointelligence/v1/video_intelligence.proto b/protos/google/cloud/videointelligence/v1/video_intelligence.proto index 09b51f5d..3d00a0d6 100644 --- a/protos/google/cloud/videointelligence/v1/video_intelligence.proto +++ b/protos/google/cloud/videointelligence/v1/video_intelligence.proto @@ -108,6 +108,9 @@ message VideoContext { // Config for TEXT_DETECTION. TextDetectionConfig text_detection_config = 8; + + // Config for OBJECT_TRACKING. + ObjectTrackingConfig object_tracking_config = 13; } // Config for LABEL_DETECTION. @@ -126,6 +129,22 @@ message LabelDetectionConfig { // Supported values: "builtin/stable" (the default if unset) and // "builtin/latest". string model = 3; + + // The confidence threshold we perform filtering on the labels from + // frame-level detection. If not set, it is set to 0.4 by default. The valid + // range for this threshold is [0.1, 0.9]. Any value set outside of this + // range will be clipped. + // Note: for best results please follow the default threshold. We will update + // the default threshold everytime when we release a new model. + float frame_confidence_threshold = 4; + + // The confidence threshold we perform filtering on the labels from + // video-level and shot-level detections. If not set, it is set to 0.3 by + // default. The valid range for this threshold is [0.1, 0.9]. Any value set + // outside of this range will be clipped. + // Note: for best results please follow the default threshold. We will update + // the default threshold everytime when we release a new model. + float video_confidence_threshold = 5; } // Config for SHOT_CHANGE_DETECTION. @@ -155,6 +174,14 @@ message FaceDetectionConfig { bool include_bounding_boxes = 2; } +// Config for OBJECT_TRACKING. +message ObjectTrackingConfig { + // Model to use for object tracking. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + // Config for TEXT_DETECTION. message TextDetectionConfig { // Language hint can be specified if the language to be detected is known a @@ -163,6 +190,11 @@ message TextDetectionConfig { // // Automatic language detection is performed if no hint is provided. repeated string language_hints = 1; + + // Model to use for text detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 2; } // Video segment. 
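For reference, a minimal sketch of how the new v1 fields added above might be exercised from the Node.js client, assuming a placeholder GCS URI and the camelCase field names that the generator derives from this proto (objectTrackingConfig, frameConfidenceThreshold, videoConfidenceThreshold, textDetectionConfig.model); this is illustrative only, not part of the patch:

// Sketch only: uses the new v1 VideoContext fields introduced in this change.
const videoIntelligence = require('@google-cloud/video-intelligence');
const client = new videoIntelligence.v1.VideoIntelligenceServiceClient();

async function annotateWithNewConfigs() {
  const request = {
    inputUri: 'gs://my-bucket/my-video.mp4', // placeholder URI
    features: ['LABEL_DETECTION', 'OBJECT_TRACKING', 'TEXT_DETECTION'],
    videoContext: {
      // New: per-request object tracking model selection.
      objectTrackingConfig: {model: 'builtin/stable'},
      // New: frame- and video-level label confidence thresholds,
      // valid range [0.1, 0.9]; values outside the range are clipped.
      labelDetectionConfig: {
        frameConfidenceThreshold: 0.4,
        videoConfidenceThreshold: 0.3,
      },
      // TextDetectionConfig now also accepts a model.
      textDetectionConfig: {model: 'builtin/stable'},
    },
  };
  const [operation] = await client.annotateVideo(request);
  const [result] = await operation.promise();
  console.log(JSON.stringify(result.annotationResults, null, 2));
}

annotateWithNewConfigs().catch(console.error);
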
diff --git a/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto b/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto index dc65a651..763413b1 100644 --- a/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto +++ b/protos/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto @@ -109,8 +109,14 @@ message VideoContext { // Config for EXPLICIT_CONTENT_DETECTION. ExplicitContentDetectionConfig explicit_content_detection_config = 4; + // Config for SPEECH_TRANSCRIPTION. + SpeechTranscriptionConfig speech_transcription_config = 6; + // Config for TEXT_DETECTION. TextDetectionConfig text_detection_config = 8; + + // Config for OBJECT_TRACKING. + ObjectTrackingConfig object_tracking_config = 13; } // Config for LABEL_DETECTION. @@ -129,6 +135,22 @@ message LabelDetectionConfig { // Supported values: "builtin/stable" (the default if unset) and // "builtin/latest". string model = 3; + + // The confidence threshold we perform filtering on the labels from + // frame-level detection. If not set, it is set to 0.4 by default. The valid + // range for this threshold is [0.1, 0.9]. Any value set outside of this + // range will be clipped. + // Note: for best results please follow the default threshold. We will update + // the default threshold everytime when we release a new model. + float frame_confidence_threshold = 4; + + // The confidence threshold we perform filtering on the labels from + // video-level and shot-level detections. If not set, it is set to 0.3 by + // default. The valid range for this threshold is [0.1, 0.9]. Any value set + // outside of this range will be clipped. + // Note: for best results please follow the default threshold. We will update + // the default threshold everytime when we release a new model. + float video_confidence_threshold = 5; } // Config for SHOT_CHANGE_DETECTION. @@ -139,6 +161,14 @@ message ShotChangeDetectionConfig { string model = 1; } +// Config for OBJECT_TRACKING. +message ObjectTrackingConfig { + // Model to use for object tracking. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + // Config for EXPLICIT_CONTENT_DETECTION. message ExplicitContentDetectionConfig { // Model to use for explicit content detection. @@ -155,6 +185,11 @@ message TextDetectionConfig { // // Automatic language detection is performed if no hint is provided. repeated string language_hints = 1; + + // Model to use for text detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 2; } // Video segment. @@ -254,6 +289,51 @@ message NormalizedBoundingBox { float bottom = 4; } +// For tracking related features, such as LOGO_RECOGNITION, FACE_DETECTION, +// CELEBRITY_RECOGNITION, PERSON_DETECTION. +// An object at time_offset with attributes, and located with +// normalized_bounding_box. +message TimestampedObject { + // Normalized Bounding box in a frame, where the object is located. + NormalizedBoundingBox normalized_bounding_box = 1; + + // Time-offset, relative to the beginning of the video, + // corresponding to the video frame for this object. + google.protobuf.Duration time_offset = 2; + + // Optional. The attributes of the object in the bounding box. + repeated DetectedAttribute attributes = 3; +} + +// A track of an object instance. +message Track { + // Video segment of a track. + VideoSegment segment = 1; + + // The object with timestamp and attributes per frame in the track. 
+ repeated TimestampedObject timestamped_objects = 2; + + // Optional. Attributes in the track level. + repeated DetectedAttribute attributes = 3; + + // Optional. The confidence score of the tracked object. + float confidence = 4; +} + +// A generic detected attribute represented by name in string format. +message DetectedAttribute { + // The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc. + // A full list of supported type names will be provided in the document. + string name = 1; + + // Detected attribute confidence. Range [0, 1]. + float confidence = 2; + + // Text value of the detection result. For example, the value for "HairColor" + // can be "black", "blonde", etc. + string value = 3; +} + // Annotation results for a single video. message VideoAnnotationResults { // Video file location in @@ -278,6 +358,9 @@ message VideoAnnotationResults { // Explicit content annotation. ExplicitContentAnnotation explicit_annotation = 7; + // Speech transcription. + repeated SpeechTranscription speech_transcriptions = 11; + // OCR text detection and tracking. // Annotations for list of detected text snippets. Each will have list of // frame information associated with it. @@ -286,6 +369,9 @@ message VideoAnnotationResults { // Annotations for list of objects detected and tracked in video. repeated ObjectTrackingAnnotation object_annotations = 14; + // Annotations for list of logos detected, tracked and recognized in video. + repeated LogoRecognitionAnnotation logo_recognition_annotations = 19; + // If set, indicates an error. Note that for a single `AnnotateVideoRequest` // some videos may succeed and some may fail. google.rpc.Status error = 9; @@ -324,6 +410,142 @@ message AnnotateVideoProgress { repeated VideoAnnotationProgress annotation_progress = 1; } +// Config for SPEECH_TRANSCRIPTION. +message SpeechTranscriptionConfig { + // *Required* The language of the supplied audio as a + // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. + // Example: "en-US". + // See [Language Support](https://cloud.google.com/speech/docs/languages) + // for a list of the currently supported language codes. + string language_code = 1; + + // *Optional* Maximum number of recognition hypotheses to be returned. + // Specifically, the maximum number of `SpeechRecognitionAlternative` messages + // within each `SpeechTranscription`. The server may return fewer than + // `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will + // return a maximum of one. If omitted, will return a maximum of one. + int32 max_alternatives = 2; + + // *Optional* If set to `true`, the server will attempt to filter out + // profanities, replacing all but the initial character in each filtered word + // with asterisks, e.g. "f***". If set to `false` or omitted, profanities + // won't be filtered out. + bool filter_profanity = 3; + + // *Optional* A means to provide context to assist the speech recognition. + repeated SpeechContext speech_contexts = 4; + + // *Optional* If 'true', adds punctuation to recognition result hypotheses. + // This feature is only available in select languages. Setting this for + // requests in other languages has no effect at all. The default 'false' value + // does not add punctuation to result hypotheses. NOTE: "This is currently + // offered as an experimental service, complimentary to all users. In the + // future this may be exclusively available as a premium feature." 
+ bool enable_automatic_punctuation = 5; + + // *Optional* For file formats, such as MXF or MKV, supporting multiple audio + // tracks, specify up to two tracks. Default: track 0. + repeated int32 audio_tracks = 6; + + // *Optional* If 'true', enables speaker detection for each recognized word in + // the top alternative of the recognition result using a speaker_tag provided + // in the WordInfo. + // Note: When this is true, we send all the words from the beginning of the + // audio for the top alternative in every consecutive responses. + // This is done in order to improve our speaker tags as our models learn to + // identify the speakers in the conversation over time. + bool enable_speaker_diarization = 7; + + // *Optional* + // If set, specifies the estimated number of speakers in the conversation. + // If not set, defaults to '2'. + // Ignored unless enable_speaker_diarization is set to true. + int32 diarization_speaker_count = 8; + + // *Optional* If `true`, the top result includes a list of words and the + // confidence for those words. If `false`, no word-level confidence + // information is returned. The default is `false`. + bool enable_word_confidence = 9; +} + +// Provides "hints" to the speech recognizer to favor specific words and phrases +// in the results. +message SpeechContext { + // *Optional* A list of strings containing words and phrases "hints" so that + // the speech recognition is more likely to recognize them. This can be used + // to improve the accuracy for specific words and phrases, for example, if + // specific commands are typically spoken by the user. This can also be used + // to add additional words to the vocabulary of the recognizer. See + // [usage limits](https://cloud.google.com/speech/limits#content). + repeated string phrases = 1; +} + +// A speech recognition result corresponding to a portion of the audio. +message SpeechTranscription { + // May contain one or more recognition hypotheses (up to the maximum specified + // in `max_alternatives`). These alternatives are ordered in terms of + // accuracy, with the top (first) alternative being the most probable, as + // ranked by the recognizer. + repeated SpeechRecognitionAlternative alternatives = 1; + + // Output only. The + // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the + // language in this result. This language code was detected to have the most + // likelihood of being spoken in the audio. + string language_code = 2; +} + +// Alternative hypotheses (a.k.a. n-best list). +message SpeechRecognitionAlternative { + // Transcript text representing the words that the user spoke. + string transcript = 1; + + // The confidence estimate between 0.0 and 1.0. A higher number + // indicates an estimated greater likelihood that the recognized words are + // correct. This field is typically provided only for the top hypothesis, and + // only for `is_final=true` results. Clients should not rely on the + // `confidence` field as it is not guaranteed to be accurate or consistent. + // The default of 0.0 is a sentinel value indicating `confidence` was not set. + float confidence = 2; + + // A list of word-specific information for each recognized word. + repeated WordInfo words = 3; +} + +// Word-specific information for recognized words. Word information is only +// included in the response when certain request parameters are set, such +// as `enable_word_time_offsets`. 
+message WordInfo { + // Time offset relative to the beginning of the audio, and + // corresponding to the start of the spoken word. This field is only set if + // `enable_word_time_offsets=true` and only in the top hypothesis. This is an + // experimental feature and the accuracy of the time offset can vary. + google.protobuf.Duration start_time = 1; + + // Time offset relative to the beginning of the audio, and + // corresponding to the end of the spoken word. This field is only set if + // `enable_word_time_offsets=true` and only in the top hypothesis. This is an + // experimental feature and the accuracy of the time offset can vary. + google.protobuf.Duration end_time = 2; + + // The word corresponding to this set of information. + string word = 3; + + // Output only. The confidence estimate between 0.0 and 1.0. A higher number + // indicates an estimated greater likelihood that the recognized words are + // correct. This field is set only for the top alternative. + // This field is not guaranteed to be accurate and users should not rely on it + // to be always provided. + // The default of 0.0 is a sentinel value indicating `confidence` was not set. + float confidence = 4; + + // Output only. A distinct integer value is assigned for every speaker within + // the audio. This field specifies which one of those speakers was detected to + // have spoken this word. Value ranges from 1 up to diarization_speaker_count, + // and is only set if speaker diarization is enabled. + int32 speaker_tag = 5; +} + // A vertex represents a 2D point in the image. // NOTE: the normalized vertex coordinates are relative to the original image // and range from 0 to 1. @@ -432,6 +654,21 @@ message ObjectTrackingAnnotation { } } +// Annotation corresponding to one detected, tracked and recognized logo class. +message LogoRecognitionAnnotation { + // Entity category information to specify the logo class that all the logo + // tracks within this LogoRecognitionAnnotation are recognized as. + Entity entity = 1; + + // All logo tracks where the recognized logo appears. Each track corresponds + // to one logo instance appearing in consecutive frames. + repeated Track tracks = 2; + + // All video segments where the recognized logo appears. There might be + // multiple instances of the same logo class appearing in one VideoSegment. + repeated VideoSegment segments = 3; +} + // The top-level message sent by the client for the `StreamingAnnotateVideo` // method. Multiple `StreamingAnnotateVideoRequest` messages are sent. // The first message must only contain a `StreamingVideoConfig` message. @@ -450,6 +687,8 @@ message StreamingAnnotateVideoRequest { // `StreamingAnnotateVideoRequest` message containing only // `video_config`, all subsequent `AnnotateStreamingVideoRequest` // messages must only contain `input_content` field. + // Note: as with all bytes fields, protobuffers use a pure binary + // representation (not base64). bytes input_content = 2; } } @@ -458,7 +697,7 @@ message StreamingAnnotateVideoRequest { // by `StreamingAnnotateVideo`. A series of zero or more // `StreamingAnnotateVideoResponse` messages are streamed back to the client. message StreamingAnnotateVideoResponse { - // If set, returns a [google.rpc.Status][] message that + // If set, returns a [google.rpc.Status][google.rpc.Status] message that // specifies the error for the operation. google.rpc.Status error = 1; @@ -522,7 +761,7 @@ message StreamingVideoAnnotationResults { // Label annotation results. 
repeated LabelAnnotation label_annotations = 2; - // Explicit content detection results. + // Explicit content annotation results. ExplicitContentAnnotation explicit_annotation = 3; // Object tracking results. @@ -537,10 +776,10 @@ message StreamingVideoConfig { // Config for requested annotation feature. oneof streaming_config { - // Config for SHOT_CHANGE_DETECTION. + // Config for STREAMING_SHOT_CHANGE_DETECTION. StreamingShotChangeDetectionConfig shot_change_detection_config = 2; - // Config for LABEL_DETECTION. + // Config for STREAMING_LABEL_DETECTION. StreamingLabelDetectionConfig label_detection_config = 3; // Config for STREAMING_EXPLICIT_CONTENT_DETECTION. @@ -569,11 +808,17 @@ enum Feature { // Explicit content detection. EXPLICIT_CONTENT_DETECTION = 3; + // Speech transcription. + SPEECH_TRANSCRIPTION = 6; + // OCR text detection and tracking. TEXT_DETECTION = 7; // Object detection and tracking. OBJECT_TRACKING = 9; + + // Logo detection, tracking, and recognition. + LOGO_RECOGNITION = 12; } // Label detection mode. diff --git a/src/v1/doc/google/cloud/videointelligence/v1/doc_video_intelligence.js b/src/v1/doc/google/cloud/videointelligence/v1/doc_video_intelligence.js index fb77061d..43a0e41d 100644 --- a/src/v1/doc/google/cloud/videointelligence/v1/doc_video_intelligence.js +++ b/src/v1/doc/google/cloud/videointelligence/v1/doc_video_intelligence.js @@ -106,6 +106,11 @@ const AnnotateVideoRequest = { * * This object should have the same structure as [TextDetectionConfig]{@link google.cloud.videointelligence.v1.TextDetectionConfig} * + * @property {Object} objectTrackingConfig + * Config for OBJECT_TRACKING. + * + * This object should have the same structure as [ObjectTrackingConfig]{@link google.cloud.videointelligence.v1.ObjectTrackingConfig} + * * @typedef VideoContext * @memberof google.cloud.videointelligence.v1 * @see [google.cloud.videointelligence.v1.VideoContext definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto} @@ -134,6 +139,22 @@ const VideoContext = { * Supported values: "builtin/stable" (the default if unset) and * "builtin/latest". * + * @property {number} frameConfidenceThreshold + * The confidence threshold we perform filtering on the labels from + * frame-level detection. If not set, it is set to 0.4 by default. The valid + * range for this threshold is [0.1, 0.9]. Any value set outside of this + * range will be clipped. + * Note: for best results please follow the default threshold. We will update + * the default threshold everytime when we release a new model. + * + * @property {number} videoConfidenceThreshold + * The confidence threshold we perform filtering on the labels from + * video-level and shot-level detections. If not set, it is set to 0.3 by + * default. The valid range for this threshold is [0.1, 0.9]. Any value set + * outside of this range will be clipped. + * Note: for best results please follow the default threshold. We will update + * the default threshold everytime when we release a new model. + * * @typedef LabelDetectionConfig * @memberof google.cloud.videointelligence.v1 * @see [google.cloud.videointelligence.v1.LabelDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto} @@ -193,6 +214,22 @@ const FaceDetectionConfig = { // This is for documentation. Actual contents will be loaded by gRPC. }; +/** + * Config for OBJECT_TRACKING. 
+ * + * @property {string} model + * Model to use for object tracking. + * Supported values: "builtin/stable" (the default if unset) and + * "builtin/latest". + * + * @typedef ObjectTrackingConfig + * @memberof google.cloud.videointelligence.v1 + * @see [google.cloud.videointelligence.v1.ObjectTrackingConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto} + */ +const ObjectTrackingConfig = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + /** * Config for TEXT_DETECTION. * @@ -203,6 +240,11 @@ const FaceDetectionConfig = { * * Automatic language detection is performed if no hint is provided. * + * @property {string} model + * Model to use for text detection. + * Supported values: "builtin/stable" (the default if unset) and + * "builtin/latest". + * * @typedef TextDetectionConfig * @memberof google.cloud.videointelligence.v1 * @see [google.cloud.videointelligence.v1.TextDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1/video_intelligence.proto} diff --git a/src/v1/video_intelligence_service_client.js b/src/v1/video_intelligence_service_client.js index a9df1a26..56b43b22 100644 --- a/src/v1/video_intelligence_service_client.js +++ b/src/v1/video_intelligence_service_client.js @@ -56,14 +56,18 @@ class VideoIntelligenceServiceClient { * API remote host. */ constructor(opts) { + opts = opts || {}; this._descriptors = {}; + const servicePath = + opts.servicePath || opts.apiEndpoint || this.constructor.servicePath; + // Ensure that options include the service address and port. opts = Object.assign( { clientConfig: {}, port: this.constructor.port, - servicePath: this.constructor.servicePath, + servicePath, }, opts ); @@ -176,6 +180,14 @@ class VideoIntelligenceServiceClient { return 'videointelligence.googleapis.com'; } + /** + * The DNS address for this API service - same as servicePath(), + * exists for compatibility reasons. + */ + static get apiEndpoint() { + return 'videointelligence.googleapis.com'; + } + /** * The port for this API service. */ diff --git a/src/v1beta1/video_intelligence_service_client.js b/src/v1beta1/video_intelligence_service_client.js index 7fdf378b..42f73510 100644 --- a/src/v1beta1/video_intelligence_service_client.js +++ b/src/v1beta1/video_intelligence_service_client.js @@ -56,14 +56,18 @@ class VideoIntelligenceServiceClient { * API remote host. */ constructor(opts) { + opts = opts || {}; this._descriptors = {}; + const servicePath = + opts.servicePath || opts.apiEndpoint || this.constructor.servicePath; + // Ensure that options include the service address and port. opts = Object.assign( { clientConfig: {}, port: this.constructor.port, - servicePath: this.constructor.servicePath, + servicePath, }, opts ); @@ -176,6 +180,14 @@ class VideoIntelligenceServiceClient { return 'videointelligence.googleapis.com'; } + /** + * The DNS address for this API service - same as servicePath(), + * exists for compatibility reasons. + */ + static get apiEndpoint() { + return 'videointelligence.googleapis.com'; + } + /** * The port for this API service. 
*/ diff --git a/src/v1beta2/video_intelligence_service_client.js b/src/v1beta2/video_intelligence_service_client.js index b85f0756..eced0a6c 100644 --- a/src/v1beta2/video_intelligence_service_client.js +++ b/src/v1beta2/video_intelligence_service_client.js @@ -56,14 +56,18 @@ class VideoIntelligenceServiceClient { * API remote host. */ constructor(opts) { + opts = opts || {}; this._descriptors = {}; + const servicePath = + opts.servicePath || opts.apiEndpoint || this.constructor.servicePath; + // Ensure that options include the service address and port. opts = Object.assign( { clientConfig: {}, port: this.constructor.port, - servicePath: this.constructor.servicePath, + servicePath, }, opts ); @@ -176,6 +180,14 @@ class VideoIntelligenceServiceClient { return 'videointelligence.googleapis.com'; } + /** + * The DNS address for this API service - same as servicePath(), + * exists for compatibility reasons. + */ + static get apiEndpoint() { + return 'videointelligence.googleapis.com'; + } + /** * The port for this API service. */ diff --git a/src/v1p1beta1/video_intelligence_service_client.js b/src/v1p1beta1/video_intelligence_service_client.js index f941f2ff..953d8d3b 100644 --- a/src/v1p1beta1/video_intelligence_service_client.js +++ b/src/v1p1beta1/video_intelligence_service_client.js @@ -56,14 +56,18 @@ class VideoIntelligenceServiceClient { * API remote host. */ constructor(opts) { + opts = opts || {}; this._descriptors = {}; + const servicePath = + opts.servicePath || opts.apiEndpoint || this.constructor.servicePath; + // Ensure that options include the service address and port. opts = Object.assign( { clientConfig: {}, port: this.constructor.port, - servicePath: this.constructor.servicePath, + servicePath, }, opts ); @@ -176,6 +180,14 @@ class VideoIntelligenceServiceClient { return 'videointelligence.googleapis.com'; } + /** + * The DNS address for this API service - same as servicePath(), + * exists for compatibility reasons. + */ + static get apiEndpoint() { + return 'videointelligence.googleapis.com'; + } + /** * The port for this API service. */ diff --git a/src/v1p2beta1/video_intelligence_service_client.js b/src/v1p2beta1/video_intelligence_service_client.js index 8d51b8a9..f098bf22 100644 --- a/src/v1p2beta1/video_intelligence_service_client.js +++ b/src/v1p2beta1/video_intelligence_service_client.js @@ -56,14 +56,18 @@ class VideoIntelligenceServiceClient { * API remote host. */ constructor(opts) { + opts = opts || {}; this._descriptors = {}; + const servicePath = + opts.servicePath || opts.apiEndpoint || this.constructor.servicePath; + // Ensure that options include the service address and port. opts = Object.assign( { clientConfig: {}, port: this.constructor.port, - servicePath: this.constructor.servicePath, + servicePath, }, opts ); @@ -176,6 +180,14 @@ class VideoIntelligenceServiceClient { return 'videointelligence.googleapis.com'; } + /** + * The DNS address for this API service - same as servicePath(), + * exists for compatibility reasons. + */ + static get apiEndpoint() { + return 'videointelligence.googleapis.com'; + } + /** * The port for this API service. 
*/ diff --git a/src/v1p3beta1/doc/google/cloud/videointelligence/v1p3beta1/doc_video_intelligence.js b/src/v1p3beta1/doc/google/cloud/videointelligence/v1p3beta1/doc_video_intelligence.js index 48ee5eb0..2b706204 100644 --- a/src/v1p3beta1/doc/google/cloud/videointelligence/v1p3beta1/doc_video_intelligence.js +++ b/src/v1p3beta1/doc/google/cloud/videointelligence/v1p3beta1/doc_video_intelligence.js @@ -91,11 +91,21 @@ const AnnotateVideoRequest = { * * This object should have the same structure as [ExplicitContentDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.ExplicitContentDetectionConfig} * + * @property {Object} speechTranscriptionConfig + * Config for SPEECH_TRANSCRIPTION. + * + * This object should have the same structure as [SpeechTranscriptionConfig]{@link google.cloud.videointelligence.v1p3beta1.SpeechTranscriptionConfig} + * * @property {Object} textDetectionConfig * Config for TEXT_DETECTION. * * This object should have the same structure as [TextDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.TextDetectionConfig} * + * @property {Object} objectTrackingConfig + * Config for OBJECT_TRACKING. + * + * This object should have the same structure as [ObjectTrackingConfig]{@link google.cloud.videointelligence.v1p3beta1.ObjectTrackingConfig} + * * @typedef VideoContext * @memberof google.cloud.videointelligence.v1p3beta1 * @see [google.cloud.videointelligence.v1p3beta1.VideoContext definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} @@ -124,6 +134,22 @@ const VideoContext = { * Supported values: "builtin/stable" (the default if unset) and * "builtin/latest". * + * @property {number} frameConfidenceThreshold + * The confidence threshold we perform filtering on the labels from + * frame-level detection. If not set, it is set to 0.4 by default. The valid + * range for this threshold is [0.1, 0.9]. Any value set outside of this + * range will be clipped. + * Note: for best results please follow the default threshold. We will update + * the default threshold everytime when we release a new model. + * + * @property {number} videoConfidenceThreshold + * The confidence threshold we perform filtering on the labels from + * video-level and shot-level detections. If not set, it is set to 0.3 by + * default. The valid range for this threshold is [0.1, 0.9]. Any value set + * outside of this range will be clipped. + * Note: for best results please follow the default threshold. We will update + * the default threshold everytime when we release a new model. + * * @typedef LabelDetectionConfig * @memberof google.cloud.videointelligence.v1p3beta1 * @see [google.cloud.videointelligence.v1p3beta1.LabelDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} @@ -148,6 +174,22 @@ const ShotChangeDetectionConfig = { // This is for documentation. Actual contents will be loaded by gRPC. }; +/** + * Config for OBJECT_TRACKING. + * + * @property {string} model + * Model to use for object tracking. + * Supported values: "builtin/stable" (the default if unset) and + * "builtin/latest". 
+ * + * @typedef ObjectTrackingConfig + * @memberof google.cloud.videointelligence.v1p3beta1 + * @see [google.cloud.videointelligence.v1p3beta1.ObjectTrackingConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} + */ +const ObjectTrackingConfig = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + /** * Config for EXPLICIT_CONTENT_DETECTION. * @@ -174,6 +216,11 @@ const ExplicitContentDetectionConfig = { * * Automatic language detection is performed if no hint is provided. * + * @property {string} model + * Model to use for text detection. + * Supported values: "builtin/stable" (the default if unset) and + * "builtin/latest". + * * @typedef TextDetectionConfig * @memberof google.cloud.videointelligence.v1p3beta1 * @see [google.cloud.videointelligence.v1p3beta1.TextDetectionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} @@ -365,6 +412,87 @@ const NormalizedBoundingBox = { // This is for documentation. Actual contents will be loaded by gRPC. }; +/** + * For tracking related features, such as LOGO_RECOGNITION, FACE_DETECTION, + * CELEBRITY_RECOGNITION, PERSON_DETECTION. + * An object at time_offset with attributes, and located with + * normalized_bounding_box. + * + * @property {Object} normalizedBoundingBox + * Normalized Bounding box in a frame, where the object is located. + * + * This object should have the same structure as [NormalizedBoundingBox]{@link google.cloud.videointelligence.v1p3beta1.NormalizedBoundingBox} + * + * @property {Object} timeOffset + * Time-offset, relative to the beginning of the video, + * corresponding to the video frame for this object. + * + * This object should have the same structure as [Duration]{@link google.protobuf.Duration} + * + * @property {Object[]} attributes + * Optional. The attributes of the object in the bounding box. + * + * This object should have the same structure as [DetectedAttribute]{@link google.cloud.videointelligence.v1p3beta1.DetectedAttribute} + * + * @typedef TimestampedObject + * @memberof google.cloud.videointelligence.v1p3beta1 + * @see [google.cloud.videointelligence.v1p3beta1.TimestampedObject definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} + */ +const TimestampedObject = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * A track of an object instance. + * + * @property {Object} segment + * Video segment of a track. + * + * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p3beta1.VideoSegment} + * + * @property {Object[]} timestampedObjects + * The object with timestamp and attributes per frame in the track. + * + * This object should have the same structure as [TimestampedObject]{@link google.cloud.videointelligence.v1p3beta1.TimestampedObject} + * + * @property {Object[]} attributes + * Optional. Attributes in the track level. + * + * This object should have the same structure as [DetectedAttribute]{@link google.cloud.videointelligence.v1p3beta1.DetectedAttribute} + * + * @property {number} confidence + * Optional. The confidence score of the tracked object. 
+ * + * @typedef Track + * @memberof google.cloud.videointelligence.v1p3beta1 + * @see [google.cloud.videointelligence.v1p3beta1.Track definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} + */ +const Track = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * A generic detected attribute represented by name in string format. + * + * @property {string} name + * The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc. + * A full list of supported type names will be provided in the document. + * + * @property {number} confidence + * Detected attribute confidence. Range [0, 1]. + * + * @property {string} value + * Text value of the detection result. For example, the value for "HairColor" + * can be "black", "blonde", etc. + * + * @typedef DetectedAttribute + * @memberof google.cloud.videointelligence.v1p3beta1 + * @see [google.cloud.videointelligence.v1p3beta1.DetectedAttribute definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} + */ +const DetectedAttribute = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + /** * Annotation results for a single video. * @@ -400,6 +528,11 @@ const NormalizedBoundingBox = { * * This object should have the same structure as [ExplicitContentAnnotation]{@link google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation} * + * @property {Object[]} speechTranscriptions + * Speech transcription. + * + * This object should have the same structure as [SpeechTranscription]{@link google.cloud.videointelligence.v1p3beta1.SpeechTranscription} + * * @property {Object[]} textAnnotations * OCR text detection and tracking. * Annotations for list of detected text snippets. Each will have list of @@ -412,6 +545,11 @@ const NormalizedBoundingBox = { * * This object should have the same structure as [ObjectTrackingAnnotation]{@link google.cloud.videointelligence.v1p3beta1.ObjectTrackingAnnotation} * + * @property {Object[]} logoRecognitionAnnotations + * Annotations for list of logos detected, tracked and recognized in video. + * + * This object should have the same structure as [LogoRecognitionAnnotation]{@link google.cloud.videointelligence.v1p3beta1.LogoRecognitionAnnotation} + * * @property {Object} error * If set, indicates an error. Note that for a single `AnnotateVideoRequest` * some videos may succeed and some may fail. @@ -491,6 +629,192 @@ const AnnotateVideoProgress = { // This is for documentation. Actual contents will be loaded by gRPC. }; +/** + * Config for SPEECH_TRANSCRIPTION. + * + * @property {string} languageCode + * *Required* The language of the supplied audio as a + * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. + * Example: "en-US". + * See [Language Support](https://cloud.google.com/speech/docs/languages) + * for a list of the currently supported language codes. + * + * @property {number} maxAlternatives + * *Optional* Maximum number of recognition hypotheses to be returned. + * Specifically, the maximum number of `SpeechRecognitionAlternative` messages + * within each `SpeechTranscription`. The server may return fewer than + * `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will + * return a maximum of one. If omitted, will return a maximum of one. 
+ * + * @property {boolean} filterProfanity + * *Optional* If set to `true`, the server will attempt to filter out + * profanities, replacing all but the initial character in each filtered word + * with asterisks, e.g. "f***". If set to `false` or omitted, profanities + * won't be filtered out. + * + * @property {Object[]} speechContexts + * *Optional* A means to provide context to assist the speech recognition. + * + * This object should have the same structure as [SpeechContext]{@link google.cloud.videointelligence.v1p3beta1.SpeechContext} + * + * @property {boolean} enableAutomaticPunctuation + * *Optional* If 'true', adds punctuation to recognition result hypotheses. + * This feature is only available in select languages. Setting this for + * requests in other languages has no effect at all. The default 'false' value + * does not add punctuation to result hypotheses. NOTE: "This is currently + * offered as an experimental service, complimentary to all users. In the + * future this may be exclusively available as a premium feature." + * + * @property {number[]} audioTracks + * *Optional* For file formats, such as MXF or MKV, supporting multiple audio + * tracks, specify up to two tracks. Default: track 0. + * + * @property {boolean} enableSpeakerDiarization + * *Optional* If 'true', enables speaker detection for each recognized word in + * the top alternative of the recognition result using a speaker_tag provided + * in the WordInfo. + * Note: When this is true, we send all the words from the beginning of the + * audio for the top alternative in every consecutive responses. + * This is done in order to improve our speaker tags as our models learn to + * identify the speakers in the conversation over time. + * + * @property {number} diarizationSpeakerCount + * *Optional* + * If set, specifies the estimated number of speakers in the conversation. + * If not set, defaults to '2'. + * Ignored unless enable_speaker_diarization is set to true. + * + * @property {boolean} enableWordConfidence + * *Optional* If `true`, the top result includes a list of words and the + * confidence for those words. If `false`, no word-level confidence + * information is returned. The default is `false`. + * + * @typedef SpeechTranscriptionConfig + * @memberof google.cloud.videointelligence.v1p3beta1 + * @see [google.cloud.videointelligence.v1p3beta1.SpeechTranscriptionConfig definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} + */ +const SpeechTranscriptionConfig = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Provides "hints" to the speech recognizer to favor specific words and phrases + * in the results. + * + * @property {string[]} phrases + * *Optional* A list of strings containing words and phrases "hints" so that + * the speech recognition is more likely to recognize them. This can be used + * to improve the accuracy for specific words and phrases, for example, if + * specific commands are typically spoken by the user. This can also be used + * to add additional words to the vocabulary of the recognizer. See + * [usage limits](https://cloud.google.com/speech/limits#content). 
+ * + * @typedef SpeechContext + * @memberof google.cloud.videointelligence.v1p3beta1 + * @see [google.cloud.videointelligence.v1p3beta1.SpeechContext definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} + */ +const SpeechContext = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * A speech recognition result corresponding to a portion of the audio. + * + * @property {Object[]} alternatives + * May contain one or more recognition hypotheses (up to the maximum specified + * in `max_alternatives`). These alternatives are ordered in terms of + * accuracy, with the top (first) alternative being the most probable, as + * ranked by the recognizer. + * + * This object should have the same structure as [SpeechRecognitionAlternative]{@link google.cloud.videointelligence.v1p3beta1.SpeechRecognitionAlternative} + * + * @property {string} languageCode + * Output only. The + * [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the + * language in this result. This language code was detected to have the most + * likelihood of being spoken in the audio. + * + * @typedef SpeechTranscription + * @memberof google.cloud.videointelligence.v1p3beta1 + * @see [google.cloud.videointelligence.v1p3beta1.SpeechTranscription definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} + */ +const SpeechTranscription = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Alternative hypotheses (a.k.a. n-best list). + * + * @property {string} transcript + * Transcript text representing the words that the user spoke. + * + * @property {number} confidence + * The confidence estimate between 0.0 and 1.0. A higher number + * indicates an estimated greater likelihood that the recognized words are + * correct. This field is typically provided only for the top hypothesis, and + * only for `is_final=true` results. Clients should not rely on the + * `confidence` field as it is not guaranteed to be accurate or consistent. + * The default of 0.0 is a sentinel value indicating `confidence` was not set. + * + * @property {Object[]} words + * A list of word-specific information for each recognized word. + * + * This object should have the same structure as [WordInfo]{@link google.cloud.videointelligence.v1p3beta1.WordInfo} + * + * @typedef SpeechRecognitionAlternative + * @memberof google.cloud.videointelligence.v1p3beta1 + * @see [google.cloud.videointelligence.v1p3beta1.SpeechRecognitionAlternative definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} + */ +const SpeechRecognitionAlternative = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + +/** + * Word-specific information for recognized words. Word information is only + * included in the response when certain request parameters are set, such + * as `enable_word_time_offsets`. + * + * @property {Object} startTime + * Time offset relative to the beginning of the audio, and + * corresponding to the start of the spoken word. This field is only set if + * `enable_word_time_offsets=true` and only in the top hypothesis. This is an + * experimental feature and the accuracy of the time offset can vary. 
+ * + * This object should have the same structure as [Duration]{@link google.protobuf.Duration} + * + * @property {Object} endTime + * Time offset relative to the beginning of the audio, and + * corresponding to the end of the spoken word. This field is only set if + * `enable_word_time_offsets=true` and only in the top hypothesis. This is an + * experimental feature and the accuracy of the time offset can vary. + * + * This object should have the same structure as [Duration]{@link google.protobuf.Duration} + * + * @property {string} word + * The word corresponding to this set of information. + * + * @property {number} confidence + * Output only. The confidence estimate between 0.0 and 1.0. A higher number + * indicates an estimated greater likelihood that the recognized words are + * correct. This field is set only for the top alternative. + * This field is not guaranteed to be accurate and users should not rely on it + * to be always provided. + * The default of 0.0 is a sentinel value indicating `confidence` was not set. + * + * @property {number} speakerTag + * Output only. A distinct integer value is assigned for every speaker within + * the audio. This field specifies which one of those speakers was detected to + * have spoken this word. Value ranges from 1 up to diarization_speaker_count, + * and is only set if speaker diarization is enabled. + * + * @typedef WordInfo + * @memberof google.cloud.videointelligence.v1p3beta1 + * @see [google.cloud.videointelligence.v1p3beta1.WordInfo definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} + */ +const WordInfo = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + /** * A vertex represents a 2D point in the image. * NOTE: the normalized vertex coordinates are relative to the original image @@ -674,6 +998,35 @@ const ObjectTrackingAnnotation = { // This is for documentation. Actual contents will be loaded by gRPC. }; +/** + * Annotation corresponding to one detected, tracked and recognized logo class. + * + * @property {Object} entity + * Entity category information to specify the logo class that all the logo + * tracks within this LogoRecognitionAnnotation are recognized as. + * + * This object should have the same structure as [Entity]{@link google.cloud.videointelligence.v1p3beta1.Entity} + * + * @property {Object[]} tracks + * All logo tracks where the recognized logo appears. Each track corresponds + * to one logo instance appearing in consecutive frames. + * + * This object should have the same structure as [Track]{@link google.cloud.videointelligence.v1p3beta1.Track} + * + * @property {Object[]} segments + * All video segments where the recognized logo appears. There might be + * multiple instances of the same logo class appearing in one VideoSegment. + * + * This object should have the same structure as [VideoSegment]{@link google.cloud.videointelligence.v1p3beta1.VideoSegment} + * + * @typedef LogoRecognitionAnnotation + * @memberof google.cloud.videointelligence.v1p3beta1 + * @see [google.cloud.videointelligence.v1p3beta1.LogoRecognitionAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/videointelligence/v1p3beta1/video_intelligence.proto} + */ +const LogoRecognitionAnnotation = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; + /** * The top-level message sent by the client for the `StreamingAnnotateVideo` * method. 
Multiple `StreamingAnnotateVideoRequest` messages are sent. @@ -693,6 +1046,8 @@ const ObjectTrackingAnnotation = { * `StreamingAnnotateVideoRequest` message containing only * `video_config`, all subsequent `AnnotateStreamingVideoRequest` * messages must only contain `input_content` field. + * Note: as with all bytes fields, protobuffers use a pure binary + * representation (not base64). * * @typedef StreamingAnnotateVideoRequest * @memberof google.cloud.videointelligence.v1p3beta1 @@ -822,7 +1177,7 @@ const StreamingStorageConfig = { * This object should have the same structure as [LabelAnnotation]{@link google.cloud.videointelligence.v1p3beta1.LabelAnnotation} * * @property {Object} explicitAnnotation - * Explicit content detection results. + * Explicit content annotation results. * * This object should have the same structure as [ExplicitContentAnnotation]{@link google.cloud.videointelligence.v1p3beta1.ExplicitContentAnnotation} * @@ -849,12 +1204,12 @@ const StreamingVideoAnnotationResults = { * The number should be among the values of [StreamingFeature]{@link google.cloud.videointelligence.v1p3beta1.StreamingFeature} * * @property {Object} shotChangeDetectionConfig - * Config for SHOT_CHANGE_DETECTION. + * Config for STREAMING_SHOT_CHANGE_DETECTION. * * This object should have the same structure as [StreamingShotChangeDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.StreamingShotChangeDetectionConfig} * * @property {Object} labelDetectionConfig - * Config for LABEL_DETECTION. + * Config for STREAMING_LABEL_DETECTION. * * This object should have the same structure as [StreamingLabelDetectionConfig]{@link google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfig} * @@ -909,6 +1264,11 @@ const Feature = { */ EXPLICIT_CONTENT_DETECTION: 3, + /** + * Speech transcription. + */ + SPEECH_TRANSCRIPTION: 6, + /** * OCR text detection and tracking. */ @@ -917,7 +1277,12 @@ const Feature = { /** * Object detection and tracking. */ - OBJECT_TRACKING: 9 + OBJECT_TRACKING: 9, + + /** + * Logo detection, tracking, and recognition. + */ + LOGO_RECOGNITION: 12 }; /** diff --git a/src/v1p3beta1/streaming_video_intelligence_service_client.js b/src/v1p3beta1/streaming_video_intelligence_service_client.js index 29e2e762..b893493e 100644 --- a/src/v1p3beta1/streaming_video_intelligence_service_client.js +++ b/src/v1p3beta1/streaming_video_intelligence_service_client.js @@ -55,14 +55,18 @@ class StreamingVideoIntelligenceServiceClient { * API remote host. */ constructor(opts) { + opts = opts || {}; this._descriptors = {}; + const servicePath = + opts.servicePath || opts.apiEndpoint || this.constructor.servicePath; + // Ensure that options include the service address and port. opts = Object.assign( { clientConfig: {}, port: this.constructor.port, - servicePath: this.constructor.servicePath, + servicePath, }, opts ); @@ -152,6 +156,14 @@ class StreamingVideoIntelligenceServiceClient { return 'videointelligence.googleapis.com'; } + /** + * The DNS address for this API service - same as servicePath(), + * exists for compatibility reasons. + */ + static get apiEndpoint() { + return 'videointelligence.googleapis.com'; + } + /** * The port for this API service. 
*/ diff --git a/src/v1p3beta1/video_intelligence_service_client.js b/src/v1p3beta1/video_intelligence_service_client.js index 90a7d7a0..f16ca8fc 100644 --- a/src/v1p3beta1/video_intelligence_service_client.js +++ b/src/v1p3beta1/video_intelligence_service_client.js @@ -56,14 +56,18 @@ class VideoIntelligenceServiceClient { * API remote host. */ constructor(opts) { + opts = opts || {}; this._descriptors = {}; + const servicePath = + opts.servicePath || opts.apiEndpoint || this.constructor.servicePath; + // Ensure that options include the service address and port. opts = Object.assign( { clientConfig: {}, port: this.constructor.port, - servicePath: this.constructor.servicePath, + servicePath, }, opts ); @@ -176,6 +180,14 @@ class VideoIntelligenceServiceClient { return 'videointelligence.googleapis.com'; } + /** + * The DNS address for this API service - same as servicePath(), + * exists for compatibility reasons. + */ + static get apiEndpoint() { + return 'videointelligence.googleapis.com'; + } + /** * The port for this API service. */ diff --git a/synth.metadata b/synth.metadata index 85d62d1c..a08fe7db 100644 --- a/synth.metadata +++ b/synth.metadata @@ -1,19 +1,19 @@ { - "updateTime": "2019-05-21T11:29:32.143324Z", + "updateTime": "2019-06-05T14:30:25.384482Z", "sources": [ { "generator": { "name": "artman", - "version": "0.20.0", - "dockerImage": "googleapis/artman@sha256:3246adac900f4bdbd62920e80de2e5877380e44036b3feae13667ec255ebf5ec" + "version": "0.23.1", + "dockerImage": "googleapis/artman@sha256:9d5cae1454da64ac3a87028f8ef486b04889e351c83bb95e83b8fab3959faed0" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "32a10f69e2c9ce15bba13ab1ff928bacebb25160", - "internalRef": "249058354" + "sha": "47c142a7cecc6efc9f6f8af804b8be55392b795b", + "internalRef": "251635729" } }, { diff --git a/test/gapic-v1.js b/test/gapic-v1.js index 45c4e522..5dad8ce8 100644 --- a/test/gapic-v1.js +++ b/test/gapic-v1.js @@ -23,6 +23,29 @@ const error = new Error(); error.code = FAKE_STATUS_CODE; describe('VideoIntelligenceServiceClient', () => { + it('has servicePath', () => { + const servicePath = + videoIntelligenceModule.v1.VideoIntelligenceServiceClient.servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = + videoIntelligenceModule.v1.VideoIntelligenceServiceClient.apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = videoIntelligenceModule.v1.VideoIntelligenceServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no options', () => { + const client = new videoIntelligenceModule.v1.VideoIntelligenceServiceClient(); + assert(client); + }); + describe('annotateVideo', function() { it('invokes annotateVideo without error', done => { const client = new videoIntelligenceModule.v1.VideoIntelligenceServiceClient( diff --git a/test/gapic-v1beta1.js b/test/gapic-v1beta1.js index 0842fe21..2e1829e9 100644 --- a/test/gapic-v1beta1.js +++ b/test/gapic-v1beta1.js @@ -23,6 +23,32 @@ const error = new Error(); error.code = FAKE_STATUS_CODE; describe('VideoIntelligenceServiceClient', () => { + it('has servicePath', () => { + const servicePath = + videointelligenceModule.v1beta1.VideoIntelligenceServiceClient + .servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = + videointelligenceModule.v1beta1.VideoIntelligenceServiceClient + .apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', 
() => { + const port = + videointelligenceModule.v1beta1.VideoIntelligenceServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no options', () => { + const client = new videointelligenceModule.v1beta1.VideoIntelligenceServiceClient(); + assert(client); + }); + describe('annotateVideo', function() { it('invokes annotateVideo without error', done => { const client = new videointelligenceModule.v1beta1.VideoIntelligenceServiceClient( diff --git a/test/gapic-v1beta2.js b/test/gapic-v1beta2.js index 43d1f660..57180558 100644 --- a/test/gapic-v1beta2.js +++ b/test/gapic-v1beta2.js @@ -23,6 +23,32 @@ const error = new Error(); error.code = FAKE_STATUS_CODE; describe('VideoIntelligenceServiceClient', () => { + it('has servicePath', () => { + const servicePath = + videoIntelligenceModule.v1beta2.VideoIntelligenceServiceClient + .servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = + videoIntelligenceModule.v1beta2.VideoIntelligenceServiceClient + .apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = + videoIntelligenceModule.v1beta2.VideoIntelligenceServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no options', () => { + const client = new videoIntelligenceModule.v1beta2.VideoIntelligenceServiceClient(); + assert(client); + }); + describe('annotateVideo', function() { it('invokes annotateVideo without error', done => { const client = new videoIntelligenceModule.v1beta2.VideoIntelligenceServiceClient( diff --git a/test/gapic-v1p1beta1.js b/test/gapic-v1p1beta1.js index 08f1266c..68391fa5 100644 --- a/test/gapic-v1p1beta1.js +++ b/test/gapic-v1p1beta1.js @@ -23,6 +23,32 @@ const error = new Error(); error.code = FAKE_STATUS_CODE; describe('VideoIntelligenceServiceClient', () => { + it('has servicePath', () => { + const servicePath = + videoIntelligenceModule.v1p1beta1.VideoIntelligenceServiceClient + .servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = + videoIntelligenceModule.v1p1beta1.VideoIntelligenceServiceClient + .apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = + videoIntelligenceModule.v1p1beta1.VideoIntelligenceServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no options', () => { + const client = new videoIntelligenceModule.v1p1beta1.VideoIntelligenceServiceClient(); + assert(client); + }); + describe('annotateVideo', function() { it('invokes annotateVideo without error', done => { const client = new videoIntelligenceModule.v1p1beta1.VideoIntelligenceServiceClient( diff --git a/test/gapic-v1p2beta1.js b/test/gapic-v1p2beta1.js index c9af82d8..2995558c 100644 --- a/test/gapic-v1p2beta1.js +++ b/test/gapic-v1p2beta1.js @@ -23,6 +23,32 @@ const error = new Error(); error.code = FAKE_STATUS_CODE; describe('VideoIntelligenceServiceClient', () => { + it('has servicePath', () => { + const servicePath = + videoIntelligenceModule.v1p2beta1.VideoIntelligenceServiceClient + .servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = + videoIntelligenceModule.v1p2beta1.VideoIntelligenceServiceClient + .apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = + videoIntelligenceModule.v1p2beta1.VideoIntelligenceServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + 
it('should create a client with no options', () => { + const client = new videoIntelligenceModule.v1p2beta1.VideoIntelligenceServiceClient(); + assert(client); + }); + describe('annotateVideo', function() { it('invokes annotateVideo without error', done => { const client = new videoIntelligenceModule.v1p2beta1.VideoIntelligenceServiceClient( diff --git a/test/gapic-v1p3beta1.js b/test/gapic-v1p3beta1.js index 79c56511..fa30cdb8 100644 --- a/test/gapic-v1p3beta1.js +++ b/test/gapic-v1p3beta1.js @@ -24,6 +24,32 @@ const error = new Error(); error.code = FAKE_STATUS_CODE; describe('VideoIntelligenceServiceClient', () => { + it('has servicePath', () => { + const servicePath = + videoIntelligenceModule.v1p3beta1.VideoIntelligenceServiceClient + .servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = + videoIntelligenceModule.v1p3beta1.VideoIntelligenceServiceClient + .apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = + videoIntelligenceModule.v1p3beta1.VideoIntelligenceServiceClient.port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no options', () => { + const client = new videoIntelligenceModule.v1p3beta1.VideoIntelligenceServiceClient(); + assert(client); + }); + describe('annotateVideo', function() { it('invokes annotateVideo without error', done => { const client = new videoIntelligenceModule.v1p3beta1.VideoIntelligenceServiceClient( @@ -125,6 +151,33 @@ describe('VideoIntelligenceServiceClient', () => { }); }); describe('StreamingVideoIntelligenceServiceClient', () => { + it('has servicePath', () => { + const servicePath = + videoIntelligenceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient + .servicePath; + assert(servicePath); + }); + + it('has apiEndpoint', () => { + const apiEndpoint = + videoIntelligenceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient + .apiEndpoint; + assert(apiEndpoint); + }); + + it('has port', () => { + const port = + videoIntelligenceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient + .port; + assert(port); + assert(typeof port === 'number'); + }); + + it('should create a client with no options', () => { + const client = new videoIntelligenceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient(); + assert(client); + }); + describe('streamingAnnotateVideo', () => { it('invokes streamingAnnotateVideo without error', done => { const client = new videoIntelligenceModule.v1p3beta1.StreamingVideoIntelligenceServiceClient(
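As a usage note for the apiEndpoint support that the constructor changes above add to every client, here is a minimal sketch with a placeholder hostname for the override; because of the opts.servicePath || opts.apiEndpoint || this.constructor.servicePath fallback, an explicit servicePath still takes precedence when both are supplied:

// Sketch only: the client constructors now accept `apiEndpoint` as an
// alternative to `servicePath` when resolving the service address.
const videoIntelligence = require('@google-cloud/video-intelligence');

// Default endpoint, also exposed via the new static apiEndpoint getter.
console.log(videoIntelligence.v1.VideoIntelligenceServiceClient.apiEndpoint);
// -> 'videointelligence.googleapis.com'

// Point the client at a different endpoint (placeholder hostname).
const client = new videoIntelligence.v1.VideoIntelligenceServiceClient({
  apiEndpoint: 'videointelligence.example.com',
});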