diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionClient.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionClient.java index ddb827c4a2a5..13c3aa3214a1 100644 --- a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionClient.java +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionClient.java @@ -477,7 +477,7 @@ public interface ComputerVisionClient { * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent * @return the ImageDescription object if successful. */ - ImageDescription describeImage(String url, String maxCandidates, String language); + ImageDescription describeImage(String url, Integer maxCandidates, String language); /** * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. @@ -489,7 +489,7 @@ public interface ComputerVisionClient { * @throws IllegalArgumentException thrown if parameters fail the validation * @return the {@link ServiceFuture} object */ - ServiceFuture describeImageAsync(String url, String maxCandidates, String language, final ServiceCallback serviceCallback); + ServiceFuture describeImageAsync(String url, Integer maxCandidates, String language, final ServiceCallback serviceCallback); /** * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. @@ -500,7 +500,7 @@ public interface ComputerVisionClient { * @throws IllegalArgumentException thrown if parameters fail the validation * @return the observable to the ImageDescription object */ - Observable describeImageAsync(String url, String maxCandidates, String language); + Observable describeImageAsync(String url, Integer maxCandidates, String language); /** * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. 
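Reviewer note: a minimal call-site sketch of the updated describeImage signature above. It assumes an already-configured ComputerVisionClient (construction is out of scope of this diff) and a placeholder image URL; it only illustrates that maxCandidates is now an Integer rather than a String.

import com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient;
import com.microsoft.azure.cognitiveservices.vision.computervision.models.ImageDescription;

// Hypothetical helper: the client is assumed to be configured elsewhere.
static ImageDescription describeWithThreeCandidates(ComputerVisionClient client, String imageUrl) {
    // maxCandidates is now an Integer (e.g. 3, or null for the service default) instead of "3".
    return client.describeImage(imageUrl, 3, "en");
}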
If the request failed, the response will contain an error code and a message to help understand what went wrong. @@ -511,7 +511,7 @@ public interface ComputerVisionClient { * @throws IllegalArgumentException thrown if parameters fail the validation * @return the observable to the ImageDescription object */ - Observable> describeImageWithServiceResponseAsync(String url, String maxCandidates, String language); + Observable> describeImageWithServiceResponseAsync(String url, Integer maxCandidates, String language); /** * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. @@ -807,51 +807,51 @@ public interface ComputerVisionClient { * * @param image An image stream. * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. - * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. Possible values include: 'Celebrities', 'Landmarks' + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' * @throws IllegalArgumentException thrown if parameters fail the validation * @throws ComputerVisionErrorException thrown if the request is rejected by server * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent * @return the ImageAnalysis object if successful. */ - ImageAnalysis analyzeImageInStream(byte[] image, List visualFeatures, String details, String language); + ImageAnalysis analyzeImageInStream(byte[] image, List visualFeatures, List
details, String language); /** * This operation extracts a rich set of visual features based on the image content. * * @param image An image stream. * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. - * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. Possible values include: 'Celebrities', 'Landmarks' + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' * @param serviceCallback the async ServiceCallback to handle successful and failed responses. * @throws IllegalArgumentException thrown if parameters fail the validation * @return the {@link ServiceFuture} object */ - ServiceFuture analyzeImageInStreamAsync(byte[] image, List visualFeatures, String details, String language, final ServiceCallback serviceCallback); + ServiceFuture analyzeImageInStreamAsync(byte[] image, List visualFeatures, List
details, String language, final ServiceCallback serviceCallback); /** * This operation extracts a rich set of visual features based on the image content. * * @param image An image stream. * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. - * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. Possible values include: 'Celebrities', 'Landmarks' + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' * @throws IllegalArgumentException thrown if parameters fail the validation * @return the observable to the ImageAnalysis object */ - Observable analyzeImageInStreamAsync(byte[] image, List visualFeatures, String details, String language); + Observable analyzeImageInStreamAsync(byte[] image, List visualFeatures, List
details, String language); /** * This operation extracts a rich set of visual features based on the image content. * * @param image An image stream. * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. - * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. Possible values include: 'Celebrities', 'Landmarks' + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' * @throws IllegalArgumentException thrown if parameters fail the validation * @return the observable to the ImageAnalysis object */ - Observable> analyzeImageInStreamWithServiceResponseAsync(byte[] image, List visualFeatures, String details, String language); + Observable> analyzeImageInStreamWithServiceResponseAsync(byte[] image, List visualFeatures, List
details, String language); /** * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. @@ -1088,7 +1088,7 @@ public interface ComputerVisionClient { * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent * @return the ImageDescription object if successful. */ - ImageDescription describeImageInStream(byte[] image, String maxCandidates, String language); + ImageDescription describeImageInStream(byte[] image, Integer maxCandidates, String language); /** * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. @@ -1100,7 +1100,7 @@ public interface ComputerVisionClient { * @throws IllegalArgumentException thrown if parameters fail the validation * @return the {@link ServiceFuture} object */ - ServiceFuture describeImageInStreamAsync(byte[] image, String maxCandidates, String language, final ServiceCallback serviceCallback); + ServiceFuture describeImageInStreamAsync(byte[] image, Integer maxCandidates, String language, final ServiceCallback serviceCallback); /** * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. @@ -1111,7 +1111,7 @@ public interface ComputerVisionClient { * @throws IllegalArgumentException thrown if parameters fail the validation * @return the observable to the ImageDescription object */ - Observable describeImageInStreamAsync(byte[] image, String maxCandidates, String language); + Observable describeImageInStreamAsync(byte[] image, Integer maxCandidates, String language); /** * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. 
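Reviewer note: a sketch of the analyzeImageInStream change in the hunks above, where the details parameter moves from a comma-separated String to a typed list. The client, the local file path, and the specific enum constants (VisualFeatureTypes.DESCRIPTION, VisualFeatureTypes.TAGS, Details.CELEBRITIES) are assumptions taken from the generated models, not part of this diff.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;
import com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient;
import com.microsoft.azure.cognitiveservices.vision.computervision.models.Details;
import com.microsoft.azure.cognitiveservices.vision.computervision.models.ImageAnalysis;
import com.microsoft.azure.cognitiveservices.vision.computervision.models.VisualFeatureTypes;

// Hypothetical call site: 'details' is now a typed List instead of a String such as "Celebrities".
static ImageAnalysis analyzeLocalImage(ComputerVisionClient client, String path) throws IOException {
    byte[] image = Files.readAllBytes(Paths.get(path));                  // raw image bytes for the *InStream overload
    List<VisualFeatureTypes> features = Arrays.asList(
            VisualFeatureTypes.DESCRIPTION, VisualFeatureTypes.TAGS);    // assumed enum constants
    List<Details> details = Arrays.asList(Details.CELEBRITIES);          // assumed enum constant
    return client.analyzeImageInStream(image, features, details, "en");
}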
If the request failed, the response will contain an error code and a message to help understand what went wrong. @@ -1122,7 +1122,7 @@ public interface ComputerVisionClient { * @throws IllegalArgumentException thrown if parameters fail the validation * @return the observable to the ImageDescription object */ - Observable> describeImageInStreamWithServiceResponseAsync(byte[] image, String maxCandidates, String language); + Observable> describeImageInStreamWithServiceResponseAsync(byte[] image, Integer maxCandidates, String language); /** * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ComputerVisionClientImpl.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ComputerVisionClientImpl.java index 0a9ac20b05fd..e920a0b65275 100644 --- a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ComputerVisionClientImpl.java +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ComputerVisionClientImpl.java @@ -240,7 +240,7 @@ interface ComputerVisionClientService { @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient describeImage" }) @POST("describe") - Observable> describeImage(@Query("maxCandidates") String maxCandidates, @Query("language") String language, @Header("accept-language") String acceptLanguage, @Body ImageUrl imageUrl, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + Observable> describeImage(@Query("maxCandidates") Integer maxCandidates, @Query("language") String language, @Header("accept-language") String acceptLanguage, @Body ImageUrl imageUrl, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient tagImage" }) @POST("tag") @@ -273,7 +273,7 @@ interface ComputerVisionClientService { @Headers({ "Content-Type: application/octet-stream", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient describeImageInStream" }) @POST("describe") - Observable> describeImageInStream(@Query("maxCandidates") String maxCandidates, @Query("language") String language, @Body RequestBody image, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + Observable> describeImageInStream(@Query("maxCandidates") Integer maxCandidates, @Query("language") String language, @Body RequestBody image, @Header("accept-language") String 
acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); @Headers({ "Content-Type: application/octet-stream", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient tagImageInStream" }) @POST("tag") @@ -920,7 +920,7 @@ public Observable> describeImageWithServiceRes if (url == null) { throw new IllegalArgumentException("Parameter url is required and cannot be null."); } - final String maxCandidates = null; + final Integer maxCandidates = null; final String language = null; ImageUrl imageUrl = new ImageUrl(); imageUrl.withUrl(url); @@ -950,7 +950,7 @@ public Observable> call(Response * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent * @return the ImageDescription object if successful. */ - public ImageDescription describeImage(String url, String maxCandidates, String language) { + public ImageDescription describeImage(String url, Integer maxCandidates, String language) { return describeImageWithServiceResponseAsync(url, maxCandidates, language).toBlocking().single().body(); } @@ -964,7 +964,7 @@ public ImageDescription describeImage(String url, String maxCandidates, String l * @throws IllegalArgumentException thrown if parameters fail the validation * @return the {@link ServiceFuture} object */ - public ServiceFuture describeImageAsync(String url, String maxCandidates, String language, final ServiceCallback serviceCallback) { + public ServiceFuture describeImageAsync(String url, Integer maxCandidates, String language, final ServiceCallback serviceCallback) { return ServiceFuture.fromResponse(describeImageWithServiceResponseAsync(url, maxCandidates, language), serviceCallback); } @@ -977,7 +977,7 @@ public ServiceFuture describeImageAsync(String url, String max * @throws IllegalArgumentException thrown if parameters fail the validation * @return the observable to the ImageDescription object */ - public Observable describeImageAsync(String url, String maxCandidates, String language) { + public Observable describeImageAsync(String url, Integer maxCandidates, String language) { return describeImageWithServiceResponseAsync(url, maxCandidates, language).map(new Func1, ImageDescription>() { @Override public ImageDescription call(ServiceResponse response) { @@ -995,7 +995,7 @@ public ImageDescription call(ServiceResponse response) { * @throws IllegalArgumentException thrown if parameters fail the validation * @return the observable to the ImageDescription object */ - public Observable> describeImageWithServiceResponseAsync(String url, String maxCandidates, String language) { + public Observable> describeImageWithServiceResponseAsync(String url, Integer maxCandidates, String language) { if (this.endpoint() == null) { throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); } @@ -1570,11 +1570,11 @@ public Observable> analyzeImageInStreamWithServic throw new IllegalArgumentException("Parameter image is required and cannot be null."); } final List visualFeatures = null; - final String details = null; + final List
details = null; final String language = null; String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); - String visualFeaturesConverted = this.serializerAdapter().serializeList(visualFeatures, CollectionFormat.CSV);RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); - return service.analyzeImageInStream(visualFeaturesConverted, details, language, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) + String visualFeaturesConverted = this.serializerAdapter().serializeList(visualFeatures, CollectionFormat.CSV);String detailsConverted = this.serializerAdapter().serializeList(details, CollectionFormat.CSV);RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.analyzeImageInStream(visualFeaturesConverted, detailsConverted, language, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) .flatMap(new Func1, Observable>>() { @Override public Observable> call(Response response) { @@ -1593,14 +1593,14 @@ public Observable> call(Response re * * @param image An image stream. * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. - * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. Possible values include: 'Celebrities', 'Landmarks' + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' * @throws IllegalArgumentException thrown if parameters fail the validation * @throws ComputerVisionErrorException thrown if the request is rejected by server * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent * @return the ImageAnalysis object if successful. */ - public ImageAnalysis analyzeImageInStream(byte[] image, List visualFeatures, String details, String language) { + public ImageAnalysis analyzeImageInStream(byte[] image, List visualFeatures, List
details, String language) { return analyzeImageInStreamWithServiceResponseAsync(image, visualFeatures, details, language).toBlocking().single().body(); } @@ -1609,13 +1609,13 @@ public ImageAnalysis analyzeImageInStream(byte[] image, List * * @param image An image stream. * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. - * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. Possible values include: 'Celebrities', 'Landmarks' + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' * @param serviceCallback the async ServiceCallback to handle successful and failed responses. * @throws IllegalArgumentException thrown if parameters fail the validation * @return the {@link ServiceFuture} object */ - public ServiceFuture analyzeImageInStreamAsync(byte[] image, List visualFeatures, String details, String language, final ServiceCallback serviceCallback) { + public ServiceFuture analyzeImageInStreamAsync(byte[] image, List visualFeatures, List
details, String language, final ServiceCallback serviceCallback) { return ServiceFuture.fromResponse(analyzeImageInStreamWithServiceResponseAsync(image, visualFeatures, details, language), serviceCallback); } @@ -1624,12 +1624,12 @@ public ServiceFuture analyzeImageInStreamAsync(byte[] image, List * * @param image An image stream. * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. - * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. Possible values include: 'Celebrities', 'Landmarks' + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' * @throws IllegalArgumentException thrown if parameters fail the validation * @return the observable to the ImageAnalysis object */ - public Observable analyzeImageInStreamAsync(byte[] image, List visualFeatures, String details, String language) { + public Observable analyzeImageInStreamAsync(byte[] image, List visualFeatures, List
details, String language) { return analyzeImageInStreamWithServiceResponseAsync(image, visualFeatures, details, language).map(new Func1, ImageAnalysis>() { @Override public ImageAnalysis call(ServiceResponse response) { @@ -1643,12 +1643,12 @@ public ImageAnalysis call(ServiceResponse response) { * * @param image An image stream. * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. - * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. Possible values include: 'Celebrities', 'Landmarks' + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' * @throws IllegalArgumentException thrown if parameters fail the validation * @return the observable to the ImageAnalysis object */ - public Observable> analyzeImageInStreamWithServiceResponseAsync(byte[] image, List visualFeatures, String details, String language) { + public Observable> analyzeImageInStreamWithServiceResponseAsync(byte[] image, List visualFeatures, List
details, String language) { if (this.endpoint() == null) { throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); } @@ -1656,9 +1656,10 @@ public Observable> analyzeImageInStreamWithServic throw new IllegalArgumentException("Parameter image is required and cannot be null."); } Validator.validate(visualFeatures); + Validator.validate(details); String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); - String visualFeaturesConverted = this.serializerAdapter().serializeList(visualFeatures, CollectionFormat.CSV);RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); - return service.analyzeImageInStream(visualFeaturesConverted, details, language, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) + String visualFeaturesConverted = this.serializerAdapter().serializeList(visualFeatures, CollectionFormat.CSV);String detailsConverted = this.serializerAdapter().serializeList(details, CollectionFormat.CSV);RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.analyzeImageInStream(visualFeaturesConverted, detailsConverted, language, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) .flatMap(new Func1, Observable>>() { @Override public Observable> call(Response response) { @@ -2066,7 +2067,7 @@ public Observable> describeImageInStreamWithSe if (image == null) { throw new IllegalArgumentException("Parameter image is required and cannot be null."); } - final String maxCandidates = null; + final Integer maxCandidates = null; final String language = null; String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); @@ -2095,7 +2096,7 @@ public Observable> call(Response * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent * @return the ImageDescription object if successful. 
*/ - public ImageDescription describeImageInStream(byte[] image, String maxCandidates, String language) { + public ImageDescription describeImageInStream(byte[] image, Integer maxCandidates, String language) { return describeImageInStreamWithServiceResponseAsync(image, maxCandidates, language).toBlocking().single().body(); } @@ -2109,7 +2110,7 @@ public ImageDescription describeImageInStream(byte[] image, String maxCandidates * @throws IllegalArgumentException thrown if parameters fail the validation * @return the {@link ServiceFuture} object */ - public ServiceFuture describeImageInStreamAsync(byte[] image, String maxCandidates, String language, final ServiceCallback serviceCallback) { + public ServiceFuture describeImageInStreamAsync(byte[] image, Integer maxCandidates, String language, final ServiceCallback serviceCallback) { return ServiceFuture.fromResponse(describeImageInStreamWithServiceResponseAsync(image, maxCandidates, language), serviceCallback); } @@ -2122,7 +2123,7 @@ public ServiceFuture describeImageInStreamAsync(byte[] image, * @throws IllegalArgumentException thrown if parameters fail the validation * @return the observable to the ImageDescription object */ - public Observable describeImageInStreamAsync(byte[] image, String maxCandidates, String language) { + public Observable describeImageInStreamAsync(byte[] image, Integer maxCandidates, String language) { return describeImageInStreamWithServiceResponseAsync(image, maxCandidates, language).map(new Func1, ImageDescription>() { @Override public ImageDescription call(ServiceResponse response) { @@ -2140,7 +2141,7 @@ public ImageDescription call(ServiceResponse response) { * @throws IllegalArgumentException thrown if parameters fail the validation * @return the observable to the ImageDescription object */ - public Observable> describeImageInStreamWithServiceResponseAsync(byte[] image, String maxCandidates, String language) { + public Observable> describeImageInStreamWithServiceResponseAsync(byte[] image, Integer maxCandidates, String language) { if (this.endpoint() == null) { throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); } diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ColorInfo.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ColorInfo.java index 3132074a6191..6a16a774589a 100644 --- a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ColorInfo.java +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ColorInfo.java @@ -43,7 +43,7 @@ public class ColorInfo { * A value indicating if the image is black and white. */ @JsonProperty(value = "isBWImg") - private Boolean isBWImg; + private boolean isBWImg; /** * Get possible dominant foreground color. @@ -130,7 +130,7 @@ public ColorInfo withAccentColor(String accentColor) { * * @return the isBWImg value */ - public Boolean isBWImg() { + public boolean isBWImg() { return this.isBWImg; } @@ -140,7 +140,7 @@ public Boolean isBWImg() { * @param isBWImg the isBWImg value to set * @return the ColorInfo object itself. 
*/ - public ColorInfo withIsBWImg(Boolean isBWImg) { + public ColorInfo withIsBWImg(boolean isBWImg) { this.isBWImg = isBWImg; return this; } diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageType.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageType.java index 5ba29df4bd8a..a5631156ea64 100644 --- a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageType.java +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageType.java @@ -18,20 +18,20 @@ public class ImageType { * Confidence level that the image is a clip art. */ @JsonProperty(value = "clipArtType") - private double clipArtType; + private int clipArtType; /** * Confidence level that the image is a line drawing. */ @JsonProperty(value = "lineDrawingType") - private double lineDrawingType; + private int lineDrawingType; /** * Get confidence level that the image is a clip art. * * @return the clipArtType value */ - public double clipArtType() { + public int clipArtType() { return this.clipArtType; } @@ -41,7 +41,7 @@ public double clipArtType() { * @param clipArtType the clipArtType value to set * @return the ImageType object itself. */ - public ImageType withClipArtType(double clipArtType) { + public ImageType withClipArtType(int clipArtType) { this.clipArtType = clipArtType; return this; } @@ -51,7 +51,7 @@ public ImageType withClipArtType(double clipArtType) { * * @return the lineDrawingType value */ - public double lineDrawingType() { + public int lineDrawingType() { return this.lineDrawingType; } @@ -61,7 +61,7 @@ public double lineDrawingType() { * @param lineDrawingType the lineDrawingType value to set * @return the ImageType object itself. */ - public ImageType withLineDrawingType(double lineDrawingType) { + public ImageType withLineDrawingType(int lineDrawingType) { this.lineDrawingType = lineDrawingType; return this; } diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionClient.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionClient.java new file mode 100644 index 000000000000..13c3aa3214a1 --- /dev/null +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionClient.java @@ -0,0 +1,1339 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. 
+ */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.microsoft.azure.AzureClient; +import com.microsoft.azure.CloudException; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorException; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.Details; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.DomainModelResults; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.ImageAnalysis; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.ImageDescription; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.ListModelsResult; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.OcrLanguages; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.OcrResult; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.RecognizeTextHeaders; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.RecognizeTextInStreamHeaders; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.TagResult; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.TextOperationResult; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.TextRecognitionMode; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.VisualFeatureTypes; +import com.microsoft.rest.RestClient; +import com.microsoft.rest.ServiceCallback; +import com.microsoft.rest.ServiceFuture; +import com.microsoft.rest.ServiceResponse; +import com.microsoft.rest.ServiceResponseWithHeaders; +import java.io.InputStream; +import java.io.IOException; +import java.util.List; +import rx.Observable; + +/** + * The interface for ComputerVisionClient class. + */ +public interface ComputerVisionClient { + /** + * Gets the REST client. + * + * @return the {@link RestClient} object. + */ + RestClient restClient(); + + /** + * Gets the {@link AzureClient} used for long running operations. + * @return the azure client; + */ + AzureClient getAzureClient(); + + /** + * Gets the User-Agent header for the client. + * + * @return the user agent string. + */ + String userAgent(); + + /** + * Gets Supported Cognitive Services endpoints. + * + * @return the endpoint value. + */ + String endpoint(); + + /** + * Sets Supported Cognitive Services endpoints. + * + * @param endpoint the endpoint value. + * @return the service client itself + */ + ComputerVisionClient withEndpoint(String endpoint); + + /** + * Gets Gets or sets the preferred language for the response.. + * + * @return the acceptLanguage value. + */ + String acceptLanguage(); + + /** + * Sets Gets or sets the preferred language for the response.. + * + * @param acceptLanguage the acceptLanguage value. + * @return the service client itself + */ + ComputerVisionClient withAcceptLanguage(String acceptLanguage); + + /** + * Gets Gets or sets the retry timeout in seconds for Long Running Operations. Default value is 30.. + * + * @return the longRunningOperationRetryTimeout value. + */ + int longRunningOperationRetryTimeout(); + + /** + * Sets Gets or sets the retry timeout in seconds for Long Running Operations. Default value is 30.. + * + * @param longRunningOperationRetryTimeout the longRunningOperationRetryTimeout value. 
+ * @return the service client itself + */ + ComputerVisionClient withLongRunningOperationRetryTimeout(int longRunningOperationRetryTimeout); + + /** + * Gets When set to true a unique x-ms-client-request-id value is generated and included in each request. Default is true.. + * + * @return the generateClientRequestId value. + */ + boolean generateClientRequestId(); + + /** + * Sets When set to true a unique x-ms-client-request-id value is generated and included in each request. Default is true.. + * + * @param generateClientRequestId the generateClientRequestId value. + * @return the service client itself + */ + ComputerVisionClient withGenerateClientRequestId(boolean generateClientRequestId); + + /** + * This operation returns the list of domain-specific models that are supported by the Computer Vision API. Currently, the API only supports one domain-specific model: a celebrity recognizer. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ListModelsResult object if successful. + */ + ListModelsResult listModels(); + + /** + * This operation returns the list of domain-specific models that are supported by the Computer Vision API. Currently, the API only supports one domain-specific model: a celebrity recognizer. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture listModelsAsync(final ServiceCallback serviceCallback); + + /** + * This operation returns the list of domain-specific models that are supported by the Computer Vision API. Currently, the API only supports one domain-specific model: a celebrity recognizer. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ListModelsResult object + */ + Observable listModelsAsync(); + + /** + * This operation returns the list of domain-specific models that are supported by the Computer Vision API. Currently, the API only supports one domain-specific model: a celebrity recognizer. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ListModelsResult object + */ + Observable> listModelsWithServiceResponseAsync(); + + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. 
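Reviewer note: a small sketch of the configuration setter and the blocking listModels overload declared above; the endpoint value is a placeholder and the client is assumed to be constructed elsewhere. The async ServiceFuture and Observable overloads above follow the same pattern.

import com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient;
import com.microsoft.azure.cognitiveservices.vision.computervision.models.ListModelsResult;

// Hypothetical call site: point the client at a regional endpoint, then list the domain-specific models.
static ListModelsResult listDomainModels(ComputerVisionClient client) {
    client.withEndpoint("https://westus.api.cognitive.microsoft.com");  // placeholder endpoint value
    return client.listModels();                                         // blocking overload declared above
}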
+ * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageAnalysis object if successful. + */ + ImageAnalysis analyzeImage(String url); + + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. + * + * @param url Publicly reachable URL of an image + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture analyzeImageAsync(String url, final ServiceCallback serviceCallback); + + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageAnalysis object + */ + Observable analyzeImageAsync(String url); + + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageAnalysis object + */ + Observable> analyzeImageWithServiceResponseAsync(String url); + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. + * + * @param url Publicly reachable URL of an image + * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. + * @param details A string indicating which domain-specific details to return. 
Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageAnalysis object if successful. + */ + ImageAnalysis analyzeImage(String url, List<VisualFeatureTypes> visualFeatures, List<Details>
details, String language); + + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. + * + * @param url Publicly reachable URL of an image + * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture analyzeImageAsync(String url, List visualFeatures, List
details, String language, final ServiceCallback serviceCallback); + + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. + * + * @param url Publicly reachable URL of an image + * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageAnalysis object + */ + Observable analyzeImageAsync(String url, List visualFeatures, List
details, String language); + + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. + * + * @param url Publicly reachable URL of an image + * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageAnalysis object + */ + Observable> analyzeImageWithServiceResponseAsync(String url, List visualFeatures, List
details, String language); + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws CloudException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the InputStream object if successful. + */ + InputStream generateThumbnail(int width, int height, String url); + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param url Publicly reachable URL of an image + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture generateThumbnailAsync(int width, int height, String url, final ServiceCallback serviceCallback); + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + Observable generateThumbnailAsync(int width, int height, String url); + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. 
Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + Observable> generateThumbnailWithServiceResponseAsync(int width, int height, String url); + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param url Publicly reachable URL of an image + * @param smartCropping Boolean flag for enabling smart cropping. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws CloudException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the InputStream object if successful. + */ + InputStream generateThumbnail(int width, int height, String url, Boolean smartCropping); + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param url Publicly reachable URL of an image + * @param smartCropping Boolean flag for enabling smart cropping. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture generateThumbnailAsync(int width, int height, String url, Boolean smartCropping, final ServiceCallback serviceCallback); + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. 
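A minimal usage sketch for the generateThumbnail overloads above, assuming an already-constructed, authenticated ComputerVisionClient named client, a placeholder image URL and output file, and an enclosing method that declares throws IOException:

    // Request a 100x100 smart-cropped thumbnail and copy the returned binary to disk.
    // Requires java.io.FileOutputStream and java.io.InputStream.
    String imageUrl = "https://example.com/photo.jpg";                     // placeholder URL
    try (InputStream thumbnail = client.generateThumbnail(100, 100, imageUrl, true);
         FileOutputStream out = new FileOutputStream("thumbnail.jpg")) {   // placeholder path
        byte[] buffer = new byte[8192];
        int read;
        while ((read = thumbnail.read(buffer)) != -1) {
            out.write(buffer, 0, read);                                    // stream the thumbnail bytes
        }
    }

The trailing Boolean enables smart cropping; the three-argument overload leaves that flag at the service default.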
If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param url Publicly reachable URL of an image + * @param smartCropping Boolean flag for enabling smart cropping. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + Observable generateThumbnailAsync(int width, int height, String url, Boolean smartCropping); + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param url Publicly reachable URL of an image + * @param smartCropping Boolean flag for enabling smart cropping. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + Observable> generateThumbnailWithServiceResponseAsync(int width, int height, String url, Boolean smartCropping); + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the OcrResult object if successful. + */ + OcrResult recognizePrintedText(boolean detectOrientation, String url); + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). 
+ * @param url Publicly reachable URL of an image + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture recognizePrintedTextAsync(boolean detectOrientation, String url, final ServiceCallback serviceCallback); + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResult object + */ + Observable recognizePrintedTextAsync(boolean detectOrientation, String url); + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResult object + */ + Observable> recognizePrintedTextWithServiceResponseAsync(boolean detectOrientation, String url); + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param url Publicly reachable URL of an image + * @param language The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. 
Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the OcrResult object if successful. + */ + OcrResult recognizePrintedText(boolean detectOrientation, String url, OcrLanguages language); + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param url Publicly reachable URL of an image + * @param language The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture recognizePrintedTextAsync(boolean detectOrientation, String url, OcrLanguages language, final ServiceCallback serviceCallback); + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param url Publicly reachable URL of an image + * @param language The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResult object + */ + Observable recognizePrintedTextAsync(boolean detectOrientation, String url, OcrLanguages language); + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. 
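A brief sketch of the printed-text OCR overload above, assuming an authenticated ComputerVisionClient named client, a placeholder URL, and that the generated OcrLanguages enum exposes an EN constant for English:

    // Ask the service to correct the image orientation first (detectOrientation = true)
    // and hint that the printed text is English.
    String printedTextUrl = "https://example.com/receipt.png";      // placeholder URL
    OcrResult ocr = client.recognizePrintedText(true, printedTextUrl, OcrLanguages.EN);
    // Walk the OcrResult's regions, lines and words to collect the recognized characters.
    // Non-blocking alternative:
    // client.recognizePrintedTextAsync(true, printedTextUrl, OcrLanguages.EN)
    //       .subscribe(result -> { /* handle the OcrResult */ });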
Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param url Publicly reachable URL of an image + * @param language The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResult object + */ + Observable> recognizePrintedTextWithServiceResponseAsync(boolean detectOrientation, String url, OcrLanguages language); + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageDescription object if successful. + */ + ImageDescription describeImage(String url); + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param url Publicly reachable URL of an image + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture describeImageAsync(String url, final ServiceCallback serviceCallback); + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. 
Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescription object + */ + Observable describeImageAsync(String url); + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescription object + */ + Observable> describeImageWithServiceResponseAsync(String url); + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param url Publicly reachable URL of an image + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageDescription object if successful. + */ + ImageDescription describeImage(String url, Integer maxCandidates, String language); + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. 
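A brief sketch of the describeImage overload above that takes maxCandidates and language, assuming an authenticated ComputerVisionClient named client and a placeholder URL:

    String photoUrl = "https://example.com/beach.jpg";              // placeholder URL
    // Request up to three candidate captions, generated in English.
    ImageDescription description = client.describeImage(photoUrl, 3, "en");
    // Candidates are ordered by confidence score; the single-argument
    // describeImage(photoUrl) overload uses the defaults (one candidate, "en").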
+ * + * @param url Publicly reachable URL of an image + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture describeImageAsync(String url, Integer maxCandidates, String language, final ServiceCallback serviceCallback); + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param url Publicly reachable URL of an image + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescription object + */ + Observable describeImageAsync(String url, Integer maxCandidates, String language); + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param url Publicly reachable URL of an image + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescription object + */ + Observable> describeImageWithServiceResponseAsync(String url, Integer maxCandidates, String language); + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. 
The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the TagResult object if successful. + */ + TagResult tagImage(String url); + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param url Publicly reachable URL of an image + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture tagImageAsync(String url, final ServiceCallback serviceCallback); + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResult object + */ + Observable tagImageAsync(String url); + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResult object + */ + Observable> tagImageWithServiceResponseAsync(String url); + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. 
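A brief sketch of the tagImage operation declared above, assuming an authenticated ComputerVisionClient named client and a placeholder URL:

    String tagUrl = "https://example.com/street.jpg";               // placeholder URL
    TagResult tags = client.tagImage(tagUrl);
    // TagResult carries the generated tags with confidence scores and optional hints;
    // use the tagImage(url, language) overload declared below for a non-default output
    // language, or tagImageAsync(tagUrl) for the Observable-based variant.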
Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the TagResult object if successful. + */ + TagResult tagImage(String url, String language); + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture tagImageAsync(String url, String language, final ServiceCallback serviceCallback); + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResult object + */ + Observable tagImageAsync(String url, String language); + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. 
Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResult object + */ + Observable> tagImageWithServiceResponseAsync(String url, String language); + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the DomainModelResults object if successful. + */ + DomainModelResults analyzeImageByDomain(String model, String url); + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param url Publicly reachable URL of an image + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture analyzeImageByDomainAsync(String model, String url, final ServiceCallback serviceCallback); + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. 
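A brief sketch of analyzeImageByDomain as declared above, assuming an authenticated ComputerVisionClient named client, a placeholder URL, and the 'celebrities' model name noted in the Javadoc:

    String portraitUrl = "https://example.com/portrait.jpg";        // placeholder URL
    // 'celebrities' is currently the only domain-specific model mentioned above;
    // the available model names can be listed via the /models GET request.
    DomainModelResults celebrities = client.analyzeImageByDomain("celebrities", portraitUrl);
    // DomainModelResults wraps the model-specific result payload returned by the service.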
+ * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResults object + */ + Observable analyzeImageByDomainAsync(String model, String url); + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResults object + */ + Observable> analyzeImageByDomainWithServiceResponseAsync(String model, String url); + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the DomainModelResults object if successful. + */ + DomainModelResults analyzeImageByDomain(String model, String url, String language); + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. 
Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture analyzeImageByDomainAsync(String model, String url, String language, final ServiceCallback serviceCallback); + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResults object + */ + Observable analyzeImageByDomainAsync(String model, String url, String language); + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResults object + */ + Observable> analyzeImageByDomainWithServiceResponseAsync(String model, String url, String language); + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Recognize Text Operation Result operation. + * + * @param mode Type of text to recognize. 
Possible values include: 'Handwritten', 'Printed' + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + */ + void recognizeText(String url, TextRecognitionMode mode); + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Recognize Text Operation Result operation. + * + * @param mode Type of text to recognize. Possible values include: 'Handwritten', 'Printed' + * @param url Publicly reachable URL of an image + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture recognizeTextAsync(String url, TextRecognitionMode mode, final ServiceCallback serviceCallback); + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Recognize Text Operation Result operation. + * + * @param mode Type of text to recognize. Possible values include: 'Handwritten', 'Printed' + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceResponseWithHeaders} object if successful. + */ + Observable recognizeTextAsync(String url, TextRecognitionMode mode); + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Recognize Text Operation Result operation. + * + * @param mode Type of text to recognize. Possible values include: 'Handwritten', 'Printed' + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceResponseWithHeaders} object if successful. + */ + Observable> recognizeTextWithServiceResponseAsync(String url, TextRecognitionMode mode); + + /** + * This interface is used for getting text operation result. The URL to this interface should be retrieved from 'Operation-Location' field returned from Recognize Text interface. + * + * @param operationId Id of the text operation returned in the response of the 'Recognize Text' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the TextOperationResult object if successful. + */ + TextOperationResult getTextOperationResult(String operationId); + + /** + * This interface is used for getting text operation result. The URL to this interface should be retrieved from 'Operation-Location' field returned from Recognize Text interface. + * + * @param operationId Id of the text operation returned in the response of the 'Recognize Text' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
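A brief sketch of the two-step Recognize Text flow declared above, assuming an authenticated ComputerVisionClient named client, a placeholder URL, and that the generated TextRecognitionMode enum exposes a PRINTED constant:

    String scanUrl = "https://example.com/scan.png";                // placeholder URL
    // Step 1: submit the image. The operation id must be parsed from the
    // 'Operation-Location' response header, which is only surfaced by the
    // recognizeTextWithServiceResponseAsync variant declared above.
    client.recognizeText(scanUrl, TextRecognitionMode.PRINTED);
    // Step 2: poll for the result using the id taken from that header.
    String operationId = "<id from Operation-Location>";            // hypothetical placeholder
    TextOperationResult textResult = client.getTextOperationResult(operationId);
    // Repeat the call (with a short delay) until textResult reports a terminal status.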
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture getTextOperationResultAsync(String operationId, final ServiceCallback serviceCallback); + + /** + * This interface is used for getting text operation result. The URL to this interface should be retrieved from 'Operation-Location' field returned from Recognize Text interface. + * + * @param operationId Id of the text operation returned in the response of the 'Recognize Text' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TextOperationResult object + */ + Observable getTextOperationResultAsync(String operationId); + + /** + * This interface is used for getting text operation result. The URL to this interface should be retrieved from 'Operation-Location' field returned from Recognize Text interface. + * + * @param operationId Id of the text operation returned in the response of the 'Recognize Text' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TextOperationResult object + */ + Observable> getTextOperationResultWithServiceResponseAsync(String operationId); + + /** + * This operation extracts a rich set of visual features based on the image content. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageAnalysis object if successful. + */ + ImageAnalysis analyzeImageInStream(byte[] image); + + /** + * This operation extracts a rich set of visual features based on the image content. + * + * @param image An image stream. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture analyzeImageInStreamAsync(byte[] image, final ServiceCallback serviceCallback); + + /** + * This operation extracts a rich set of visual features based on the image content. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageAnalysis object + */ + Observable analyzeImageInStreamAsync(byte[] image); + + /** + * This operation extracts a rich set of visual features based on the image content. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageAnalysis object + */ + Observable> analyzeImageInStreamWithServiceResponseAsync(byte[] image); + /** + * This operation extracts a rich set of visual features based on the image content. + * + * @param image An image stream. + * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. 
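A brief sketch of the stream-based analyze overload above, assuming an authenticated ComputerVisionClient named client, a placeholder local file, imports for java.nio.file.Files and java.nio.file.Paths, and an enclosing method that declares throws IOException:

    byte[] imageBytes = Files.readAllBytes(Paths.get("local-photo.jpg"));   // placeholder path
    // The single-argument overload returns the default feature set (image categories).
    ImageAnalysis analysis = client.analyzeImageInStream(imageBytes);
    // Use the visualFeatures/details/language overload declared below when specific
    // features such as Description, Faces or Adult are required.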
Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageAnalysis object if successful. + */ + ImageAnalysis analyzeImageInStream(byte[] image, List visualFeatures, List
details, String language); + + /** + * This operation extracts a rich set of visual features based on the image content. + * + * @param image An image stream. + * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture analyzeImageInStreamAsync(byte[] image, List visualFeatures, List
details, String language, final ServiceCallback serviceCallback); + + /** + * This operation extracts a rich set of visual features based on the image content. + * + * @param image An image stream. + * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageAnalysis object + */ + Observable analyzeImageInStreamAsync(byte[] image, List visualFeatures, List
details, String language); + + /** + * This operation extracts a rich set of visual features based on the image content. + * + * @param image An image stream. + * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageAnalysis object + */ + Observable> analyzeImageInStreamWithServiceResponseAsync(byte[] image, List visualFeatures, List
details, String language); + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws CloudException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the InputStream object if successful. + */ + InputStream generateThumbnailInStream(int width, int height, byte[] image); + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param image An image stream. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture generateThumbnailInStreamAsync(int width, int height, byte[] image, final ServiceCallback serviceCallback); + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + Observable generateThumbnailInStreamAsync(int width, int height, byte[] image); + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. 
A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + Observable> generateThumbnailInStreamWithServiceResponseAsync(int width, int height, byte[] image); + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param image An image stream. + * @param smartCropping Boolean flag for enabling smart cropping. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws CloudException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the InputStream object if successful. + */ + InputStream generateThumbnailInStream(int width, int height, byte[] image, Boolean smartCropping); + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param image An image stream. + * @param smartCropping Boolean flag for enabling smart cropping. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture generateThumbnailInStreamAsync(int width, int height, byte[] image, Boolean smartCropping, final ServiceCallback serviceCallback); + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. 
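A minimal synchronous usage sketch for the stream-based thumbnail overloads above, assuming an already-authenticated `client` and placeholder file names (neither is part of this patch):

```java
import com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient;

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

public class ThumbnailSample {
    // Assumed: "client" has already been built and authenticated elsewhere.
    static ComputerVisionClient client;

    public static void main(String[] args) throws Exception {
        byte[] imageBytes = Files.readAllBytes(Paths.get("photo.jpg"));
        // 100x100 thumbnail using the Boolean smartCropping overload declared above.
        try (InputStream thumbnail = client.generateThumbnailInStream(100, 100, imageBytes, true)) {
            Files.copy(thumbnail, Paths.get("photo-thumb.jpg"));
        }
    }
}
```

The same shape applies to the URL-based `generateThumbnail` overloads; only the image input differs.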
+ * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param image An image stream. + * @param smartCropping Boolean flag for enabling smart cropping. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + Observable generateThumbnailInStreamAsync(int width, int height, byte[] image, Boolean smartCropping); + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param image An image stream. + * @param smartCropping Boolean flag for enabling smart cropping. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + Observable> generateThumbnailInStreamWithServiceResponseAsync(int width, int height, byte[] image, Boolean smartCropping); + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the OcrResult object if successful. + */ + OcrResult recognizePrintedTextInStream(boolean detectOrientation, byte[] image); + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture recognizePrintedTextInStreamAsync(boolean detectOrientation, byte[] image, final ServiceCallback serviceCallback); + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResult object + */ + Observable recognizePrintedTextInStreamAsync(boolean detectOrientation, byte[] image); + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResult object + */ + Observable> recognizePrintedTextInStreamWithServiceResponseAsync(boolean detectOrientation, byte[] image); + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. + * @param language The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the OcrResult object if successful. 
+ */ + OcrResult recognizePrintedTextInStream(boolean detectOrientation, byte[] image, OcrLanguages language); + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. + * @param language The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture recognizePrintedTextInStreamAsync(boolean detectOrientation, byte[] image, OcrLanguages language, final ServiceCallback serviceCallback); + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. + * @param language The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResult object + */ + Observable recognizePrintedTextInStreamAsync(boolean detectOrientation, byte[] image, OcrLanguages language); + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. 
+ * @param language The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResult object + */ + Observable> recognizePrintedTextInStreamWithServiceResponseAsync(boolean detectOrientation, byte[] image, OcrLanguages language); + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageDescription object if successful. + */ + ImageDescription describeImageInStream(byte[] image); + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param image An image stream. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture describeImageInStreamAsync(byte[] image, final ServiceCallback serviceCallback); + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescription object + */ + Observable describeImageInStreamAsync(byte[] image); + + /** + * This operation generates a description of an image in human readable language with complete sentences. 
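For the OCR-in-stream overloads above, a hedged sketch of a blocking call; `OcrLanguages.EN` and the fluent `language()`/`textAngle()` accessors are assumed from the generated model names and are not confirmed by this diff:

```java
import com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient;
import com.microsoft.azure.cognitiveservices.vision.computervision.models.OcrLanguages;
import com.microsoft.azure.cognitiveservices.vision.computervision.models.OcrResult;

import java.nio.file.Files;
import java.nio.file.Paths;

public class OcrSample {
    // Assumed: "client" has already been built and authenticated elsewhere.
    static ComputerVisionClient client;

    public static void main(String[] args) throws Exception {
        byte[] imageBytes = Files.readAllBytes(Paths.get("receipt.png"));
        // detectOrientation=true lets the service correct rotated input before OCR;
        // OcrLanguages.EN is the assumed enum constant for English.
        OcrResult result = client.recognizePrintedTextInStream(true, imageBytes, OcrLanguages.EN);
        System.out.println("Language: " + result.language());
        System.out.println("Text angle: " + result.textAngle());
    }
}
```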
The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescription object + */ + Observable> describeImageInStreamWithServiceResponseAsync(byte[] image); + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param image An image stream. + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageDescription object if successful. + */ + ImageDescription describeImageInStream(byte[] image, Integer maxCandidates, String language); + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param image An image stream. + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture describeImageInStreamAsync(byte[] image, Integer maxCandidates, String language, final ServiceCallback serviceCallback); + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param image An image stream. + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescription object + */ + Observable describeImageInStreamAsync(byte[] image, Integer maxCandidates, String language); + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param image An image stream. + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescription object + */ + Observable> describeImageInStreamWithServiceResponseAsync(byte[] image, Integer maxCandidates, String language); + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. 
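The callback overloads above pair with `com.microsoft.rest.ServiceCallback`; a sketch requesting up to three captions, with the `captions()`/`text()`/`confidence()` accessors assumed from the generated `ImageDescription` model:

```java
import com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient;
import com.microsoft.azure.cognitiveservices.vision.computervision.models.ImageDescription;
import com.microsoft.rest.ServiceCallback;

import java.nio.file.Files;
import java.nio.file.Paths;

public class DescribeAsyncSample {
    // Assumed: "client" has already been built and authenticated elsewhere.
    static ComputerVisionClient client;

    public static void main(String[] args) throws Exception {
        byte[] imageBytes = Files.readAllBytes(Paths.get("beach.jpg"));
        // Up to 3 candidate captions in English, delivered on the callback.
        client.describeImageInStreamAsync(imageBytes, 3, "en", new ServiceCallback<ImageDescription>() {
            @Override
            public void success(ImageDescription description) {
                description.captions().forEach(c ->
                        System.out.println(c.text() + " (" + c.confidence() + ")"));
            }

            @Override
            public void failure(Throwable t) {
                t.printStackTrace();
            }
        });
        Thread.sleep(5000); // keep the demo process alive long enough for the callback to fire
    }
}
```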
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the TagResult object if successful. + */ + TagResult tagImageInStream(byte[] image); + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture tagImageInStreamAsync(byte[] image, final ServiceCallback serviceCallback); + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResult object + */ + Observable tagImageInStreamAsync(byte[] image); + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResult object + */ + Observable> tagImageInStreamWithServiceResponseAsync(byte[] image); + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. 
Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the TagResult object if successful. + */ + TagResult tagImageInStream(byte[] image, String language); + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture tagImageInStreamAsync(byte[] image, String language, final ServiceCallback serviceCallback); + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResult object + */ + Observable tagImageInStreamAsync(byte[] image, String language); + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. 
Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResult object + */ + Observable> tagImageInStreamWithServiceResponseAsync(byte[] image, String language); + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the DomainModelResults object if successful. + */ + DomainModelResults analyzeImageByDomainInStream(String model, byte[] image); + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture analyzeImageByDomainInStreamAsync(String model, byte[] image, final ServiceCallback serviceCallback); + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResults object + */ + Observable analyzeImageByDomainInStreamAsync(String model, byte[] image); + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. 
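The plain-Observable overloads (for example `tagImageInStreamAsync` above) integrate with RxJava 1.x; a blocking sketch, with `tags()`/`name()`/`confidence()` assumed from the generated `TagResult` model:

```java
import com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient;
import com.microsoft.azure.cognitiveservices.vision.computervision.models.TagResult;

import java.nio.file.Files;
import java.nio.file.Paths;

public class TagRxSample {
    // Assumed: "client" has already been built and authenticated elsewhere.
    static ComputerVisionClient client;

    public static void main(String[] args) throws Exception {
        byte[] imageBytes = Files.readAllBytes(Paths.get("cello.jpg"));
        // toBlocking() keeps the sample linear; production code would compose the Observable instead.
        TagResult result = client.tagImageInStreamAsync(imageBytes, "en")
                .toBlocking()
                .single();
        result.tags().forEach(t -> System.out.println(t.name() + " (" + t.confidence() + ")"));
    }
}
```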
Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResults object + */ + Observable> analyzeImageByDomainInStreamWithServiceResponseAsync(String model, byte[] image); + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the DomainModelResults object if successful. + */ + DomainModelResults analyzeImageByDomainInStream(String model, byte[] image, String language); + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture analyzeImageByDomainInStreamAsync(String model, byte[] image, String language, final ServiceCallback serviceCallback); + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. 
Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResults object + */ + Observable analyzeImageByDomainInStreamAsync(String model, byte[] image, String language); + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResults object + */ + Observable> analyzeImageByDomainInStreamWithServiceResponseAsync(String model, byte[] image, String language); + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Recognize Text Operation Result operation. + * + * @param image An image stream. + * @param mode Type of text to recognize. Possible values include: 'Handwritten', 'Printed' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + */ + void recognizeTextInStream(byte[] image, TextRecognitionMode mode); + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Recognize Text Operation Result operation. + * + * @param image An image stream. + * @param mode Type of text to recognize. Possible values include: 'Handwritten', 'Printed' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
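Recognize Text is asynchronous on the service side: the submit call returns only an 'Operation-Location' header, which is then polled through the text-operations endpoint. A heavily hedged sketch; `TextRecognitionMode.PRINTED`, `operationLocation()`, `getTextOperationResult(...)` and `status()` are assumed from the generated names and are not confirmed by this diff:

```java
import com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient;
import com.microsoft.azure.cognitiveservices.vision.computervision.models.RecognizeTextInStreamHeaders;
import com.microsoft.azure.cognitiveservices.vision.computervision.models.TextOperationResult;
import com.microsoft.azure.cognitiveservices.vision.computervision.models.TextRecognitionMode;
import com.microsoft.rest.ServiceResponseWithHeaders;

import java.nio.file.Files;
import java.nio.file.Paths;

public class RecognizeTextSample {
    // Assumed: "client" has already been built and authenticated elsewhere.
    static ComputerVisionClient client;

    public static void main(String[] args) throws Exception {
        byte[] imageBytes = Files.readAllBytes(Paths.get("note.jpg"));
        // Submit the image; the interesting part of the response is the Operation-Location header.
        ServiceResponseWithHeaders<Void, RecognizeTextInStreamHeaders> response =
                client.recognizeTextInStreamWithServiceResponseAsync(imageBytes, TextRecognitionMode.PRINTED)
                        .toBlocking()
                        .single();
        String operationLocation = response.headers().operationLocation();
        // The operation id is the last path segment of the Operation-Location URL.
        String operationId = operationLocation.substring(operationLocation.lastIndexOf('/') + 1);
        Thread.sleep(2000); // real code would poll until the operation reaches a terminal status
        TextOperationResult result = client.getTextOperationResult(operationId);
        System.out.println("Status: " + result.status());
    }
}
```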
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + ServiceFuture<Void> recognizeTextInStreamAsync(byte[] image, TextRecognitionMode mode, final ServiceCallback<Void> serviceCallback); + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Recognize Text Operation Result operation. + * + * @param image An image stream. + * @param mode Type of text to recognize. Possible values include: 'Handwritten', 'Printed' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceResponseWithHeaders} object if successful. + */ + Observable<Void> recognizeTextInStreamAsync(byte[] image, TextRecognitionMode mode); + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Recognize Text Operation Result operation. + * + * @param image An image stream. + * @param mode Type of text to recognize. Possible values include: 'Handwritten', 'Printed' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceResponseWithHeaders} object if successful. + */ + Observable<ServiceResponseWithHeaders<Void, RecognizeTextInStreamHeaders>> recognizeTextInStreamWithServiceResponseAsync(byte[] image, TextRecognitionMode mode); + +} diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ComputerVisionClientImpl.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ComputerVisionClientImpl.java new file mode 100644 index 000000000000..e920a0b65275 --- /dev/null +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ComputerVisionClientImpl.java @@ -0,0 +1,2580 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator.
+ */ + +package com.microsoft.azure.cognitiveservices.vision.computervision.implementation; + +import com.google.common.base.Joiner; +import com.google.common.reflect.TypeToken; +import com.microsoft.azure.AzureClient; +import com.microsoft.azure.AzureServiceClient; +import com.microsoft.azure.CloudException; +import com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.ComputerVisionErrorException; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.Details; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.DomainModelResults; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.ImageAnalysis; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.ImageDescription; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.ImageUrl; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.ListModelsResult; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.OcrLanguages; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.OcrResult; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.RecognizeTextHeaders; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.RecognizeTextInStreamHeaders; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.TagResult; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.TextOperationResult; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.TextRecognitionMode; +import com.microsoft.azure.cognitiveservices.vision.computervision.models.VisualFeatureTypes; +import com.microsoft.rest.CollectionFormat; +import com.microsoft.rest.credentials.ServiceClientCredentials; +import com.microsoft.rest.RestClient; +import com.microsoft.rest.ServiceCallback; +import com.microsoft.rest.ServiceFuture; +import com.microsoft.rest.ServiceResponse; +import com.microsoft.rest.ServiceResponseWithHeaders; +import com.microsoft.rest.Validator; +import java.io.InputStream; +import java.io.IOException; +import java.util.List; +import okhttp3.MediaType; +import okhttp3.RequestBody; +import okhttp3.ResponseBody; +import retrofit2.http.Body; +import retrofit2.http.GET; +import retrofit2.http.Header; +import retrofit2.http.Headers; +import retrofit2.http.Path; +import retrofit2.http.POST; +import retrofit2.http.Query; +import retrofit2.http.Streaming; +import retrofit2.Response; +import rx.functions.Func1; +import rx.Observable; + +/** + * Initializes a new instance of the ComputerVisionClientImpl class. + */ +public class ComputerVisionClientImpl extends AzureServiceClient implements ComputerVisionClient { + /** The Retrofit service to perform REST calls. */ + private ComputerVisionClientService service; + /** the {@link AzureClient} used for long running operations. */ + private AzureClient azureClient; + + /** + * Gets the {@link AzureClient} used for long running operations. + * @return the azure client; + */ + public AzureClient getAzureClient() { + return this.azureClient; + } + + /** Supported Cognitive Services endpoints. */ + private String endpoint; + + /** + * Gets Supported Cognitive Services endpoints. + * + * @return the endpoint value. + */ + public String endpoint() { + return this.endpoint; + } + + /** + * Sets Supported Cognitive Services endpoints. 
+ * + * @param endpoint the endpoint value. + * @return the service client itself + */ + public ComputerVisionClientImpl withEndpoint(String endpoint) { + this.endpoint = endpoint; + return this; + } + + /** Gets or sets the preferred language for the response. */ + private String acceptLanguage; + + /** + * Gets Gets or sets the preferred language for the response. + * + * @return the acceptLanguage value. + */ + public String acceptLanguage() { + return this.acceptLanguage; + } + + /** + * Sets Gets or sets the preferred language for the response. + * + * @param acceptLanguage the acceptLanguage value. + * @return the service client itself + */ + public ComputerVisionClientImpl withAcceptLanguage(String acceptLanguage) { + this.acceptLanguage = acceptLanguage; + return this; + } + + /** Gets or sets the retry timeout in seconds for Long Running Operations. Default value is 30. */ + private int longRunningOperationRetryTimeout; + + /** + * Gets Gets or sets the retry timeout in seconds for Long Running Operations. Default value is 30. + * + * @return the longRunningOperationRetryTimeout value. + */ + public int longRunningOperationRetryTimeout() { + return this.longRunningOperationRetryTimeout; + } + + /** + * Sets Gets or sets the retry timeout in seconds for Long Running Operations. Default value is 30. + * + * @param longRunningOperationRetryTimeout the longRunningOperationRetryTimeout value. + * @return the service client itself + */ + public ComputerVisionClientImpl withLongRunningOperationRetryTimeout(int longRunningOperationRetryTimeout) { + this.longRunningOperationRetryTimeout = longRunningOperationRetryTimeout; + return this; + } + + /** When set to true a unique x-ms-client-request-id value is generated and included in each request. Default is true. */ + private boolean generateClientRequestId; + + /** + * Gets When set to true a unique x-ms-client-request-id value is generated and included in each request. Default is true. + * + * @return the generateClientRequestId value. + */ + public boolean generateClientRequestId() { + return this.generateClientRequestId; + } + + /** + * Sets When set to true a unique x-ms-client-request-id value is generated and included in each request. Default is true. + * + * @param generateClientRequestId the generateClientRequestId value. + * @return the service client itself + */ + public ComputerVisionClientImpl withGenerateClientRequestId(boolean generateClientRequestId) { + this.generateClientRequestId = generateClientRequestId; + return this; + } + + /** + * Initializes an instance of ComputerVisionClient client. + * + * @param credentials the management credentials for Azure + */ + public ComputerVisionClientImpl(ServiceClientCredentials credentials) { + this("https://{Endpoint}/vision/v2.0", credentials); + } + + /** + * Initializes an instance of ComputerVisionClient client. + * + * @param baseUrl the base URL of the host + * @param credentials the management credentials for Azure + */ + private ComputerVisionClientImpl(String baseUrl, ServiceClientCredentials credentials) { + super(baseUrl, credentials); + initialize(); + } + + /** + * Initializes an instance of ComputerVisionClient client. + * + * @param restClient the REST client to connect to Azure. 
+ */ + public ComputerVisionClientImpl(RestClient restClient) { + super(restClient); + initialize(); + } + + protected void initialize() { + this.acceptLanguage = "en-US"; + this.longRunningOperationRetryTimeout = 30; + this.generateClientRequestId = true; + this.azureClient = new AzureClient(this); + initializeService(); + } + + /** + * Gets the User-Agent header for the client. + * + * @return the user agent string. + */ + @Override + public String userAgent() { + return String.format("%s (%s, %s)", super.userAgent(), "ComputerVisionClient", "2.0"); + } + + private void initializeService() { + service = restClient().retrofit().create(ComputerVisionClientService.class); + } + + /** + * The interface defining all the services for ComputerVisionClient to be + * used by Retrofit to actually perform REST calls. + */ + interface ComputerVisionClientService { + @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient listModels" }) + @GET("models") + Observable<Response<ResponseBody>> listModels(@Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient analyzeImage" }) + @POST("analyze") + Observable<Response<ResponseBody>> analyzeImage(@Query("visualFeatures") String visualFeatures, @Query("details") String details, @Query("language") String language, @Header("accept-language") String acceptLanguage, @Body ImageUrl imageUrl, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient generateThumbnail" }) + @POST("generateThumbnail") + @Streaming + Observable<Response<ResponseBody>> generateThumbnail(@Query("width") int width, @Query("height") int height, @Query("smartCropping") Boolean smartCropping, @Header("accept-language") String acceptLanguage, @Body ImageUrl imageUrl, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient recognizePrintedText" }) + @POST("ocr") + Observable<Response<ResponseBody>> recognizePrintedText(@Query("detectOrientation") boolean detectOrientation, @Query("language") OcrLanguages language, @Header("accept-language") String acceptLanguage, @Body ImageUrl imageUrl, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient describeImage" }) + @POST("describe") + Observable<Response<ResponseBody>> describeImage(@Query("maxCandidates") Integer maxCandidates, @Query("language") String language, @Header("accept-language") String acceptLanguage, @Body ImageUrl imageUrl, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient tagImage" }) + @POST("tag") + Observable<Response<ResponseBody>>
tagImage(@Query("language") String language, @Header("accept-language") String acceptLanguage, @Body ImageUrl imageUrl, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient analyzeImageByDomain" }) + @POST("models/{model}/analyze") + Observable> analyzeImageByDomain(@Path("model") String model, @Query("language") String language, @Header("accept-language") String acceptLanguage, @Body ImageUrl imageUrl, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient recognizeText" }) + @POST("recognizeText") + Observable> recognizeText(@Query("mode") TextRecognitionMode mode, @Header("accept-language") String acceptLanguage, @Body ImageUrl imageUrl, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient getTextOperationResult" }) + @GET("textOperations/{operationId}") + Observable> getTextOperationResult(@Path("operationId") String operationId, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/octet-stream", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient analyzeImageInStream" }) + @POST("analyze") + Observable> analyzeImageInStream(@Query("visualFeatures") String visualFeatures, @Query("details") String details, @Query("language") String language, @Body RequestBody image, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/octet-stream", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient generateThumbnailInStream" }) + @POST("generateThumbnail") + @Streaming + Observable> generateThumbnailInStream(@Query("width") int width, @Query("height") int height, @Body RequestBody image, @Query("smartCropping") Boolean smartCropping, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/octet-stream", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient recognizePrintedTextInStream" }) + @POST("ocr") + Observable> recognizePrintedTextInStream(@Query("language") OcrLanguages language, @Query("detectOrientation") boolean detectOrientation, @Body RequestBody image, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/octet-stream", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient describeImageInStream" }) + @POST("describe") + Observable> describeImageInStream(@Query("maxCandidates") Integer maxCandidates, 
@Query("language") String language, @Body RequestBody image, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/octet-stream", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient tagImageInStream" }) + @POST("tag") + Observable> tagImageInStream(@Query("language") String language, @Body RequestBody image, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/octet-stream", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient analyzeImageByDomainInStream" }) + @POST("models/{model}/analyze") + Observable> analyzeImageByDomainInStream(@Path("model") String model, @Query("language") String language, @Body RequestBody image, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/octet-stream", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient recognizeTextInStream" }) + @POST("recognizeText") + Observable> recognizeTextInStream(@Body RequestBody image, @Query("mode") TextRecognitionMode mode, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + } + + /** + * This operation returns the list of domain-specific models that are supported by the Computer Vision API. Currently, the API only supports one domain-specific model: a celebrity recognizer. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ListModelsResult object if successful. + */ + public ListModelsResult listModels() { + return listModelsWithServiceResponseAsync().toBlocking().single().body(); + } + + /** + * This operation returns the list of domain-specific models that are supported by the Computer Vision API. Currently, the API only supports one domain-specific model: a celebrity recognizer. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture listModelsAsync(final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(listModelsWithServiceResponseAsync(), serviceCallback); + } + + /** + * This operation returns the list of domain-specific models that are supported by the Computer Vision API. Currently, the API only supports one domain-specific model: a celebrity recognizer. A successful response will be returned in JSON. 
If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ListModelsResult object + */ + public Observable listModelsAsync() { + return listModelsWithServiceResponseAsync().map(new Func1, ListModelsResult>() { + @Override + public ListModelsResult call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation returns the list of domain-specific models that are supported by the Computer Vision API. Currently, the API only supports one domain-specific model: a celebrity recognizer. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ListModelsResult object + */ + public Observable> listModelsWithServiceResponseAsync() { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + return service.listModels(this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = listModelsDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse listModelsDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .build(response); + } + + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageAnalysis object if successful. + */ + public ImageAnalysis analyzeImage(String url) { + return analyzeImageWithServiceResponseAsync(url).toBlocking().single().body(); + } + + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. + * + * @param url Publicly reachable URL of an image + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
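A usage sketch for the blocking entry points above, assuming a client configured as in the earlier construction sketch. The image URL is a placeholder, and the VisualFeatureTypes and Details element types used by the richer analyzeImage overload (shown further below) are assumed from the generated models rather than from this excerpt.

import java.util.Arrays;
import java.util.Collections;

ListModelsResult models = client.listModels();                                   // domain-specific models
ImageAnalysis defaults = client.analyzeImage("https://example.com/photo.jpg");   // categories only by default

// Richer overload: choose visual features, domain details and output language explicitly.
ImageAnalysis detailed = client.analyzeImage(
        "https://example.com/photo.jpg",
        Arrays.asList(VisualFeatureTypes.DESCRIPTION, VisualFeatureTypes.TAGS),
        Collections.singletonList(Details.CELEBRITIES),
        "en");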
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture analyzeImageAsync(String url, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(analyzeImageWithServiceResponseAsync(url), serviceCallback); + } + + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageAnalysis object + */ + public Observable analyzeImageAsync(String url) { + return analyzeImageWithServiceResponseAsync(url).map(new Func1, ImageAnalysis>() { + @Override + public ImageAnalysis call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageAnalysis object + */ + public Observable> analyzeImageWithServiceResponseAsync(String url) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + final List visualFeatures = null; + final List
details = null; + final String language = null; + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + String visualFeaturesConverted = this.serializerAdapter().serializeList(visualFeatures, CollectionFormat.CSV);String detailsConverted = this.serializerAdapter().serializeList(details, CollectionFormat.CSV); + return service.analyzeImage(visualFeaturesConverted, detailsConverted, language, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = analyzeImageDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. + * + * @param url Publicly reachable URL of an image + * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageAnalysis object if successful. + */ + public ImageAnalysis analyzeImage(String url, List visualFeatures, List
details, String language) { + return analyzeImageWithServiceResponseAsync(url, visualFeatures, details, language).toBlocking().single().body(); + } + + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. + * + * @param url Publicly reachable URL of an image + * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture analyzeImageAsync(String url, List visualFeatures, List
details, String language, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(analyzeImageWithServiceResponseAsync(url, visualFeatures, details, language), serviceCallback); + } + + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. + * + * @param url Publicly reachable URL of an image + * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageAnalysis object + */ + public Observable analyzeImageAsync(String url, List visualFeatures, List
details, String language) { + return analyzeImageWithServiceResponseAsync(url, visualFeatures, details, language).map(new Func1, ImageAnalysis>() { + @Override + public ImageAnalysis call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. + * + * @param url Publicly reachable URL of an image + * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageAnalysis object + */ + public Observable> analyzeImageWithServiceResponseAsync(String url, List visualFeatures, List
details, String language) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + Validator.validate(visualFeatures); + Validator.validate(details); + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + String visualFeaturesConverted = this.serializerAdapter().serializeList(visualFeatures, CollectionFormat.CSV);String detailsConverted = this.serializerAdapter().serializeList(details, CollectionFormat.CSV); + return service.analyzeImage(visualFeaturesConverted, detailsConverted, language, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = analyzeImageDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse analyzeImageDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .build(response); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws CloudException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the InputStream object if successful. + */ + public InputStream generateThumbnail(int width, int height, String url) { + return generateThumbnailWithServiceResponseAsync(width, height, url).toBlocking().single().body(); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. 
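For the thumbnail operation just defined, a small blocking sketch that streams the returned binary to disk. It assumes the configured client from the construction sketch, a surrounding method that may throw IOException, and placeholder URL and file names.

import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.OutputStream;

try (InputStream thumbnail = client.generateThumbnail(200, 200, "https://example.com/photo.jpg");
     OutputStream out = new FileOutputStream("thumbnail.jpg")) {
    byte[] buffer = new byte[8192];
    int read;
    while ((read = thumbnail.read(buffer)) != -1) {
        out.write(buffer, 0, read); // copy the thumbnail bytes to the local file
    }
}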
+ * @param url Publicly reachable URL of an image + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture generateThumbnailAsync(int width, int height, String url, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(generateThumbnailWithServiceResponseAsync(width, height, url), serviceCallback); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + public Observable generateThumbnailAsync(int width, int height, String url) { + return generateThumbnailWithServiceResponseAsync(width, height, url).map(new Func1, InputStream>() { + @Override + public InputStream call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. 
+ * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + public Observable> generateThumbnailWithServiceResponseAsync(int width, int height, String url) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + final Boolean smartCropping = null; + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + return service.generateThumbnail(width, height, smartCropping, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = generateThumbnailDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param url Publicly reachable URL of an image + * @param smartCropping Boolean flag for enabling smart cropping. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws CloudException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the InputStream object if successful. + */ + public InputStream generateThumbnail(int width, int height, String url, Boolean smartCropping) { + return generateThumbnailWithServiceResponseAsync(width, height, url, smartCropping).toBlocking().single().body(); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param url Publicly reachable URL of an image + * @param smartCropping Boolean flag for enabling smart cropping. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
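The smartCropping overload and its rx.Observable variant can be exercised the same way; again a sketch against the assumed client and a placeholder URL.

// Blocking call with smart cropping enabled.
InputStream cropped = client.generateThumbnail(200, 200, "https://example.com/photo.jpg", true);

// Non-blocking variant: the Async methods return RxJava 1.x Observables.
client.generateThumbnailAsync(200, 200, "https://example.com/photo.jpg", true)
      .subscribe(
          stream -> System.out.println("thumbnail stream received"),
          error -> System.err.println("generateThumbnail failed: " + error.getMessage()));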
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture generateThumbnailAsync(int width, int height, String url, Boolean smartCropping, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(generateThumbnailWithServiceResponseAsync(width, height, url, smartCropping), serviceCallback); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param url Publicly reachable URL of an image + * @param smartCropping Boolean flag for enabling smart cropping. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + public Observable generateThumbnailAsync(int width, int height, String url, Boolean smartCropping) { + return generateThumbnailWithServiceResponseAsync(width, height, url, smartCropping).map(new Func1, InputStream>() { + @Override + public InputStream call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param url Publicly reachable URL of an image + * @param smartCropping Boolean flag for enabling smart cropping. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + public Observable> generateThumbnailWithServiceResponseAsync(int width, int height, String url, Boolean smartCropping) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + return service.generateThumbnail(width, height, smartCropping, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = generateThumbnailDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse generateThumbnailDelegate(Response response) throws CloudException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(CloudException.class) + .build(response); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the OcrResult object if successful. + */ + public OcrResult recognizePrintedText(boolean detectOrientation, String url) { + return recognizePrintedTextWithServiceResponseAsync(detectOrientation, url).toBlocking().single().body(); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param url Publicly reachable URL of an image + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
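An OCR sketch for the default-language form above. The client and URL are assumed as before, and the regions/lines/words accessors reflect the generated OcrResult model, which is not part of this excerpt.

OcrResult ocr = client.recognizePrintedText(true, "https://example.com/sign.jpg"); // detectOrientation = true
ocr.regions().forEach(region ->
        region.lines().forEach(line ->
                line.words().forEach(word -> System.out.print(word.text() + " "))));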
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture recognizePrintedTextAsync(boolean detectOrientation, String url, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(recognizePrintedTextWithServiceResponseAsync(detectOrientation, url), serviceCallback); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResult object + */ + public Observable recognizePrintedTextAsync(boolean detectOrientation, String url) { + return recognizePrintedTextWithServiceResponseAsync(detectOrientation, url).map(new Func1, OcrResult>() { + @Override + public OcrResult call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResult object + */ + public Observable> recognizePrintedTextWithServiceResponseAsync(boolean detectOrientation, String url) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + final OcrLanguages language = null; + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + return service.recognizePrintedText(detectOrientation, language, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = recognizePrintedTextDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. 
Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param url Publicly reachable URL of an image + * @param language The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the OcrResult object if successful. + */ + public OcrResult recognizePrintedText(boolean detectOrientation, String url, OcrLanguages language) { + return recognizePrintedTextWithServiceResponseAsync(detectOrientation, url, language).toBlocking().single().body(); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param url Publicly reachable URL of an image + * @param language The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture recognizePrintedTextAsync(boolean detectOrientation, String url, OcrLanguages language, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(recognizePrintedTextWithServiceResponseAsync(detectOrientation, url, language), serviceCallback); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. 
With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param url Publicly reachable URL of an image + * @param language The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResult object + */ + public Observable recognizePrintedTextAsync(boolean detectOrientation, String url, OcrLanguages language) { + return recognizePrintedTextWithServiceResponseAsync(detectOrientation, url, language).map(new Func1, OcrResult>() { + @Override + public OcrResult call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param url Publicly reachable URL of an image + * @param language The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. 
Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResult object + */ + public Observable> recognizePrintedTextWithServiceResponseAsync(boolean detectOrientation, String url, OcrLanguages language) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + return service.recognizePrintedText(detectOrientation, language, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = recognizePrintedTextDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse recognizePrintedTextDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .build(response); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageDescription object if successful. + */ + public ImageDescription describeImage(String url) { + return describeImageWithServiceResponseAsync(url).toBlocking().single().body(); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param url Publicly reachable URL of an image + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
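When the text language is known in advance, it can be pinned through the OcrLanguages overload instead of the 'unk' auto-detection default; the enum constant name below is assumed from the generated OcrLanguages type.

// German signage, for example; skips language auto-detection.
OcrResult germanOcr = client.recognizePrintedText(true, "https://example.com/schild.jpg", OcrLanguages.DE);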
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture describeImageAsync(String url, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(describeImageWithServiceResponseAsync(url), serviceCallback); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescription object + */ + public Observable describeImageAsync(String url) { + return describeImageWithServiceResponseAsync(url).map(new Func1, ImageDescription>() { + @Override + public ImageDescription call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescription object + */ + public Observable> describeImageWithServiceResponseAsync(String url) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + final Integer maxCandidates = null; + final String language = null; + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + return service.describeImage(maxCandidates, language, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = describeImageDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. 
Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param url Publicly reachable URL of an image + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageDescription object if successful. + */ + public ImageDescription describeImage(String url, Integer maxCandidates, String language) { + return describeImageWithServiceResponseAsync(url, maxCandidates, language).toBlocking().single().body(); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param url Publicly reachable URL of an image + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture describeImageAsync(String url, Integer maxCandidates, String language, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(describeImageWithServiceResponseAsync(url, maxCandidates, language), serviceCallback); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param url Publicly reachable URL of an image + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. 
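A sketch for the three-argument describeImage overload above; the caption accessors (text, confidence) are assumed from the generated ImageDescription and ImageCaption models.

ImageDescription description = client.describeImage("https://example.com/photo.jpg", 3, "en"); // up to 3 captions
description.captions().forEach(caption ->
        System.out.println(caption.text() + " (confidence " + caption.confidence() + ")"));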
+ * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescription object + */ + public Observable describeImageAsync(String url, Integer maxCandidates, String language) { + return describeImageWithServiceResponseAsync(url, maxCandidates, language).map(new Func1, ImageDescription>() { + @Override + public ImageDescription call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param url Publicly reachable URL of an image + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescription object + */ + public Observable> describeImageWithServiceResponseAsync(String url, Integer maxCandidates, String language) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + return service.describeImage(maxCandidates, language, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = describeImageDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse describeImageDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .build(response); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. 
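The same call in non-blocking form, which is how the *Async methods on this client are meant to be consumed; the handlers are placeholders.

client.describeImageAsync("https://example.com/photo.jpg", 1, "en")
      .subscribe(
          result -> System.out.println("description received"),
          error -> System.err.println("describeImage failed: " + error.getMessage()));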
Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the TagResult object if successful. + */ + public TagResult tagImage(String url) { + return tagImageWithServiceResponseAsync(url).toBlocking().single().body(); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param url Publicly reachable URL of an image + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture tagImageAsync(String url, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(tagImageWithServiceResponseAsync(url), serviceCallback); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResult object + */ + public Observable tagImageAsync(String url) { + return tagImageWithServiceResponseAsync(url).map(new Func1, TagResult>() { + @Override + public TagResult call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. 
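+ * <p>Illustrative usage sketch, not generated code. It assumes an authenticated
+ * {@code ComputerVisionClient} named {@code client} and that the generated {@code TagResult} and
+ * {@code ImageTag} models expose {@code tags()}, {@code name()} and {@code confidence()}:</p>
+ * <pre>{@code
+ * // Blocking variant; the async overloads return the same TagResult payload.
+ * TagResult tagResult = client.tagImage("https://example.com/sample.jpg");
+ * for (ImageTag tag : tagResult.tags()) {
+ *     System.out.println(tag.name() + " (confidence " + tag.confidence() + ")");
+ * }
+ * }</pre>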
+ * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResult object + */ + public Observable> tagImageWithServiceResponseAsync(String url) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + final String language = null; + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + return service.tagImage(language, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = tagImageDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the TagResult object if successful. + */ + public TagResult tagImage(String url, String language) { + return tagImageWithServiceResponseAsync(url, language).toBlocking().single().body(); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture tagImageAsync(String url, String language, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(tagImageWithServiceResponseAsync(url, language), serviceCallback); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResult object + */ + public Observable tagImageAsync(String url, String language) { + return tagImageWithServiceResponseAsync(url, language).map(new Func1, TagResult>() { + @Override + public TagResult call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. 
Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResult object + */ + public Observable> tagImageWithServiceResponseAsync(String url, String language) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + return service.tagImage(language, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = tagImageDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse tagImageDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .build(response); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the DomainModelResults object if successful. + */ + public DomainModelResults analyzeImageByDomain(String model, String url) { + return analyzeImageByDomainWithServiceResponseAsync(model, url).toBlocking().single().body(); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param url Publicly reachable URL of an image + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture analyzeImageByDomainAsync(String model, String url, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(analyzeImageByDomainWithServiceResponseAsync(model, url), serviceCallback); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResults object + */ + public Observable analyzeImageByDomainAsync(String model, String url) { + return analyzeImageByDomainWithServiceResponseAsync(model, url).map(new Func1, DomainModelResults>() { + @Override + public DomainModelResults call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResults object + */ + public Observable> analyzeImageByDomainWithServiceResponseAsync(String model, String url) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (model == null) { + throw new IllegalArgumentException("Parameter model is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + final String language = null; + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + return service.analyzeImageByDomain(model, language, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = analyzeImageByDomainDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. 
The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the DomainModelResults object if successful. + */ + public DomainModelResults analyzeImageByDomain(String model, String url, String language) { + return analyzeImageByDomainWithServiceResponseAsync(model, url, language).toBlocking().single().body(); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture analyzeImageByDomainAsync(String model, String url, String language, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(analyzeImageByDomainWithServiceResponseAsync(model, url, language), serviceCallback); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. 
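+ * <p>Illustrative usage sketch, not generated code. It assumes an authenticated
+ * {@code ComputerVisionClient} named {@code client}; the "celebrities" model name is taken from
+ * the operation description above, and exposing the payload through {@code result()} is an
+ * assumption about the generated {@code DomainModelResults} model:</p>
+ * <pre>{@code
+ * DomainModelResults celebrities = client.analyzeImageByDomain(
+ *         "celebrities", "https://example.com/sample.jpg", "en");
+ * // The domain-specific payload is untyped; its shape depends on the chosen model.
+ * System.out.println(celebrities.result());
+ * }</pre>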
+ * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResults object + */ + public Observable analyzeImageByDomainAsync(String model, String url, String language) { + return analyzeImageByDomainWithServiceResponseAsync(model, url, language).map(new Func1, DomainModelResults>() { + @Override + public DomainModelResults call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResults object + */ + public Observable> analyzeImageByDomainWithServiceResponseAsync(String model, String url, String language) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (model == null) { + throw new IllegalArgumentException("Parameter model is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + return service.analyzeImageByDomain(model, language, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = analyzeImageByDomainDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse analyzeImageByDomainDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .build(response); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. 
The 'Operation-Location' field contains the URL that you must use for your Get Recognize Text Operation Result operation. + * + * @param mode Type of text to recognize. Possible values include: 'Handwritten', 'Printed' + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + */ + public void recognizeText(String url, TextRecognitionMode mode) { + recognizeTextWithServiceResponseAsync(url, mode).toBlocking().single().body(); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Recognize Text Operation Result operation. + * + * @param mode Type of text to recognize. Possible values include: 'Handwritten', 'Printed' + * @param url Publicly reachable URL of an image + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture recognizeTextAsync(String url, TextRecognitionMode mode, final ServiceCallback serviceCallback) { + return ServiceFuture.fromHeaderResponse(recognizeTextWithServiceResponseAsync(url, mode), serviceCallback); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Recognize Text Operation Result operation. + * + * @param mode Type of text to recognize. Possible values include: 'Handwritten', 'Printed' + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceResponseWithHeaders} object if successful. + */ + public Observable recognizeTextAsync(String url, TextRecognitionMode mode) { + return recognizeTextWithServiceResponseAsync(url, mode).map(new Func1, Void>() { + @Override + public Void call(ServiceResponseWithHeaders response) { + return response.body(); + } + }); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Recognize Text Operation Result operation. + * + * @param mode Type of text to recognize. Possible values include: 'Handwritten', 'Printed' + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceResponseWithHeaders} object if successful. 
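+ * <p>Illustrative usage sketch, not generated code. Because the operation returns no body, the
+ * sketch uses this service-response variant to read the 'Operation-Location' header; it assumes
+ * an authenticated {@code ComputerVisionClient} named {@code client}, the generated enum
+ * constant {@code TextRecognitionMode.HANDWRITTEN}, and that {@code RecognizeTextHeaders}
+ * exposes an {@code operationLocation()} accessor:</p>
+ * <pre>{@code
+ * ServiceResponseWithHeaders<Void, RecognizeTextHeaders> response =
+ *         client.recognizeTextWithServiceResponseAsync(
+ *                 "https://example.com/handwriting.jpg", TextRecognitionMode.HANDWRITTEN)
+ *                 .toBlocking().single();
+ * String operationLocation = response.headers().operationLocation();
+ * // The trailing path segment of operationLocation is the operationId to pass to
+ * // getTextOperationResult.
+ * }</pre>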
+ */ + public Observable> recognizeTextWithServiceResponseAsync(String url, TextRecognitionMode mode) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (mode == null) { + throw new IllegalArgumentException("Parameter mode is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + return service.recognizeText(mode, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponseWithHeaders clientResponse = recognizeTextDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponseWithHeaders recognizeTextDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(202, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .buildWithHeaders(response, RecognizeTextHeaders.class); + } + + /** + * This interface is used for getting text operation result. The URL to this interface should be retrieved from 'Operation-Location' field returned from Recognize Text interface. + * + * @param operationId Id of the text operation returned in the response of the 'Recognize Text' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the TextOperationResult object if successful. + */ + public TextOperationResult getTextOperationResult(String operationId) { + return getTextOperationResultWithServiceResponseAsync(operationId).toBlocking().single().body(); + } + + /** + * This interface is used for getting text operation result. The URL to this interface should be retrieved from 'Operation-Location' field returned from Recognize Text interface. + * + * @param operationId Id of the text operation returned in the response of the 'Recognize Text' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture getTextOperationResultAsync(String operationId, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(getTextOperationResultWithServiceResponseAsync(operationId), serviceCallback); + } + + /** + * This interface is used for getting text operation result. The URL to this interface should be retrieved from 'Operation-Location' field returned from Recognize Text interface. 
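+ * <p>Illustrative polling sketch, not generated code. It assumes {@code operationLocation} was
+ * read from the 'Operation-Location' header of a prior Recognize Text call, and that the
+ * generated {@code TextOperationResult} model exposes {@code status()} together with a
+ * {@code TextOperationStatusCodes} enum; interruption and error handling are omitted:</p>
+ * <pre>{@code
+ * String operationId = operationLocation.substring(operationLocation.lastIndexOf('/') + 1);
+ * TextOperationResult result = client.getTextOperationResult(operationId);
+ * while (result.status() == TextOperationStatusCodes.NOT_STARTED
+ *         || result.status() == TextOperationStatusCodes.RUNNING) {
+ *     Thread.sleep(1000);
+ *     result = client.getTextOperationResult(operationId);
+ * }
+ * }</pre>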
+ * + * @param operationId Id of the text operation returned in the response of the 'Recognize Text' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TextOperationResult object + */ + public Observable getTextOperationResultAsync(String operationId) { + return getTextOperationResultWithServiceResponseAsync(operationId).map(new Func1, TextOperationResult>() { + @Override + public TextOperationResult call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This interface is used for getting text operation result. The URL to this interface should be retrieved from 'Operation-Location' field returned from Recognize Text interface. + * + * @param operationId Id of the text operation returned in the response of the 'Recognize Text' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TextOperationResult object + */ + public Observable> getTextOperationResultWithServiceResponseAsync(String operationId) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (operationId == null) { + throw new IllegalArgumentException("Parameter operationId is required and cannot be null."); + } + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + return service.getTextOperationResult(operationId, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = getTextOperationResultDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse getTextOperationResultDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .build(response); + } + + /** + * This operation extracts a rich set of visual features based on the image content. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageAnalysis object if successful. + */ + public ImageAnalysis analyzeImageInStream(byte[] image) { + return analyzeImageInStreamWithServiceResponseAsync(image).toBlocking().single().body(); + } + + /** + * This operation extracts a rich set of visual features based on the image content. + * + * @param image An image stream. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture analyzeImageInStreamAsync(byte[] image, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(analyzeImageInStreamWithServiceResponseAsync(image), serviceCallback); + } + + /** + * This operation extracts a rich set of visual features based on the image content. + * + * @param image An image stream. 
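+ * <p>Illustrative usage sketch, not generated code. It targets the richer overload declared
+ * below, and assumes an authenticated {@code ComputerVisionClient} named {@code client}, a local
+ * sample file, the generated {@code VisualFeatureTypes} and {@code Details} enums, and the
+ * {@code description()}/{@code captions()} accessors on {@code ImageAnalysis}; I/O error
+ * handling is omitted:</p>
+ * <pre>{@code
+ * byte[] image = java.nio.file.Files.readAllBytes(java.nio.file.Paths.get("sample.jpg"));
+ * ImageAnalysis analysis = client.analyzeImageInStream(
+ *         image,
+ *         java.util.Arrays.asList(VisualFeatureTypes.DESCRIPTION, VisualFeatureTypes.TAGS),
+ *         java.util.Collections.singletonList(Details.CELEBRITIES),
+ *         "en");
+ * System.out.println(analysis.description().captions().get(0).text());
+ * }</pre>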
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageAnalysis object + */ + public Observable analyzeImageInStreamAsync(byte[] image) { + return analyzeImageInStreamWithServiceResponseAsync(image).map(new Func1, ImageAnalysis>() { + @Override + public ImageAnalysis call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation extracts a rich set of visual features based on the image content. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageAnalysis object + */ + public Observable> analyzeImageInStreamWithServiceResponseAsync(byte[] image) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + final List visualFeatures = null; + final List
details = null; + final String language = null; + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + String visualFeaturesConverted = this.serializerAdapter().serializeList(visualFeatures, CollectionFormat.CSV); + String detailsConverted = this.serializerAdapter().serializeList(details, CollectionFormat.CSV); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.analyzeImageInStream(visualFeaturesConverted, detailsConverted, language, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<ImageAnalysis>>>() { + @Override + public Observable<ServiceResponse<ImageAnalysis>> call(Response<ResponseBody> response) { + try { + ServiceResponse<ImageAnalysis> clientResponse = analyzeImageInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * This operation extracts a rich set of visual features based on the image content. + * + * @param image An image stream. + * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include: Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white. Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include: Celebrities - identifies celebrities if detected in the image. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en". Supported languages: en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageAnalysis object if successful. + */ + public ImageAnalysis analyzeImageInStream(byte[] image, List<VisualFeatureTypes> visualFeatures, List<Details>
details, String language) { + return analyzeImageInStreamWithServiceResponseAsync(image, visualFeatures, details, language).toBlocking().single().body(); + } + + /** + * This operation extracts a rich set of visual features based on the image content. + * + * @param image An image stream. + * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture analyzeImageInStreamAsync(byte[] image, List visualFeatures, List
details, String language, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(analyzeImageInStreamWithServiceResponseAsync(image, visualFeatures, details, language), serviceCallback); + } + + /** + * This operation extracts a rich set of visual features based on the image content. + * + * @param image An image stream. + * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageAnalysis object + */ + public Observable analyzeImageInStreamAsync(byte[] image, List visualFeatures, List
details, String language) { + return analyzeImageInStreamWithServiceResponseAsync(image, visualFeatures, details, language).map(new Func1, ImageAnalysis>() { + @Override + public ImageAnalysis call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation extracts a rich set of visual features based on the image content. + * + * @param image An image stream. + * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageAnalysis object + */ + public Observable> analyzeImageInStreamWithServiceResponseAsync(byte[] image, List visualFeatures, List
details, String language) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + Validator.validate(visualFeatures); + Validator.validate(details); + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + String visualFeaturesConverted = this.serializerAdapter().serializeList(visualFeatures, CollectionFormat.CSV); + String detailsConverted = this.serializerAdapter().serializeList(details, CollectionFormat.CSV); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.analyzeImageInStream(visualFeaturesConverted, detailsConverted, language, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<ImageAnalysis>>>() { + @Override + public Observable<ServiceResponse<ImageAnalysis>> call(Response<ResponseBody> response) { + try { + ServiceResponse<ImageAnalysis> clientResponse = analyzeImageInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse<ImageAnalysis> analyzeImageInStreamDelegate(Response<ResponseBody> response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().<ImageAnalysis, ComputerVisionErrorException>newInstance(this.serializerAdapter()) + .register(200, new TypeToken<ImageAnalysis>() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .build(response); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws CloudException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the InputStream object if successful. + */ + public InputStream generateThumbnailInStream(int width, int height, byte[] image) { + return generateThumbnailInStreamWithServiceResponseAsync(width, height, image).toBlocking().single().body(); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50.
+ * @param image An image stream. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture generateThumbnailInStreamAsync(int width, int height, byte[] image, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(generateThumbnailInStreamWithServiceResponseAsync(width, height, image), serviceCallback); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + public Observable generateThumbnailInStreamAsync(int width, int height, byte[] image) { + return generateThumbnailInStreamWithServiceResponseAsync(width, height, image).map(new Func1, InputStream>() { + @Override + public InputStream call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param image An image stream. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + public Observable> generateThumbnailInStreamWithServiceResponseAsync(int width, int height, byte[] image) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + final Boolean smartCropping = null; + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.generateThumbnailInStream(width, height, imageConverted, smartCropping, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = generateThumbnailInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param image An image stream. + * @param smartCropping Boolean flag for enabling smart cropping. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws CloudException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the InputStream object if successful. + */ + public InputStream generateThumbnailInStream(int width, int height, byte[] image, Boolean smartCropping) { + return generateThumbnailInStreamWithServiceResponseAsync(width, height, image, smartCropping).toBlocking().single().body(); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param image An image stream. + * @param smartCropping Boolean flag for enabling smart cropping. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture generateThumbnailInStreamAsync(int width, int height, byte[] image, Boolean smartCropping, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(generateThumbnailInStreamWithServiceResponseAsync(width, height, image, smartCropping), serviceCallback); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param image An image stream. + * @param smartCropping Boolean flag for enabling smart cropping. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + public Observable generateThumbnailInStreamAsync(int width, int height, byte[] image, Boolean smartCropping) { + return generateThumbnailInStreamWithServiceResponseAsync(width, height, image, smartCropping).map(new Func1, InputStream>() { + @Override + public InputStream call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param image An image stream. + * @param smartCropping Boolean flag for enabling smart cropping. 
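+ * <p>Illustrative usage sketch, not generated code. It assumes an authenticated
+ * {@code ComputerVisionClient} named {@code client} and a local sample file; I/O error handling
+ * and stream cleanup are omitted:</p>
+ * <pre>{@code
+ * byte[] image = java.nio.file.Files.readAllBytes(java.nio.file.Paths.get("sample.jpg"));
+ * // Request a 100x100 thumbnail and let the service smart-crop around the region of interest.
+ * InputStream thumbnail = client.generateThumbnailInStream(100, 100, image, true);
+ * java.nio.file.Files.copy(thumbnail, java.nio.file.Paths.get("thumbnail.jpg"));
+ * }</pre>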
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + public Observable> generateThumbnailInStreamWithServiceResponseAsync(int width, int height, byte[] image, Boolean smartCropping) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.generateThumbnailInStream(width, height, imageConverted, smartCropping, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = generateThumbnailInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse generateThumbnailInStreamDelegate(Response response) throws CloudException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(CloudException.class) + .build(response); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the OcrResult object if successful. + */ + public OcrResult recognizePrintedTextInStream(boolean detectOrientation, byte[] image) { + return recognizePrintedTextInStreamWithServiceResponseAsync(detectOrientation, image).toBlocking().single().body(); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture recognizePrintedTextInStreamAsync(boolean detectOrientation, byte[] image, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(recognizePrintedTextInStreamWithServiceResponseAsync(detectOrientation, image), serviceCallback); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResult object + */ + public Observable recognizePrintedTextInStreamAsync(boolean detectOrientation, byte[] image) { + return recognizePrintedTextInStreamWithServiceResponseAsync(detectOrientation, image).map(new Func1, OcrResult>() { + @Override + public OcrResult call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResult object + */ + public Observable> recognizePrintedTextInStreamWithServiceResponseAsync(boolean detectOrientation, byte[] image) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + final OcrLanguages language = null; + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.recognizePrintedTextInStream(language, detectOrientation, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = recognizePrintedTextInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. + * @param language The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the OcrResult object if successful. + */ + public OcrResult recognizePrintedTextInStream(boolean detectOrientation, byte[] image, OcrLanguages language) { + return recognizePrintedTextInStreamWithServiceResponseAsync(detectOrientation, image, language).toBlocking().single().body(); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. + * @param language The BCP-47 language code of the text to be detected in the image. 
The default value is 'unk'. Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture recognizePrintedTextInStreamAsync(boolean detectOrientation, byte[] image, OcrLanguages language, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(recognizePrintedTextInStreamWithServiceResponseAsync(detectOrientation, image, language), serviceCallback); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. + * @param language The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResult object + */ + public Observable recognizePrintedTextInStreamAsync(boolean detectOrientation, byte[] image, OcrLanguages language) { + return recognizePrintedTextInStreamWithServiceResponseAsync(detectOrientation, image, language).map(new Func1, OcrResult>() { + @Override + public OcrResult call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. + * @param language The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. 
Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResult object + */ + public Observable> recognizePrintedTextInStreamWithServiceResponseAsync(boolean detectOrientation, byte[] image, OcrLanguages language) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.recognizePrintedTextInStream(language, detectOrientation, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = recognizePrintedTextInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse recognizePrintedTextInStreamDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .build(response); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageDescription object if successful. + */ + public ImageDescription describeImageInStream(byte[] image) { + return describeImageInStreamWithServiceResponseAsync(image).toBlocking().single().body(); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param image An image stream. 
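A small sketch of the printed-text OCR overloads above, under the assumption that client is an already-configured ComputerVisionClient and imageBytes holds the raw image bytes; both names, and the OcrSample class, are placeholders.

import com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient;
import com.microsoft.azure.cognitiveservices.vision.computervision.models.OcrResult;

public final class OcrSample {
    static OcrResult readPrintedText(ComputerVisionClient client, byte[] imageBytes) {
        // detectOrientation=true lets the service correct a rotated or upside-down image
        // before extraction; the text language defaults to 'unk' (auto-detect) because the
        // OcrLanguages overload is not used here.
        return client.recognizePrintedTextInStream(true, imageBytes);
    }
}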
+ * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture describeImageInStreamAsync(byte[] image, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(describeImageInStreamWithServiceResponseAsync(image), serviceCallback); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescription object + */ + public Observable describeImageInStreamAsync(byte[] image) { + return describeImageInStreamWithServiceResponseAsync(image).map(new Func1, ImageDescription>() { + @Override + public ImageDescription call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescription object + */ + public Observable> describeImageInStreamWithServiceResponseAsync(byte[] image) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + final Integer maxCandidates = null; + final String language = null; + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.describeImageInStream(maxCandidates, language, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = describeImageInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. 
More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param image An image stream. + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageDescription object if successful. + */ + public ImageDescription describeImageInStream(byte[] image, Integer maxCandidates, String language) { + return describeImageInStreamWithServiceResponseAsync(image, maxCandidates, language).toBlocking().single().body(); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param image An image stream. + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture describeImageInStreamAsync(byte[] image, Integer maxCandidates, String language, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(describeImageInStreamWithServiceResponseAsync(image, maxCandidates, language), serviceCallback); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. 
+ * + * @param image An image stream. + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescription object + */ + public Observable describeImageInStreamAsync(byte[] image, Integer maxCandidates, String language) { + return describeImageInStreamWithServiceResponseAsync(image, maxCandidates, language).map(new Func1, ImageDescription>() { + @Override + public ImageDescription call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param image An image stream. + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. 
Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescription object + */ + public Observable> describeImageInStreamWithServiceResponseAsync(byte[] image, Integer maxCandidates, String language) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.describeImageInStream(maxCandidates, language, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = describeImageInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse describeImageInStreamDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .build(response); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the TagResult object if successful. + */ + public TagResult tagImageInStream(byte[] image) { + return tagImageInStreamWithServiceResponseAsync(image).toBlocking().single().body(); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
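A brief sketch of the describeImageInStream overload that takes maxCandidates and language; client and imageBytes are assumed placeholders, and the values 3 and "en" are arbitrary choices within the documented ranges.

import com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient;
import com.microsoft.azure.cognitiveservices.vision.computervision.models.ImageDescription;

public final class DescribeSample {
    static ImageDescription describe(ComputerVisionClient client, byte[] imageBytes) {
        // Request up to three candidate captions, generated in English.
        return client.describeImageInStream(imageBytes, 3, "en");
    }
}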
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture tagImageInStreamAsync(byte[] image, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(tagImageInStreamWithServiceResponseAsync(image), serviceCallback); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResult object + */ + public Observable tagImageInStreamAsync(byte[] image) { + return tagImageInStreamWithServiceResponseAsync(image).map(new Func1, TagResult>() { + @Override + public TagResult call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResult object + */ + public Observable> tagImageInStreamWithServiceResponseAsync(byte[] image) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + final String language = null; + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.tagImageInStream(language, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = tagImageInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. 
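A short sketch showing both the blocking and the rx.Observable tagging overloads above; client and imageBytes are placeholder names assumed to be set up elsewhere.

import com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient;
import com.microsoft.azure.cognitiveservices.vision.computervision.models.TagResult;

public final class TagSample {
    static void tag(ComputerVisionClient client, byte[] imageBytes) {
        // Blocking overload: tags are returned in English by default.
        TagResult tags = client.tagImageInStream(imageBytes);
        System.out.println(tags);

        // Non-blocking alternative via the rx.Observable overload.
        client.tagImageInStreamAsync(imageBytes)
              .subscribe(result -> System.out.println(result));
    }
}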
+ * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the TagResult object if successful. + */ + public TagResult tagImageInStream(byte[] image, String language) { + return tagImageInStreamWithServiceResponseAsync(image, language).toBlocking().single().body(); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture tagImageInStreamAsync(byte[] image, String language, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(tagImageInStreamWithServiceResponseAsync(image, language), serviceCallback); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResult object + */ + public Observable tagImageInStreamAsync(byte[] image, String language) { + return tagImageInStreamWithServiceResponseAsync(image, language).map(new Func1, TagResult>() { + @Override + public TagResult call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. 
The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResult object + */ + public Observable> tagImageInStreamWithServiceResponseAsync(byte[] image, String language) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.tagImageInStream(language, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = tagImageInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse tagImageInStreamDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .build(response); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the DomainModelResults object if successful. + */ + public DomainModelResults analyzeImageByDomainInStream(String model, byte[] image) { + return analyzeImageByDomainInStreamWithServiceResponseAsync(model, image).toBlocking().single().body(); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. 
The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture analyzeImageByDomainInStreamAsync(String model, byte[] image, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(analyzeImageByDomainInStreamWithServiceResponseAsync(model, image), serviceCallback); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResults object + */ + public Observable analyzeImageByDomainInStreamAsync(String model, byte[] image) { + return analyzeImageByDomainInStreamWithServiceResponseAsync(model, image).map(new Func1, DomainModelResults>() { + @Override + public DomainModelResults call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream. 
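A minimal sketch of analyzeImageByDomainInStream using the "celebrities" model named in the description above; client and imageBytes are assumed placeholders.

import com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient;
import com.microsoft.azure.cognitiveservices.vision.computervision.models.DomainModelResults;

public final class DomainModelSample {
    static DomainModelResults recognizeCelebrities(ComputerVisionClient client, byte[] imageBytes) {
        // "celebrities" is the only domain-specific model named in the documentation above.
        return client.analyzeImageByDomainInStream("celebrities", imageBytes);
    }
}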
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResults object + */ + public Observable> analyzeImageByDomainInStreamWithServiceResponseAsync(String model, byte[] image) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (model == null) { + throw new IllegalArgumentException("Parameter model is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + final String language = null; + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.analyzeImageByDomainInStream(model, language, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = analyzeImageByDomainInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the DomainModelResults object if successful. + */ + public DomainModelResults analyzeImageByDomainInStream(String model, byte[] image, String language) { + return analyzeImageByDomainInStreamWithServiceResponseAsync(model, image, language).toBlocking().single().body(); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream. + * @param language The desired language for output generation. 
If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture analyzeImageByDomainInStreamAsync(String model, byte[] image, String language, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(analyzeImageByDomainInStreamWithServiceResponseAsync(model, image, language), serviceCallback); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResults object + */ + public Observable analyzeImageByDomainInStreamAsync(String model, byte[] image, String language) { + return analyzeImageByDomainInStreamWithServiceResponseAsync(model, image, language).map(new Func1, DomainModelResults>() { + @Override + public DomainModelResults call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese. 
Possible values include: 'en', 'es', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResults object + */ + public Observable> analyzeImageByDomainInStreamWithServiceResponseAsync(String model, byte[] image, String language) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (model == null) { + throw new IllegalArgumentException("Parameter model is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.analyzeImageByDomainInStream(model, language, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = analyzeImageByDomainInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse analyzeImageByDomainInStreamDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .build(response); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Recognize Text Operation Result operation. + * + * @param image An image stream. + * @param mode Type of text to recognize. Possible values include: 'Handwritten', 'Printed' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + */ + public void recognizeTextInStream(byte[] image, TextRecognitionMode mode) { + recognizeTextInStreamWithServiceResponseAsync(image, mode).toBlocking().single().body(); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Recognize Text Operation Result operation. + * + * @param image An image stream. + * @param mode Type of text to recognize. Possible values include: 'Handwritten', 'Printed' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture recognizeTextInStreamAsync(byte[] image, TextRecognitionMode mode, final ServiceCallback serviceCallback) { + return ServiceFuture.fromHeaderResponse(recognizeTextInStreamWithServiceResponseAsync(image, mode), serviceCallback); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. 
The 'Operation-Location' field contains the URL that you must use for your Get Recognize Text Operation Result operation. + * + * @param image An image stream. + * @param mode Type of text to recognize. Possible values include: 'Handwritten', 'Printed' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceResponseWithHeaders} object if successful. + */ + public Observable recognizeTextInStreamAsync(byte[] image, TextRecognitionMode mode) { + return recognizeTextInStreamWithServiceResponseAsync(image, mode).map(new Func1, Void>() { + @Override + public Void call(ServiceResponseWithHeaders response) { + return response.body(); + } + }); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Recognize Text Operation Result operation. + * + * @param image An image stream. + * @param mode Type of text to recognize. Possible values include: 'Handwritten', 'Printed' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceResponseWithHeaders} object if successful. + */ + public Observable> recognizeTextInStreamWithServiceResponseAsync(byte[] image, TextRecognitionMode mode) { + if (this.endpoint() == null) { + throw new IllegalArgumentException("Parameter this.endpoint() is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + if (mode == null) { + throw new IllegalArgumentException("Parameter mode is required and cannot be null."); + } + String parameterizedHost = Joiner.on(", ").join("{Endpoint}", this.endpoint()); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.recognizeTextInStream(imageConverted, mode, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponseWithHeaders clientResponse = recognizeTextInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponseWithHeaders recognizeTextInStreamDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(202, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .buildWithHeaders(response, RecognizeTextInStreamHeaders.class); + } + +} diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/package-info.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/package-info.java index 70eaef23dc65..a0fdb3318f4b 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/package-info.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/package-info.java @@ -5,7 +5,7 @@ // Code generated by Microsoft (R) AutoRest Code Generator. 
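A hedged sketch of starting the Recognize Text operation from a stream. client and imageBytes are placeholders, and TextRecognitionMode.PRINTED is assumed to be the enum constant backing the documented 'Printed' value; reading the resulting 'Operation-Location' header would require the *WithServiceResponseAsync overload and the RecognizeTextInStreamHeaders accessor, which this sketch does not reproduce.

import com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionClient;
import com.microsoft.azure.cognitiveservices.vision.computervision.models.TextRecognitionMode;

public final class RecognizeTextSample {
    static void startRecognition(ComputerVisionClient client, byte[] imageBytes) {
        // Starts the asynchronous Recognize Text operation; the call itself returns no body.
        // The URL to poll for results arrives in the 'Operation-Location' response header,
        // which is only reachable through the *WithServiceResponseAsync overload.
        // PRINTED is assumed to correspond to the documented 'Printed' value.
        client.recognizeTextInStream(imageBytes, TextRecognitionMode.PRINTED);
    }
}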
/** - * This package contains the implementation classes for ComputerVisionAPI. + * This package contains the implementation classes for ComputerVisionClient. * The Computer Vision API provides state-of-the-art algorithms to process images and return information. For example, it can be used to determine if an image contains mature content, or it can be used to find all the faces in an image. It also has other features like estimating dominant and accent colors, categorizing the content of images, and describing an image with complete English sentences. Additionally, it can also intelligently generate images thumbnails for displaying large images effectively. */ package com.microsoft.azure.cognitiveservices.vision.computervision.implementation; diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AdultInfo.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AdultInfo.java index 205ea9d90a00..8b43895d9501 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AdultInfo.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/AdultInfo.java @@ -41,7 +41,7 @@ public class AdultInfo { private double racyScore; /** - * Get the isAdultContent value. + * Get a value indicating if the image contains adult-oriented content. * * @return the isAdultContent value */ @@ -50,7 +50,7 @@ public boolean isAdultContent() { } /** - * Set the isAdultContent value. + * Set a value indicating if the image contains adult-oriented content. * * @param isAdultContent the isAdultContent value to set * @return the AdultInfo object itself. @@ -61,7 +61,7 @@ public AdultInfo withIsAdultContent(boolean isAdultContent) { } /** - * Get the isRacyContent value. + * Get a value indicating if the image is racy. * * @return the isRacyContent value */ @@ -70,7 +70,7 @@ public boolean isRacyContent() { } /** - * Set the isRacyContent value. + * Set a value indicating if the image is racy. * * @param isRacyContent the isRacyContent value to set * @return the AdultInfo object itself. @@ -81,7 +81,7 @@ public AdultInfo withIsRacyContent(boolean isRacyContent) { } /** - * Get the adultScore value. + * Get a score from 0 to 1 that indicates how much adult content is within the image. * * @return the adultScore value */ @@ -90,7 +90,7 @@ public double adultScore() { } /** - * Set the adultScore value. + * Set a score from 0 to 1 that indicates how much adult content is within the image. * * @param adultScore the adultScore value to set * @return the AdultInfo object itself. @@ -101,7 +101,7 @@ public AdultInfo withAdultScore(double adultScore) { } /** - * Get the racyScore value. + * Get a score from 0 to 1 that indicates how suggestive the image is. * * @return the racyScore value */ @@ -110,7 +110,7 @@ public double racyScore() { } /** - * Set the racyScore value. + * Set a score from 0 to 1 that indicates how suggestive the image is. * * @param racyScore the racyScore value to set * @return the AdultInfo object itself.
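Given the clarified AdultInfo accessors above, a small sketch of how a caller might gate content on them; the 0.5 thresholds are arbitrary illustration values, and AdultInfoCheck is a placeholder class name.

import com.microsoft.azure.cognitiveservices.vision.computervision.models.AdultInfo;

public final class AdultInfoCheck {
    // Flags an image when the service marks it as adult or racy, or when either score
    // crosses an arbitrary 0.5 threshold chosen purely for illustration.
    static boolean shouldBlock(AdultInfo adult) {
        return adult.isAdultContent()
                || adult.isRacyContent()
                || adult.adultScore() > 0.5
                || adult.racyScore() > 0.5;
    }
}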
diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Category.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Category.java index dd592c4ccfdb..ed04d7d2e2a0 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Category.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Category.java @@ -24,7 +24,7 @@ public class Category { * Scoring of the category. */ @JsonProperty(value = "score") - private Double score; + private double score; /** * The detail property. @@ -33,7 +33,7 @@ public class Category { private CategoryDetail detail; /** - * Get the name value. + * Get name of the category. * * @return the name value */ @@ -42,7 +42,7 @@ public String name() { } /** - * Set the name value. + * Set name of the category. * * @param name the name value to set * @return the Category object itself. @@ -53,21 +53,21 @@ public Category withName(String name) { } /** - * Get the score value. + * Get scoring of the category. * * @return the score value */ - public Double score() { + public double score() { return this.score; } /** - * Set the score value. + * Set scoring of the category. * * @param score the score value to set * @return the Category object itself. */ - public Category withScore(Double score) { + public Category withScore(double score) { this.score = score; return this; } diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/CategoryDetail.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/CategoryDetail.java index 22e683f3b919..79c03de15172 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/CategoryDetail.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/CategoryDetail.java @@ -22,7 +22,13 @@ public class CategoryDetail { private List celebrities; /** - * Get the celebrities value. + * An array of landmarks if any identified. + */ + @JsonProperty(value = "landmarks") + private List landmarks; + + /** + * Get an array of celebrities if any identified. * * @return the celebrities value */ @@ -31,7 +37,7 @@ public List celebrities() { } /** - * Set the celebrities value. + * Set an array of celebrities if any identified. * * @param celebrities the celebrities value to set * @return the CategoryDetail object itself. @@ -41,4 +47,24 @@ public CategoryDetail withCelebrities(List celebrities) { return this; } + /** + * Get an array of landmarks if any identified. + * + * @return the landmarks value + */ + public List landmarks() { + return this.landmarks; + } + + /** + * Set an array of landmarks if any identified. + * + * @param landmarks the landmarks value to set + * @return the CategoryDetail object itself. 
+ */ + public CategoryDetail withLandmarks(List landmarks) { + this.landmarks = landmarks; + return this; + } + } diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/CelebritiesModel.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/CelebritiesModel.java index 3cc852a62222..34c8a7aef1d9 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/CelebritiesModel.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/CelebritiesModel.java @@ -24,7 +24,7 @@ public class CelebritiesModel { * Level of confidence ranging from 0 to 1. */ @JsonProperty(value = "confidence") - private Double confidence; + private double confidence; /** * The faceRectangle property. @@ -33,7 +33,7 @@ public class CelebritiesModel { private FaceRectangle faceRectangle; /** - * Get the name value. + * Get name of the celebrity. * * @return the name value */ @@ -42,7 +42,7 @@ public String name() { } /** - * Set the name value. + * Set name of the celebrity. * * @param name the name value to set * @return the CelebritiesModel object itself. @@ -53,21 +53,21 @@ public CelebritiesModel withName(String name) { } /** - * Get the confidence value. + * Get level of confidence ranging from 0 to 1. * * @return the confidence value */ - public Double confidence() { + public double confidence() { return this.confidence; } /** - * Set the confidence value. + * Set level of confidence ranging from 0 to 1. * * @param confidence the confidence value to set * @return the CelebritiesModel object itself. */ - public CelebritiesModel withConfidence(Double confidence) { + public CelebritiesModel withConfidence(double confidence) { this.confidence = confidence; return this; } diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/CelebrityResults.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/CelebrityResults.java new file mode 100644 index 000000000000..340c1a828245 --- /dev/null +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/CelebrityResults.java @@ -0,0 +1,96 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision.models; + +import java.util.List; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * List of celebrities recognized in the image. + */ +public class CelebrityResults { + /** + * The celebrities property. + */ + @JsonProperty(value = "celebrities") + private List celebrities; + + /** + * Id of the REST API request. + */ + @JsonProperty(value = "requestId") + private String requestId; + + /** + * The metadata property. + */ + @JsonProperty(value = "metadata") + private ImageMetadata metadata; + + /** + * Get the celebrities value. 
+ * + * @return the celebrities value + */ + public List celebrities() { + return this.celebrities; + } + + /** + * Set the celebrities value. + * + * @param celebrities the celebrities value to set + * @return the CelebrityResults object itself. + */ + public CelebrityResults withCelebrities(List celebrities) { + this.celebrities = celebrities; + return this; + } + + /** + * Get id of the REST API request. + * + * @return the requestId value + */ + public String requestId() { + return this.requestId; + } + + /** + * Set id of the REST API request. + * + * @param requestId the requestId value to set + * @return the CelebrityResults object itself. + */ + public CelebrityResults withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the metadata value. + * + * @return the metadata value + */ + public ImageMetadata metadata() { + return this.metadata; + } + + /** + * Set the metadata value. + * + * @param metadata the metadata value to set + * @return the CelebrityResults object itself. + */ + public CelebrityResults withMetadata(ImageMetadata metadata) { + this.metadata = metadata; + return this; + } + +} diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ColorInfo.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ColorInfo.java index 09e6666dd6b5..6a16a774589a 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ColorInfo.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ColorInfo.java @@ -43,10 +43,10 @@ public class ColorInfo { * A value indicating if the image is black and white. */ @JsonProperty(value = "isBWImg") - private Boolean isBWImg; + private boolean isBWImg; /** - * Get the dominantColorForeground value. + * Get possible dominant foreground color. * * @return the dominantColorForeground value */ @@ -55,7 +55,7 @@ public String dominantColorForeground() { } /** - * Set the dominantColorForeground value. + * Set possible dominant foreground color. * * @param dominantColorForeground the dominantColorForeground value to set * @return the ColorInfo object itself. @@ -66,7 +66,7 @@ public ColorInfo withDominantColorForeground(String dominantColorForeground) { } /** - * Get the dominantColorBackground value. + * Get possible dominant background color. * * @return the dominantColorBackground value */ @@ -75,7 +75,7 @@ public String dominantColorBackground() { } /** - * Set the dominantColorBackground value. + * Set possible dominant background color. * * @param dominantColorBackground the dominantColorBackground value to set * @return the ColorInfo object itself. @@ -86,7 +86,7 @@ public ColorInfo withDominantColorBackground(String dominantColorBackground) { } /** - * Get the dominantColors value. + * Get an array of possible dominant colors. * * @return the dominantColors value */ @@ -95,7 +95,7 @@ public List dominantColors() { } /** - * Set the dominantColors value. + * Set an array of possible dominant colors. * * @param dominantColors the dominantColors value to set * @return the ColorInfo object itself. @@ -106,7 +106,7 @@ public ColorInfo withDominantColors(List dominantColors) { } /** - * Get the accentColor value. + * Get possible accent color. 
* * @return the accentColor value */ @@ -115,7 +115,7 @@ public String accentColor() { } /** - * Set the accentColor value. + * Set possible accent color. * * @param accentColor the accentColor value to set * @return the ColorInfo object itself. @@ -126,21 +126,21 @@ public ColorInfo withAccentColor(String accentColor) { } /** - * Get the isBWImg value. + * Get a value indicating if the image is black and white. * * @return the isBWImg value */ - public Boolean isBWImg() { + public boolean isBWImg() { return this.isBWImg; } /** - * Set the isBWImg value. + * Set a value indicating if the image is black and white. * * @param isBWImg the isBWImg value to set * @return the ColorInfo object itself. */ - public ColorInfo withIsBWImg(Boolean isBWImg) { + public ColorInfo withIsBWImg(boolean isBWImg) { this.isBWImg = isBWImg; return this; } diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ComputerVisionError.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ComputerVisionError.java index 84072e38f369..c30fc49f18ca 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ComputerVisionError.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ComputerVisionError.java @@ -37,7 +37,7 @@ public class ComputerVisionError { private String requestId; /** - * Get the code value. + * Get the error code. Possible values include: 'InvalidImageUrl', 'InvalidImageFormat', 'InvalidImageSize', 'NotSupportedVisualFeature', 'NotSupportedImage', 'InvalidDetails', 'NotSupportedLanguage', 'BadArgument', 'FailedToProcess', 'Timeout', 'InternalServerError', 'Unspecified', 'StorageException'. * * @return the code value */ @@ -46,7 +46,7 @@ public ComputerVisionErrorCodes code() { } /** - * Set the code value. + * Set the error code. Possible values include: 'InvalidImageUrl', 'InvalidImageFormat', 'InvalidImageSize', 'NotSupportedVisualFeature', 'NotSupportedImage', 'InvalidDetails', 'NotSupportedLanguage', 'BadArgument', 'FailedToProcess', 'Timeout', 'InternalServerError', 'Unspecified', 'StorageException'. * * @param code the code value to set * @return the ComputerVisionError object itself. @@ -57,7 +57,7 @@ public ComputerVisionError withCode(ComputerVisionErrorCodes code) { } /** - * Get the message value. + * Get a message explaining the error reported by the service. * * @return the message value */ @@ -66,7 +66,7 @@ public String message() { } /** - * Set the message value. + * Set a message explaining the error reported by the service. * * @param message the message value to set * @return the ComputerVisionError object itself. @@ -77,7 +77,7 @@ public ComputerVisionError withMessage(String message) { } /** - * Get the requestId value. + * Get a unique request identifier. * * @return the requestId value */ @@ -86,7 +86,7 @@ public String requestId() { } /** - * Set the requestId value. + * Set a unique request identifier. * * @param requestId the requestId value to set * @return the ComputerVisionError object itself. 
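Reviewer sketch (illustrative, not part of the diff): the sharpened ComputerVisionError documentation maps onto a simple caller-side log line. The error instance is assumed to come from a rejected request, and the formatting is arbitrary.

    import com.microsoft.azure.cognitiveservices.vision.computervision.models.ComputerVisionError;

    /** Review-only sketch of surfacing the error fields documented above. */
    final class ErrorLoggingSketch {
        static void logError(ComputerVisionError error) {
            // code() is the ComputerVisionErrorCodes enum; requestId() helps correlate with service logs.
            System.err.printf("Computer Vision request %s failed with %s: %s%n",
                    error.requestId(), error.code(), error.message());
        }
    }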
diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/DomainModelResults.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/DomainModelResults.java index 7d08038c71e6..1328de425a67 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/DomainModelResults.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/DomainModelResults.java @@ -34,7 +34,7 @@ public class DomainModelResults { private ImageMetadata metadata; /** - * Get the result value. + * Get model-specific response. * * @return the result value */ @@ -43,7 +43,7 @@ public Object result() { } /** - * Set the result value. + * Set model-specific response. * * @param result the result value to set * @return the DomainModelResults object itself. @@ -54,7 +54,7 @@ public DomainModelResults withResult(Object result) { } /** - * Get the requestId value. + * Get id of the REST API request. * * @return the requestId value */ @@ -63,7 +63,7 @@ public String requestId() { } /** - * Set the requestId value. + * Set id of the REST API request. * * @param requestId the requestId value to set * @return the DomainModelResults object itself. diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/FaceDescription.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/FaceDescription.java index e6cf2bd66be0..f0320ff8c9ef 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/FaceDescription.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/FaceDescription.java @@ -18,7 +18,7 @@ public class FaceDescription { * Possible age of the face. */ @JsonProperty(value = "age") - private Integer age; + private int age; /** * Possible gender of the face. Possible values include: 'Male', 'Female'. @@ -33,27 +33,27 @@ public class FaceDescription { private FaceRectangle faceRectangle; /** - * Get the age value. + * Get possible age of the face. * * @return the age value */ - public Integer age() { + public int age() { return this.age; } /** - * Set the age value. + * Set possible age of the face. * * @param age the age value to set * @return the FaceDescription object itself. */ - public FaceDescription withAge(Integer age) { + public FaceDescription withAge(int age) { this.age = age; return this; } /** - * Get the gender value. + * Get possible gender of the face. Possible values include: 'Male', 'Female'. * * @return the gender value */ @@ -62,7 +62,7 @@ public Gender gender() { } /** - * Set the gender value. + * Set possible gender of the face. Possible values include: 'Male', 'Female'. * * @param gender the gender value to set * @return the FaceDescription object itself. 
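Reviewer sketch (illustrative, not part of the diff): age() now returns a primitive int and gender() the new Gender enum (defined later in this diff), so a summary string can be built without boxing or null handling. The describe helper is hypothetical.

    import com.microsoft.azure.cognitiveservices.vision.computervision.models.FaceDescription;

    /** Review-only sketch of reading the reworked FaceDescription accessors. */
    final class FaceSummarySketch {
        static String describe(FaceDescription face) {
            // Gender.toString() serializes to "Male"/"Female" per the enum added in this diff.
            return face.gender() + ", roughly " + face.age() + " years old";
        }
    }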
diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/FaceRectangle.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/FaceRectangle.java index 3fd6560193c5..5a8e53db4ad2 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/FaceRectangle.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/FaceRectangle.java @@ -18,102 +18,102 @@ public class FaceRectangle { * X-coordinate of the top left point of the face. */ @JsonProperty(value = "left") - private Integer left; + private int left; /** * Y-coordinate of the top left point of the face. */ @JsonProperty(value = "top") - private Integer top; + private int top; /** * Width measured from the top-left point of the face. */ @JsonProperty(value = "width") - private Integer width; + private int width; /** * Height measured from the top-left point of the face. */ @JsonProperty(value = "height") - private Integer height; + private int height; /** - * Get the left value. + * Get x-coordinate of the top left point of the face. * * @return the left value */ - public Integer left() { + public int left() { return this.left; } /** - * Set the left value. + * Set x-coordinate of the top left point of the face. * * @param left the left value to set * @return the FaceRectangle object itself. */ - public FaceRectangle withLeft(Integer left) { + public FaceRectangle withLeft(int left) { this.left = left; return this; } /** - * Get the top value. + * Get y-coordinate of the top left point of the face. * * @return the top value */ - public Integer top() { + public int top() { return this.top; } /** - * Set the top value. + * Set y-coordinate of the top left point of the face. * * @param top the top value to set * @return the FaceRectangle object itself. */ - public FaceRectangle withTop(Integer top) { + public FaceRectangle withTop(int top) { this.top = top; return this; } /** - * Get the width value. + * Get width measured from the top-left point of the face. * * @return the width value */ - public Integer width() { + public int width() { return this.width; } /** - * Set the width value. + * Set width measured from the top-left point of the face. * * @param width the width value to set * @return the FaceRectangle object itself. */ - public FaceRectangle withWidth(Integer width) { + public FaceRectangle withWidth(int width) { this.width = width; return this; } /** - * Get the height value. + * Get height measured from the top-left point of the face. * * @return the height value */ - public Integer height() { + public int height() { return this.height; } /** - * Set the height value. + * Set height measured from the top-left point of the face. * * @param height the height value to set * @return the FaceRectangle object itself. 
*/ - public FaceRectangle withHeight(Integer height) { + public FaceRectangle withHeight(int height) { this.height = height; return this; } diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Gender.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Gender.java new file mode 100644 index 000000000000..87989b3109f6 --- /dev/null +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/Gender.java @@ -0,0 +1,53 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision.models; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for Gender. + */ +public enum Gender { + /** Enum value Male. */ + MALE("Male"), + + /** Enum value Female. */ + FEMALE("Female"); + + /** The actual serialized value for a Gender instance. */ + private String value; + + Gender(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a Gender instance. + * + * @param value the serialized value to parse. + * @return the parsed Gender object, or null if unable to parse. + */ + @JsonCreator + public static Gender fromString(String value) { + Gender[] items = Gender.values(); + for (Gender item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageAnalysis.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageAnalysis.java index 183a432012b9..73898d913f49 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageAnalysis.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageAnalysis.java @@ -70,7 +70,7 @@ public class ImageAnalysis { private ImageMetadata metadata; /** - * Get the categories value. + * Get an array indicating identified categories. * * @return the categories value */ @@ -79,7 +79,7 @@ public List categories() { } /** - * Set the categories value. + * Set an array indicating identified categories. * * @param categories the categories value to set * @return the ImageAnalysis object itself. @@ -150,7 +150,7 @@ public ImageAnalysis withImageType(ImageType imageType) { } /** - * Get the tags value. + * Get a list of tags with confidence level. * * @return the tags value */ @@ -159,7 +159,7 @@ public List tags() { } /** - * Set the tags value. + * Set a list of tags with confidence level. * * @param tags the tags value to set * @return the ImageAnalysis object itself. @@ -190,7 +190,7 @@ public ImageAnalysis withDescription(ImageDescriptionDetails description) { } /** - * Get the faces value. + * Get an array of possible faces within the image. 
* * @return the faces value */ @@ -199,7 +199,7 @@ public List faces() { } /** - * Set the faces value. + * Set an array of possible faces within the image. * * @param faces the faces value to set * @return the ImageAnalysis object itself. @@ -210,7 +210,7 @@ public ImageAnalysis withFaces(List faces) { } /** - * Get the requestId value. + * Get id of the request for tracking purposes. * * @return the requestId value */ @@ -219,7 +219,7 @@ public String requestId() { } /** - * Set the requestId value. + * Set id of the request for tracking purposes. * * @param requestId the requestId value to set * @return the ImageAnalysis object itself. diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageCaption.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageCaption.java index 28c21952f10b..030eb6749406 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageCaption.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageCaption.java @@ -24,10 +24,10 @@ public class ImageCaption { * The level of confidence the service has in the caption. */ @JsonProperty(value = "confidence") - private Double confidence; + private double confidence; /** - * Get the text value. + * Get the text of the caption. * * @return the text value */ @@ -36,7 +36,7 @@ public String text() { } /** - * Set the text value. + * Set the text of the caption. * * @param text the text value to set * @return the ImageCaption object itself. @@ -47,21 +47,21 @@ public ImageCaption withText(String text) { } /** - * Get the confidence value. + * Get the level of confidence the service has in the caption. * * @return the confidence value */ - public Double confidence() { + public double confidence() { return this.confidence; } /** - * Set the confidence value. + * Set the level of confidence the service has in the caption. * * @param confidence the confidence value to set * @return the ImageCaption object itself. */ - public ImageCaption withConfidence(Double confidence) { + public ImageCaption withConfidence(double confidence) { this.confidence = confidence; return this; } diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageDescription.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageDescription.java index 9307157aa08c..5292238f033d 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageDescription.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageDescription.java @@ -33,17 +33,17 @@ public class ImageDescription { /** * Id of the REST API request. */ - @JsonProperty(value = "description.requestId") + @JsonProperty(value = "requestId") private String requestId; /** * The metadata property. */ - @JsonProperty(value = "description.metadata") + @JsonProperty(value = "metadata") private ImageMetadata metadata; /** - * Get the tags value. + * Get a collection of image tags. 
* * @return the tags value */ @@ -52,7 +52,7 @@ public List tags() { } /** - * Set the tags value. + * Set a collection of image tags. * * @param tags the tags value to set * @return the ImageDescription object itself. @@ -63,7 +63,7 @@ public ImageDescription withTags(List tags) { } /** - * Get the captions value. + * Get a list of captions, sorted by confidence level. * * @return the captions value */ @@ -72,7 +72,7 @@ public List captions() { } /** - * Set the captions value. + * Set a list of captions, sorted by confidence level. * * @param captions the captions value to set * @return the ImageDescription object itself. @@ -83,7 +83,7 @@ public ImageDescription withCaptions(List captions) { } /** - * Get the requestId value. + * Get id of the REST API request. * * @return the requestId value */ @@ -92,7 +92,7 @@ public String requestId() { } /** - * Set the requestId value. + * Set id of the REST API request. * * @param requestId the requestId value to set * @return the ImageDescription object itself. diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageDescriptionDetails.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageDescriptionDetails.java index 4bb6091bb61b..761dfb209215 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageDescriptionDetails.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageDescriptionDetails.java @@ -29,19 +29,7 @@ public class ImageDescriptionDetails { private List captions; /** - * Id of the REST API request. - */ - @JsonProperty(value = "requestId") - private String requestId; - - /** - * The metadata property. - */ - @JsonProperty(value = "metadata") - private ImageMetadata metadata; - - /** - * Get the tags value. + * Get a collection of image tags. * * @return the tags value */ @@ -50,7 +38,7 @@ public List tags() { } /** - * Set the tags value. + * Set a collection of image tags. * * @param tags the tags value to set * @return the ImageDescriptionDetails object itself. @@ -61,7 +49,7 @@ public ImageDescriptionDetails withTags(List tags) { } /** - * Get the captions value. + * Get a list of captions, sorted by confidence level. * * @return the captions value */ @@ -70,7 +58,7 @@ public List captions() { } /** - * Set the captions value. + * Set a list of captions, sorted by confidence level. * * @param captions the captions value to set * @return the ImageDescriptionDetails object itself. @@ -80,44 +68,4 @@ public ImageDescriptionDetails withCaptions(List captions) { return this; } - /** - * Get the requestId value. - * - * @return the requestId value - */ - public String requestId() { - return this.requestId; - } - - /** - * Set the requestId value. - * - * @param requestId the requestId value to set - * @return the ImageDescriptionDetails object itself. - */ - public ImageDescriptionDetails withRequestId(String requestId) { - this.requestId = requestId; - return this; - } - - /** - * Get the metadata value. - * - * @return the metadata value - */ - public ImageMetadata metadata() { - return this.metadata; - } - - /** - * Set the metadata value. - * - * @param metadata the metadata value to set - * @return the ImageDescriptionDetails object itself. 
- */ - public ImageDescriptionDetails withMetadata(ImageMetadata metadata) { - this.metadata = metadata; - return this; - } - } diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageMetadata.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageMetadata.java index 9c307eb01eb3..6515c560863d 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageMetadata.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageMetadata.java @@ -18,13 +18,13 @@ public class ImageMetadata { * Image width. */ @JsonProperty(value = "width") - private Integer width; + private int width; /** * Image height. */ @JsonProperty(value = "height") - private Integer height; + private int height; /** * Image format. @@ -33,47 +33,47 @@ public class ImageMetadata { private String format; /** - * Get the width value. + * Get image width. * * @return the width value */ - public Integer width() { + public int width() { return this.width; } /** - * Set the width value. + * Set image width. * * @param width the width value to set * @return the ImageMetadata object itself. */ - public ImageMetadata withWidth(Integer width) { + public ImageMetadata withWidth(int width) { this.width = width; return this; } /** - * Get the height value. + * Get image height. * * @return the height value */ - public Integer height() { + public int height() { return this.height; } /** - * Set the height value. + * Set image height. * * @param height the height value to set * @return the ImageMetadata object itself. */ - public ImageMetadata withHeight(Integer height) { + public ImageMetadata withHeight(int height) { this.height = height; return this; } /** - * Get the format value. + * Get image format. * * @return the format value */ @@ -82,7 +82,7 @@ public String format() { } /** - * Set the format value. + * Set image format. * * @param format the format value to set * @return the ImageMetadata object itself. diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageTag.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageTag.java index b9b0296c4f2e..305777abbff1 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageTag.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageTag.java @@ -24,10 +24,16 @@ public class ImageTag { * The level of confidence the service has in the caption. */ @JsonProperty(value = "confidence") - private Double confidence; + private double confidence; /** - * Get the name value. + * Optional categorization for the tag. + */ + @JsonProperty(value = "hint") + private String hint; + + /** + * Get the tag value. * * @return the name value */ @@ -36,7 +42,7 @@ public String name() { } /** - * Set the name value. + * Set the tag value. * * @param name the name value to set * @return the ImageTag object itself. 
@@ -47,23 +53,43 @@ public ImageTag withName(String name) { } /** - * Get the confidence value. + * Get the level of confidence the service has in the tag. * * @return the confidence value */ - public Double confidence() { + public double confidence() { return this.confidence; } /** - * Set the confidence value. + * Set the level of confidence the service has in the tag. * * @param confidence the confidence value to set * @return the ImageTag object itself. */ - public ImageTag withConfidence(Double confidence) { + public ImageTag withConfidence(double confidence) { this.confidence = confidence; return this; } + /** + * Get optional categorization for the tag. + * + * @return the hint value + */ + public String hint() { + return this.hint; + } + + /** + * Set optional categorization for the tag. + * + * @param hint the hint value to set + * @return the ImageTag object itself. + */ + public ImageTag withHint(String hint) { + this.hint = hint; + return this; + } + } diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageType.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageType.java index 284ed776e640..a5631156ea64 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageType.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageType.java @@ -18,50 +18,50 @@ public class ImageType { * Confidence level that the image is a clip art. */ @JsonProperty(value = "clipArtType") - private Double clipArtType; + private int clipArtType; /** * Confidence level that the image is a line drawing. */ @JsonProperty(value = "lineDrawingType") - private Double lineDrawingType; + private int lineDrawingType; /** - * Get the clipArtType value. + * Get confidence level that the image is a clip art. * * @return the clipArtType value */ - public Double clipArtType() { + public int clipArtType() { return this.clipArtType; } /** - * Set the clipArtType value. + * Set confidence level that the image is a clip art. * * @param clipArtType the clipArtType value to set * @return the ImageType object itself. */ - public ImageType withClipArtType(Double clipArtType) { + public ImageType withClipArtType(int clipArtType) { this.clipArtType = clipArtType; return this; } /** - * Get the lineDrawingType value. + * Get confidence level that the image is a line drawing. * * @return the lineDrawingType value */ - public Double lineDrawingType() { + public int lineDrawingType() { return this.lineDrawingType; } /** - * Set the lineDrawingType value. + * Set confidence level that the image is a line drawing. * * @param lineDrawingType the lineDrawingType value to set * @return the ImageType object itself.
*/ - public ImageType withLineDrawingType(Double lineDrawingType) { + public ImageType withLineDrawingType(int lineDrawingType) { this.lineDrawingType = lineDrawingType; return this; } diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageUrl.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageUrl.java index 5c54d441da29..c3c90edc3292 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageUrl.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ImageUrl.java @@ -21,7 +21,7 @@ public class ImageUrl { private String url; /** - * Get the url value. + * Get publicly reachable URL of an image. * * @return the url value */ @@ -30,7 +30,7 @@ public String url() { } /** - * Set the url value. + * Set publicly reachable URL of an image. * * @param url the url value to set * @return the ImageUrl object itself. diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/LandmarkResults.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/LandmarkResults.java new file mode 100644 index 000000000000..c09936de5167 --- /dev/null +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/LandmarkResults.java @@ -0,0 +1,96 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision.models; + +import java.util.List; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * List of landmarks recognized in the image. + */ +public class LandmarkResults { + /** + * The landmarks property. + */ + @JsonProperty(value = "landmarks") + private List landmarks; + + /** + * Id of the REST API request. + */ + @JsonProperty(value = "requestId") + private String requestId; + + /** + * The metadata property. + */ + @JsonProperty(value = "metadata") + private ImageMetadata metadata; + + /** + * Get the landmarks value. + * + * @return the landmarks value + */ + public List landmarks() { + return this.landmarks; + } + + /** + * Set the landmarks value. + * + * @param landmarks the landmarks value to set + * @return the LandmarkResults object itself. + */ + public LandmarkResults withLandmarks(List landmarks) { + this.landmarks = landmarks; + return this; + } + + /** + * Get id of the REST API request. + * + * @return the requestId value + */ + public String requestId() { + return this.requestId; + } + + /** + * Set id of the REST API request. + * + * @param requestId the requestId value to set + * @return the LandmarkResults object itself. + */ + public LandmarkResults withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the metadata value. + * + * @return the metadata value + */ + public ImageMetadata metadata() { + return this.metadata; + } + + /** + * Set the metadata value. 
+ * + * @param metadata the metadata value to set + * @return the LandmarkResults object itself. + */ + public LandmarkResults withMetadata(ImageMetadata metadata) { + this.metadata = metadata; + return this; + } + +} diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/LandmarksModel.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/LandmarksModel.java new file mode 100644 index 000000000000..a9b37dfc80d9 --- /dev/null +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/LandmarksModel.java @@ -0,0 +1,69 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision.models; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * A landmark recognized in the image. + */ +public class LandmarksModel { + /** + * Name of the landmark. + */ + @JsonProperty(value = "name") + private String name; + + /** + * Confidence level for the landmark recognition. + */ + @JsonProperty(value = "confidence") + private double confidence; + + /** + * Get name of the landmark. + * + * @return the name value + */ + public String name() { + return this.name; + } + + /** + * Set name of the landmark. + * + * @param name the name value to set + * @return the LandmarksModel object itself. + */ + public LandmarksModel withName(String name) { + this.name = name; + return this; + } + + /** + * Get confidence level for the landmark recognition. + * + * @return the confidence value + */ + public double confidence() { + return this.confidence; + } + + /** + * Set confidence level for the landmark recognition. + * + * @param confidence the confidence value to set + * @return the LandmarksModel object itself. + */ + public LandmarksModel withConfidence(double confidence) { + this.confidence = confidence; + return this; + } + +} diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ListModelsResult.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ListModelsResult.java index b31b8b20c8e1..3396246ce2c2 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ListModelsResult.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/ListModelsResult.java @@ -22,7 +22,7 @@ public class ListModelsResult { private List modelsProperty; /** - * Get the modelsProperty value. + * Get an array of supported models. 
* * @return the modelsProperty value */ diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrLine.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrLine.java index 58edfaa554e1..ca9bf77f6781 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrLine.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrLine.java @@ -33,7 +33,7 @@ public class OcrLine { private List words; /** - * Get the boundingBox value. + * Get bounding box of a recognized line. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down. * * @return the boundingBox value */ @@ -42,7 +42,7 @@ public String boundingBox() { } /** - * Set the boundingBox value. + * Set bounding box of a recognized line. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down. * * @param boundingBox the boundingBox value to set * @return the OcrLine object itself. @@ -53,7 +53,7 @@ public OcrLine withBoundingBox(String boundingBox) { } /** - * Get the words value. + * Get an array of objects, where each object represents a recognized word. * * @return the words value */ @@ -62,7 +62,7 @@ public List words() { } /** - * Set the words value. + * Set an array of objects, where each object represents a recognized word. * * @param words the words value to set * @return the OcrLine object itself. diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrRegion.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrRegion.java index 80ce1cc91ce9..d38173577da0 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrRegion.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrRegion.java @@ -34,7 +34,7 @@ public class OcrRegion { private List lines; /** - * Get the boundingBox value. + * Get bounding box of a recognized region. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down. * * @return the boundingBox value */ @@ -43,7 +43,7 @@ public String boundingBox() { } /** - * Set the boundingBox value. + * Set bounding box of a recognized region. 
The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down. * * @param boundingBox the boundingBox value to set * @return the OcrRegion object itself. diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrResult.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrResult.java index f414a3e2c8f5..611c8492c986 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrResult.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrResult.java @@ -34,7 +34,7 @@ public class OcrResult { * recognized correctly. */ @JsonProperty(value = "textAngle") - private Double textAngle; + private double textAngle; /** * Orientation of the text recognized in the image. The value @@ -53,7 +53,7 @@ public class OcrResult { private List regions; /** - * Get the language value. + * Get the BCP-47 language code of the text in the image. * * @return the language value */ @@ -62,7 +62,7 @@ public String language() { } /** - * Set the language value. + * Set the BCP-47 language code of the text in the image. * * @param language the language value to set * @return the OcrResult object itself. @@ -73,27 +73,27 @@ public OcrResult withLanguage(String language) { } /** - * Get the textAngle value. + * Get the angle, in degrees, of the detected text with respect to the closest horizontal or vertical direction. After rotating the input image clockwise by this angle, the recognized text lines become horizontal or vertical. In combination with the orientation property it can be used to overlay recognition results correctly on the original image, by rotating either the original image or recognition results by a suitable angle around the center of the original image. If the angle cannot be confidently detected, this property is not present. If the image contains text at different angles, only part of the text will be recognized correctly. * * @return the textAngle value */ - public Double textAngle() { + public double textAngle() { return this.textAngle; } /** - * Set the textAngle value. + * Set the angle, in degrees, of the detected text with respect to the closest horizontal or vertical direction. After rotating the input image clockwise by this angle, the recognized text lines become horizontal or vertical. In combination with the orientation property it can be used to overlay recognition results correctly on the original image, by rotating either the original image or recognition results by a suitable angle around the center of the original image. If the angle cannot be confidently detected, this property is not present. If the image contains text at different angles, only part of the text will be recognized correctly. * * @param textAngle the textAngle value to set * @return the OcrResult object itself. 
*/ - public OcrResult withTextAngle(Double textAngle) { + public OcrResult withTextAngle(double textAngle) { this.textAngle = textAngle; return this; } /** - * Get the orientation value. + * Get orientation of the text recognized in the image. The value (up,down,left, or right) refers to the direction that the top of the recognized text is facing, after the image has been rotated around its center according to the detected text angle (see textAngle property). * * @return the orientation value */ @@ -102,7 +102,7 @@ public String orientation() { } /** - * Set the orientation value. + * Set orientation of the text recognized in the image. The value (up,down,left, or right) refers to the direction that the top of the recognized text is facing, after the image has been rotated around its center according to the detected text angle (see textAngle property). * * @param orientation the orientation value to set * @return the OcrResult object itself. @@ -113,7 +113,7 @@ public OcrResult withOrientation(String orientation) { } /** - * Get the regions value. + * Get an array of objects, where each object represents a region of recognized text. * * @return the regions value */ @@ -122,7 +122,7 @@ public List regions() { } /** - * Set the regions value. + * Set an array of objects, where each object represents a region of recognized text. * * @param regions the regions value to set * @return the OcrResult object itself. diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrWord.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrWord.java index 4fe259a3e343..de4d0ad86d6f 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrWord.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/OcrWord.java @@ -32,7 +32,7 @@ public class OcrWord { private String text; /** - * Get the boundingBox value. + * Get bounding box of a recognized word. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down. * * @return the boundingBox value */ @@ -41,7 +41,7 @@ public String boundingBox() { } /** - * Set the boundingBox value. + * Set bounding box of a recognized word. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down. * * @param boundingBox the boundingBox value to set * @return the OcrWord object itself. @@ -52,7 +52,7 @@ public OcrWord withBoundingBox(String boundingBox) { } /** - * Get the text value. + * Get string value of a recognized word. * * @return the text value */ @@ -61,7 +61,7 @@ public String text() { } /** - * Set the text value. + * Set string value of a recognized word. * * @param text the text value to set * @return the OcrWord object itself. 
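Reviewer sketch (illustrative, not part of the diff): the OCR documentation above describes a regions → lines → words hierarchy; flattening it into plain text looks roughly like the helper below. The generic element types and the unchanged regions()/lines() accessors are assumed from earlier releases rather than shown in this diff.

    import com.microsoft.azure.cognitiveservices.vision.computervision.models.OcrLine;
    import com.microsoft.azure.cognitiveservices.vision.computervision.models.OcrRegion;
    import com.microsoft.azure.cognitiveservices.vision.computervision.models.OcrResult;
    import com.microsoft.azure.cognitiveservices.vision.computervision.models.OcrWord;

    /** Review-only sketch that concatenates recognized words line by line. */
    final class OcrTextSketch {
        static String toPlainText(OcrResult ocr) {
            StringBuilder text = new StringBuilder();
            for (OcrRegion region : ocr.regions()) {
                for (OcrLine line : region.lines()) {
                    for (OcrWord word : line.words()) {
                        text.append(word.text()).append(' ');
                    }
                    text.append(System.lineSeparator());
                }
            }
            return text.toString();
        }
    }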
diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognizeTextHeaders.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognizeTextHeaders.java new file mode 100644 index 000000000000..56af19e3bbf0 --- /dev/null +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognizeTextHeaders.java @@ -0,0 +1,44 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision.models; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * Defines headers for RecognizeText operation. + */ +public class RecognizeTextHeaders { + /** + * URL to query for status of the operation. The operation ID will expire + * in 48 hours. + */ + @JsonProperty(value = "Operation-Location") + private String operationLocation; + + /** + * Get URL to query for status of the operation. The operation ID will expire in 48 hours. + * + * @return the operationLocation value + */ + public String operationLocation() { + return this.operationLocation; + } + + /** + * Set URL to query for status of the operation. The operation ID will expire in 48 hours. + * + * @param operationLocation the operationLocation value to set + * @return the RecognizeTextHeaders object itself. + */ + public RecognizeTextHeaders withOperationLocation(String operationLocation) { + this.operationLocation = operationLocation; + return this; + } + +} diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognizeTextInStreamHeaders.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognizeTextInStreamHeaders.java new file mode 100644 index 000000000000..31b927d83fa9 --- /dev/null +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/RecognizeTextInStreamHeaders.java @@ -0,0 +1,44 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision.models; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * Defines headers for RecognizeTextInStream operation. + */ +public class RecognizeTextInStreamHeaders { + /** + * URL to query for status of the operation. The operation ID will expire + * in 48 hours. + */ + @JsonProperty(value = "Operation-Location") + private String operationLocation; + + /** + * Get URL to query for status of the operation. The operation ID will expire in 48 hours. + * + * @return the operationLocation value + */ + public String operationLocation() { + return this.operationLocation; + } + + /** + * Set URL to query for status of the operation. The operation ID will expire in 48 hours.
+ * + * @param operationLocation the operationLocation value to set + * @return the RecognizeTextInStreamHeaders object itself. + */ + public RecognizeTextInStreamHeaders withOperationLocation(String operationLocation) { + this.operationLocation = operationLocation; + return this; + } + +} diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TagResult.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TagResult.java index 013aa7f2a267..a45425a66d45 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TagResult.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TagResult.java @@ -34,7 +34,7 @@ public class TagResult { private ImageMetadata metadata; /** - * Get the tags value. + * Get a list of tags with confidence level. * * @return the tags value */ @@ -43,7 +43,7 @@ public List tags() { } /** - * Set the tags value. + * Set a list of tags with confidence level. * * @param tags the tags value to set * @return the TagResult object itself. @@ -54,7 +54,7 @@ public TagResult withTags(List tags) { } /** - * Get the requestId value. + * Get id of the REST API request. * * @return the requestId value */ @@ -63,7 +63,7 @@ public String requestId() { } /** - * Set the requestId value. + * Set id of the REST API request. * * @param requestId the requestId value to set * @return the TagResult object itself. diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TextOperationResult.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TextOperationResult.java index 3df61ced2901..d3936cbab444 100755 --- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TextOperationResult.java +++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TextOperationResult.java @@ -28,7 +28,7 @@ public class TextOperationResult { private RecognitionResult recognitionResult; /** - * Get the status value. + * Get status of the text operation. Possible values include: 'Not Started', 'Running', 'Failed', 'Succeeded'. * * @return the status value */ @@ -37,7 +37,7 @@ public TextOperationStatusCodes status() { } /** - * Set the status value. + * Set status of the text operation. Possible values include: 'Not Started', 'Running', 'Failed', 'Succeeded'. * * @param status the status value to set * @return the TextOperationResult object itself. 
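Reviewer sketch (illustrative, not part of the diff): the Operation-Location header added above pairs with TextOperationResult for polling. The id extraction and the fetchResult callback are assumptions; the actual client call for retrieving the operation status is not shown in this diff, and the status comparison relies on the enum's serialized string values.

    import com.microsoft.azure.cognitiveservices.vision.computervision.models.RecognizeTextHeaders;
    import com.microsoft.azure.cognitiveservices.vision.computervision.models.TextOperationResult;
    import java.util.function.Function;

    /** Review-only sketch; fetchResult stands in for whatever status lookup the client exposes. */
    final class RecognizeTextPollingSketch {
        static TextOperationResult waitForResult(RecognizeTextHeaders headers,
                Function<String, TextOperationResult> fetchResult) throws InterruptedException {
            // Assumption: the trailing path segment of Operation-Location is the operation id.
            String location = headers.operationLocation();
            String operationId = location.substring(location.lastIndexOf('/') + 1);
            while (true) {
                TextOperationResult result = fetchResult.apply(operationId);
                String status = String.valueOf(result.status());
                if ("Succeeded".equalsIgnoreCase(status) || "Failed".equalsIgnoreCase(status)) {
                    return result;
                }
                Thread.sleep(1000L); // fixed delay, for illustration only
            }
        }
    }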
diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TextRecognitionMode.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TextRecognitionMode.java
new file mode 100644
index 000000000000..8a33c1ee4d69
--- /dev/null
+++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/TextRecognitionMode.java
@@ -0,0 +1,53 @@
+/**
+ * Copyright (c) Microsoft Corporation. All rights reserved.
+ * Licensed under the MIT License. See License.txt in the project root for
+ * license information.
+ *
+ * Code generated by Microsoft (R) AutoRest Code Generator.
+ */
+
+package com.microsoft.azure.cognitiveservices.vision.computervision.models;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonValue;
+
+/**
+ * Defines values for TextRecognitionMode.
+ */
+public enum TextRecognitionMode {
+    /** Enum value Handwritten. */
+    HANDWRITTEN("Handwritten"),
+
+    /** Enum value Printed. */
+    PRINTED("Printed");
+
+    /** The actual serialized value for a TextRecognitionMode instance. */
+    private String value;
+
+    TextRecognitionMode(String value) {
+        this.value = value;
+    }
+
+    /**
+     * Parses a serialized value to a TextRecognitionMode instance.
+     *
+     * @param value the serialized value to parse.
+     * @return the parsed TextRecognitionMode object, or null if unable to parse.
+     */
+    @JsonCreator
+    public static TextRecognitionMode fromString(String value) {
+        TextRecognitionMode[] items = TextRecognitionMode.values();
+        for (TextRecognitionMode item : items) {
+            if (item.toString().equalsIgnoreCase(value)) {
+                return item;
+            }
+        }
+        return null;
+    }
+
+    @JsonValue
+    @Override
+    public String toString() {
+        return this.value;
+    }
+}
diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/package-info.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/package-info.java
index 5273cb6577c1..d0e708ffcdfe 100755
--- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/package-info.java
+++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/models/package-info.java
@@ -5,7 +5,7 @@
 // Code generated by Microsoft (R) AutoRest Code Generator.
 
 /**
- * This package contains the models classes for ComputerVisionAPI.
+ * This package contains the models classes for ComputerVisionClient.
  * The Computer Vision API provides state-of-the-art algorithms to process images and return information. For example, it can be used to determine if an image contains mature content, or it can be used to find all the faces in an image. It also has other features like estimating dominant and accent colors, categorizing the content of images, and describing an image with complete English sentences. Additionally, it can also intelligently generate images thumbnails for displaying large images effectively.
  */
 package com.microsoft.azure.cognitiveservices.vision.computervision.models;
diff --git a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/package-info.java b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/package-info.java
index c40a580b7e7c..4c4b00b6e86c 100755
--- a/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/package-info.java
+++ b/cognitiveservices/data-plane/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/package-info.java
@@ -5,7 +5,7 @@
 // Code generated by Microsoft (R) AutoRest Code Generator.
 
 /**
- * This package contains the classes for ComputerVisionAPI.
+ * This package contains the classes for ComputerVisionClient.
  * The Computer Vision API provides state-of-the-art algorithms to process images and return information. For example, it can be used to determine if an image contains mature content, or it can be used to find all the faces in an image. It also has other features like estimating dominant and accent colors, categorizing the content of images, and describing an image with complete English sentences. Additionally, it can also intelligently generate images thumbnails for displaying large images effectively.
  */
 package com.microsoft.azure.cognitiveservices.vision.computervision;
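The new TextRecognitionMode enum added above is self-contained, so its behaviour can be read directly off the diff: toString() yields the serialized value, and fromString() performs a case-insensitive match and returns null for unknown input. A small usage sketch follows; the demo class name is illustrative only.

// Hedged usage sketch for the TextRecognitionMode enum shown in this diff.
import com.microsoft.azure.cognitiveservices.vision.computervision.models.TextRecognitionMode;

public final class TextRecognitionModeDemo {
    public static void main(String[] args) {
        // Case-insensitive parse of a serialized value back to an enum constant.
        TextRecognitionMode printed = TextRecognitionMode.fromString("printed");
        System.out.println(printed);                                // prints "Printed"
        System.out.println(TextRecognitionMode.HANDWRITTEN);        // prints "Handwritten"
        System.out.println(TextRecognitionMode.fromString("ocr"));  // prints "null" (unknown value)
    }
}

The value would presumably be passed to the recognize-text operations to choose between printed and handwritten OCR, but those method signatures are outside this part of the diff.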