diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/AdultInfo.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/AdultInfo.java new file mode 100644 index 00000000000..32755db2c86 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/AdultInfo.java @@ -0,0 +1,123 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * An object describing whether the image contains adult-oriented content + * and/or is racy. + */ +public class AdultInfo { + /** + * A value indicating if the image contains adult-oriented content. + */ + @JsonProperty(value = "isAdultContent") + private boolean isAdultContent; + + /** + * A value indicating if the image is racy. + */ + @JsonProperty(value = "isRacyContent") + private boolean isRacyContent; + + /** + * Score from 0 to 1 that indicates how much adult content is within the + * image. + */ + @JsonProperty(value = "adultScore") + private double adultScore; + + /** + * Score from 0 to 1 that indicates how suggestive the image is. + */ + @JsonProperty(value = "racyScore") + private double racyScore; + + /** + * Get a value indicating if the image contains adult-oriented content. + * + * @return the isAdultContent value + */ + public boolean isAdultContent() { + return this.isAdultContent; + } + + /** + * Set a value indicating if the image contains adult-oriented content. + * + * @param isAdultContent the isAdultContent value to set + * @return the AdultInfo object itself. + */ + public AdultInfo withIsAdultContent(boolean isAdultContent) { + this.isAdultContent = isAdultContent; + return this; + } + + /** + * Get a value indicating if the image is racy. + * + * @return the isRacyContent value + */ + public boolean isRacyContent() { + return this.isRacyContent; + } + + /** + * Set a value indicating if the image is racy. + * + * @param isRacyContent the isRacyContent value to set + * @return the AdultInfo object itself. + */ + public AdultInfo withIsRacyContent(boolean isRacyContent) { + this.isRacyContent = isRacyContent; + return this; + } + + /** + * Get score from 0 to 1 that indicates how much adult content is within the image. + * + * @return the adultScore value + */ + public double adultScore() { + return this.adultScore; + } + + /** + * Set score from 0 to 1 that indicates how much adult content is within the image. + * + * @param adultScore the adultScore value to set + * @return the AdultInfo object itself. + */ + public AdultInfo withAdultScore(double adultScore) { + this.adultScore = adultScore; + return this; + } + + /** + * Get score from 0 to 1 that indicates how suggestive the image is. + * + * @return the racyScore value + */ + public double racyScore() { + return this.racyScore; + } + + /** + * Set score from 0 to 1 that indicates how suggestive the image is. + * + * @param racyScore the racyScore value to set + * @return the AdultInfo object itself.
+ */ + public AdultInfo withRacyScore(double racyScore) { + this.racyScore = racyScore; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/AzureRegions.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/AzureRegions.java new file mode 100644 index 00000000000..71545176fdb --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/AzureRegions.java @@ -0,0 +1,83 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for AzureRegions. + */ +public enum AzureRegions { + /** Enum value westus. */ + WESTUS("westus"), + + /** Enum value westeurope. */ + WESTEUROPE("westeurope"), + + /** Enum value southeastasia. */ + SOUTHEASTASIA("southeastasia"), + + /** Enum value eastus2. */ + EASTUS2("eastus2"), + + /** Enum value westcentralus. */ + WESTCENTRALUS("westcentralus"), + + /** Enum value westus2. */ + WESTUS2("westus2"), + + /** Enum value eastus. */ + EASTUS("eastus"), + + /** Enum value southcentralus. */ + SOUTHCENTRALUS("southcentralus"), + + /** Enum value northeurope. */ + NORTHEUROPE("northeurope"), + + /** Enum value eastasia. */ + EASTASIA("eastasia"), + + /** Enum value australiaeast. */ + AUSTRALIAEAST("australiaeast"), + + /** Enum value brazilsouth. */ + BRAZILSOUTH("brazilsouth"); + + /** The actual serialized value for a AzureRegions instance. */ + private String value; + + AzureRegions(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a AzureRegions instance. + * + * @param value the serialized value to parse. + * @return the parsed AzureRegions object, or null if unable to parse. + */ + @JsonCreator + public static AzureRegions fromString(String value) { + AzureRegions[] items = AzureRegions.values(); + for (AzureRegions item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/Category.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/Category.java new file mode 100644 index 00000000000..3dfa2d1013b --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/Category.java @@ -0,0 +1,95 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * An object describing identified category. + */ +public class Category { + /** + * Name of the category. 
+ */ + @JsonProperty(value = "name") + private String name; + + /** + * Scoring of the category. + */ + @JsonProperty(value = "score") + private Double score; + + /** + * The detail property. + */ + @JsonProperty(value = "detail") + private CategoryDetail detail; + + /** + * Get name of the category. + * + * @return the name value + */ + public String name() { + return this.name; + } + + /** + * Set name of the category. + * + * @param name the name value to set + * @return the Category object itself. + */ + public Category withName(String name) { + this.name = name; + return this; + } + + /** + * Get scoring of the category. + * + * @return the score value + */ + public Double score() { + return this.score; + } + + /** + * Set scoring of the category. + * + * @param score the score value to set + * @return the Category object itself. + */ + public Category withScore(Double score) { + this.score = score; + return this; + } + + /** + * Get the detail value. + * + * @return the detail value + */ + public CategoryDetail detail() { + return this.detail; + } + + /** + * Set the detail value. + * + * @param detail the detail value to set + * @return the Category object itself. + */ + public Category withDetail(CategoryDetail detail) { + this.detail = detail; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/CategoryDetail.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/CategoryDetail.java new file mode 100644 index 00000000000..530da9a750a --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/CategoryDetail.java @@ -0,0 +1,44 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import java.util.List; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * An object describing additional category details. + */ +public class CategoryDetail { + /** + * An array of celebrities if any identified. + */ + @JsonProperty(value = "celebrities") + private List celebrities; + + /** + * Get an array of celebrities if any identified. + * + * @return the celebrities value + */ + public List celebrities() { + return this.celebrities; + } + + /** + * Set an array of celebrities if any identified. + * + * @param celebrities the celebrities value to set + * @return the CategoryDetail object itself. + */ + public CategoryDetail withCelebrities(List celebrities) { + this.celebrities = celebrities; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/CelebritiesModel.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/CelebritiesModel.java new file mode 100644 index 00000000000..b4acbdb4cec --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/CelebritiesModel.java @@ -0,0 +1,95 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. 
See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * An object describing possible celebrity identification. + */ +public class CelebritiesModel { + /** + * Name of the celebrity. + */ + @JsonProperty(value = "name") + private String name; + + /** + * Level of confidence ranging from 0 to 1. + */ + @JsonProperty(value = "confidence") + private Double confidence; + + /** + * The faceRectangle property. + */ + @JsonProperty(value = "faceRectangle") + private FaceRectangle faceRectangle; + + /** + * Get name of the celebrity. + * + * @return the name value + */ + public String name() { + return this.name; + } + + /** + * Set name of the celebrity. + * + * @param name the name value to set + * @return the CelebritiesModel object itself. + */ + public CelebritiesModel withName(String name) { + this.name = name; + return this; + } + + /** + * Get level of confidence ranging from 0 to 1. + * + * @return the confidence value + */ + public Double confidence() { + return this.confidence; + } + + /** + * Set level of confidence ranging from 0 to 1. + * + * @param confidence the confidence value to set + * @return the CelebritiesModel object itself. + */ + public CelebritiesModel withConfidence(Double confidence) { + this.confidence = confidence; + return this; + } + + /** + * Get the faceRectangle value. + * + * @return the faceRectangle value + */ + public FaceRectangle faceRectangle() { + return this.faceRectangle; + } + + /** + * Set the faceRectangle value. + * + * @param faceRectangle the faceRectangle value to set + * @return the CelebritiesModel object itself. + */ + public CelebritiesModel withFaceRectangle(FaceRectangle faceRectangle) { + this.faceRectangle = faceRectangle; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/CelebrityResults.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/CelebrityResults.java new file mode 100644 index 00000000000..df59cc8eaf6 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/CelebrityResults.java @@ -0,0 +1,96 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import java.util.List; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * List of celebrities recognized in the image. + */ +public class CelebrityResults { + /** + * The celebrities property. + */ + @JsonProperty(value = "celebrities") + private List celebrities; + + /** + * Id of the REST API request. + */ + @JsonProperty(value = "requestId") + private String requestId; + + /** + * The metadata property. + */ + @JsonProperty(value = "metadata") + private ImageMetadata metadata; + + /** + * Get the celebrities value. + * + * @return the celebrities value + */ + public List celebrities() { + return this.celebrities; + } + + /** + * Set the celebrities value. 
+ * + * @param celebrities the celebrities value to set + * @return the CelebrityResults object itself. + */ + public CelebrityResults withCelebrities(List celebrities) { + this.celebrities = celebrities; + return this; + } + + /** + * Get id of the REST API request. + * + * @return the requestId value + */ + public String requestId() { + return this.requestId; + } + + /** + * Set id of the REST API request. + * + * @param requestId the requestId value to set + * @return the CelebrityResults object itself. + */ + public CelebrityResults withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the metadata value. + * + * @return the metadata value + */ + public ImageMetadata metadata() { + return this.metadata; + } + + /** + * Set the metadata value. + * + * @param metadata the metadata value to set + * @return the CelebrityResults object itself. + */ + public CelebrityResults withMetadata(ImageMetadata metadata) { + this.metadata = metadata; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ColorInfo.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ColorInfo.java new file mode 100644 index 00000000000..fb163324c07 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ColorInfo.java @@ -0,0 +1,148 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import java.util.List; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * An object providing additional metadata describing color attributes. + */ +public class ColorInfo { + /** + * Possible dominant foreground color. + */ + @JsonProperty(value = "dominantColorForeground") + private String dominantColorForeground; + + /** + * Possible dominant background color. + */ + @JsonProperty(value = "dominantColorBackground") + private String dominantColorBackground; + + /** + * An array of possible dominant colors. + */ + @JsonProperty(value = "dominantColors") + private List dominantColors; + + /** + * Possible accent color. + */ + @JsonProperty(value = "accentColor") + private String accentColor; + + /** + * A value indicating if the image is black and white. + */ + @JsonProperty(value = "isBWImg") + private Boolean isBWImg; + + /** + * Get possible dominant foreground color. + * + * @return the dominantColorForeground value + */ + public String dominantColorForeground() { + return this.dominantColorForeground; + } + + /** + * Set possible dominant foreground color. + * + * @param dominantColorForeground the dominantColorForeground value to set + * @return the ColorInfo object itself. + */ + public ColorInfo withDominantColorForeground(String dominantColorForeground) { + this.dominantColorForeground = dominantColorForeground; + return this; + } + + /** + * Get possible dominant background color. + * + * @return the dominantColorBackground value + */ + public String dominantColorBackground() { + return this.dominantColorBackground; + } + + /** + * Set possible dominant background color. 
+ * + * @param dominantColorBackground the dominantColorBackground value to set + * @return the ColorInfo object itself. + */ + public ColorInfo withDominantColorBackground(String dominantColorBackground) { + this.dominantColorBackground = dominantColorBackground; + return this; + } + + /** + * Get an array of possible dominant colors. + * + * @return the dominantColors value + */ + public List dominantColors() { + return this.dominantColors; + } + + /** + * Set an array of possible dominant colors. + * + * @param dominantColors the dominantColors value to set + * @return the ColorInfo object itself. + */ + public ColorInfo withDominantColors(List dominantColors) { + this.dominantColors = dominantColors; + return this; + } + + /** + * Get possible accent color. + * + * @return the accentColor value + */ + public String accentColor() { + return this.accentColor; + } + + /** + * Set possible accent color. + * + * @param accentColor the accentColor value to set + * @return the ColorInfo object itself. + */ + public ColorInfo withAccentColor(String accentColor) { + this.accentColor = accentColor; + return this; + } + + /** + * Get a value indicating if the image is black and white. + * + * @return the isBWImg value + */ + public Boolean isBWImg() { + return this.isBWImg; + } + + /** + * Set a value indicating if the image is black and white. + * + * @param isBWImg the isBWImg value to set + * @return the ColorInfo object itself. + */ + public ColorInfo withIsBWImg(Boolean isBWImg) { + this.isBWImg = isBWImg; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionError.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionError.java new file mode 100644 index 00000000000..cb980e27e27 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionError.java @@ -0,0 +1,99 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * The ComputerVisionError model. + */ +public class ComputerVisionError { + /** + * The error code. Possible values include: 'InvalidImageUrl', + * 'InvalidImageFormat', 'InvalidImageSize', 'NotSupportedVisualFeature', + * 'NotSupportedImage', 'InvalidDetails', 'NotSupportedLanguage', + * 'BadArgument', 'FailedToProcess', 'Timeout', 'InternalServerError', + * 'Unspecified', 'StorageException'. + */ + @JsonProperty(value = "code", required = true) + private ComputerVisionErrorCodes code; + + /** + * A message explaining the error reported by the service. + */ + @JsonProperty(value = "message", required = true) + private String message; + + /** + * A unique request identifier. + */ + @JsonProperty(value = "requestId") + private String requestId; + + /** + * Get the error code. Possible values include: 'InvalidImageUrl', 'InvalidImageFormat', 'InvalidImageSize', 'NotSupportedVisualFeature', 'NotSupportedImage', 'InvalidDetails', 'NotSupportedLanguage', 'BadArgument', 'FailedToProcess', 'Timeout', 'InternalServerError', 'Unspecified', 'StorageException'. 
+ * + * @return the code value + */ + public ComputerVisionErrorCodes code() { + return this.code; + } + + /** + * Set the error code. Possible values include: 'InvalidImageUrl', 'InvalidImageFormat', 'InvalidImageSize', 'NotSupportedVisualFeature', 'NotSupportedImage', 'InvalidDetails', 'NotSupportedLanguage', 'BadArgument', 'FailedToProcess', 'Timeout', 'InternalServerError', 'Unspecified', 'StorageException'. + * + * @param code the code value to set + * @return the ComputerVisionError object itself. + */ + public ComputerVisionError withCode(ComputerVisionErrorCodes code) { + this.code = code; + return this; + } + + /** + * Get a message explaining the error reported by the service. + * + * @return the message value + */ + public String message() { + return this.message; + } + + /** + * Set a message explaining the error reported by the service. + * + * @param message the message value to set + * @return the ComputerVisionError object itself. + */ + public ComputerVisionError withMessage(String message) { + this.message = message; + return this; + } + + /** + * Get a unique request identifier. + * + * @return the requestId value + */ + public String requestId() { + return this.requestId; + } + + /** + * Set a unique request identifier. + * + * @param requestId the requestId value to set + * @return the ComputerVisionError object itself. + */ + public ComputerVisionError withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionErrorCodes.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionErrorCodes.java new file mode 100644 index 00000000000..585e50c5175 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionErrorCodes.java @@ -0,0 +1,86 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for ComputerVisionErrorCodes. + */ +public enum ComputerVisionErrorCodes { + /** Enum value InvalidImageUrl. */ + INVALID_IMAGE_URL("InvalidImageUrl"), + + /** Enum value InvalidImageFormat. */ + INVALID_IMAGE_FORMAT("InvalidImageFormat"), + + /** Enum value InvalidImageSize. */ + INVALID_IMAGE_SIZE("InvalidImageSize"), + + /** Enum value NotSupportedVisualFeature. */ + NOT_SUPPORTED_VISUAL_FEATURE("NotSupportedVisualFeature"), + + /** Enum value NotSupportedImage. */ + NOT_SUPPORTED_IMAGE("NotSupportedImage"), + + /** Enum value InvalidDetails. */ + INVALID_DETAILS("InvalidDetails"), + + /** Enum value NotSupportedLanguage. */ + NOT_SUPPORTED_LANGUAGE("NotSupportedLanguage"), + + /** Enum value BadArgument. */ + BAD_ARGUMENT("BadArgument"), + + /** Enum value FailedToProcess. */ + FAILED_TO_PROCESS("FailedToProcess"), + + /** Enum value Timeout. */ + TIMEOUT("Timeout"), + + /** Enum value InternalServerError. */ + INTERNAL_SERVER_ERROR("InternalServerError"), + + /** Enum value Unspecified. 
*/ + UNSPECIFIED("Unspecified"), + + /** Enum value StorageException. */ + STORAGE_EXCEPTION("StorageException"); + + /** The actual serialized value for a ComputerVisionErrorCodes instance. */ + private String value; + + ComputerVisionErrorCodes(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a ComputerVisionErrorCodes instance. + * + * @param value the serialized value to parse. + * @return the parsed ComputerVisionErrorCodes object, or null if unable to parse. + */ + @JsonCreator + public static ComputerVisionErrorCodes fromString(String value) { + ComputerVisionErrorCodes[] items = ComputerVisionErrorCodes.values(); + for (ComputerVisionErrorCodes item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionErrorException.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionErrorException.java new file mode 100644 index 00000000000..1585fcb2744 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ComputerVisionErrorException.java @@ -0,0 +1,45 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.microsoft.rest.RestException; +import okhttp3.ResponseBody; +import retrofit2.Response; + +/** + * Exception thrown for an invalid response with ComputerVisionError + * information. + */ +public class ComputerVisionErrorException extends RestException { + /** + * Initializes a new instance of the ComputerVisionErrorException class. + * + * @param message the exception message or the response content if a message is not available + * @param response the HTTP response + */ + public ComputerVisionErrorException(final String message, final Response response) { + super(message, response); + } + + /** + * Initializes a new instance of the ComputerVisionErrorException class. + * + * @param message the exception message or the response content if a message is not available + * @param response the HTTP response + * @param body the deserialized response body + */ + public ComputerVisionErrorException(final String message, final Response response, final ComputerVisionError body) { + super(message, response, body); + } + + @Override + public ComputerVisionError body() { + return (ComputerVisionError) super.body(); + } +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/Details.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/Details.java new file mode 100644 index 00000000000..16043033bac --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/Details.java @@ -0,0 +1,53 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. 
See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for Details. + */ +public enum Details { + /** Enum value Celebrities. */ + CELEBRITIES("Celebrities"), + + /** Enum value Landmarks. */ + LANDMARKS("Landmarks"); + + /** The actual serialized value for a Details instance. */ + private String value; + + Details(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a Details instance. + * + * @param value the serialized value to parse. + * @return the parsed Details object, or null if unable to parse. + */ + @JsonCreator + public static Details fromString(String value) { + Details[] items = Details.values(); + for (Details item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/DomainModels.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/DomainModels.java new file mode 100644 index 00000000000..6d1227c3c83 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/DomainModels.java @@ -0,0 +1,53 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for DomainModels. + */ +public enum DomainModels { + /** Enum value Celebrities. */ + CELEBRITIES("Celebrities"), + + /** Enum value Landmarks. */ + LANDMARKS("Landmarks"); + + /** The actual serialized value for a DomainModels instance. */ + private String value; + + DomainModels(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a DomainModels instance. + * + * @param value the serialized value to parse. + * @return the parsed DomainModels object, or null if unable to parse. + */ + @JsonCreator + public static DomainModels fromString(String value) { + DomainModels[] items = DomainModels.values(); + for (DomainModels item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/FaceDescription.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/FaceDescription.java new file mode 100644 index 00000000000..6e431aba088 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/FaceDescription.java @@ -0,0 +1,95 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. 
+ * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * An object describing a face identified in the image. + */ +public class FaceDescription { + /** + * Possible age of the face. + */ + @JsonProperty(value = "age") + private Integer age; + + /** + * Possible gender of the face. Possible values include: 'Male', 'Female'. + */ + @JsonProperty(value = "gender") + private Gender gender; + + /** + * The faceRectangle property. + */ + @JsonProperty(value = "faceRectangle") + private FaceRectangle faceRectangle; + + /** + * Get possible age of the face. + * + * @return the age value + */ + public Integer age() { + return this.age; + } + + /** + * Set possible age of the face. + * + * @param age the age value to set + * @return the FaceDescription object itself. + */ + public FaceDescription withAge(Integer age) { + this.age = age; + return this; + } + + /** + * Get possible gender of the face. Possible values include: 'Male', 'Female'. + * + * @return the gender value + */ + public Gender gender() { + return this.gender; + } + + /** + * Set possible gender of the face. Possible values include: 'Male', 'Female'. + * + * @param gender the gender value to set + * @return the FaceDescription object itself. + */ + public FaceDescription withGender(Gender gender) { + this.gender = gender; + return this; + } + + /** + * Get the faceRectangle value. + * + * @return the faceRectangle value + */ + public FaceRectangle faceRectangle() { + return this.faceRectangle; + } + + /** + * Set the faceRectangle value. + * + * @param faceRectangle the faceRectangle value to set + * @return the FaceDescription object itself. + */ + public FaceDescription withFaceRectangle(FaceRectangle faceRectangle) { + this.faceRectangle = faceRectangle; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/FaceRectangle.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/FaceRectangle.java new file mode 100644 index 00000000000..a44473d02ac --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/FaceRectangle.java @@ -0,0 +1,121 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * An object describing face rectangle. + */ +public class FaceRectangle { + /** + * X-coordinate of the top left point of the face. + */ + @JsonProperty(value = "left") + private Integer left; + + /** + * Y-coordinate of the top left point of the face. + */ + @JsonProperty(value = "top") + private Integer top; + + /** + * Width measured from the top-left point of the face. + */ + @JsonProperty(value = "width") + private Integer width; + + /** + * Height measured from the top-left point of the face. + */ + @JsonProperty(value = "height") + private Integer height; + + /** + * Get x-coordinate of the top left point of the face. 
+ * + * @return the left value + */ + public Integer left() { + return this.left; + } + + /** + * Set x-coordinate of the top left point of the face. + * + * @param left the left value to set + * @return the FaceRectangle object itself. + */ + public FaceRectangle withLeft(Integer left) { + this.left = left; + return this; + } + + /** + * Get y-coordinate of the top left point of the face. + * + * @return the top value + */ + public Integer top() { + return this.top; + } + + /** + * Set y-coordinate of the top left point of the face. + * + * @param top the top value to set + * @return the FaceRectangle object itself. + */ + public FaceRectangle withTop(Integer top) { + this.top = top; + return this; + } + + /** + * Get width measured from the top-left point of the face. + * + * @return the width value + */ + public Integer width() { + return this.width; + } + + /** + * Set width measured from the top-left point of the face. + * + * @param width the width value to set + * @return the FaceRectangle object itself. + */ + public FaceRectangle withWidth(Integer width) { + this.width = width; + return this; + } + + /** + * Get height measured from the top-left point of the face. + * + * @return the height value + */ + public Integer height() { + return this.height; + } + + /** + * Set height measured from the top-left point of the face. + * + * @param height the height value to set + * @return the FaceRectangle object itself. + */ + public FaceRectangle withHeight(Integer height) { + this.height = height; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/Gender.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/Gender.java new file mode 100644 index 00000000000..ee679ea9637 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/Gender.java @@ -0,0 +1,53 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for Gender. + */ +public enum Gender { + /** Enum value Male. */ + MALE("Male"), + + /** Enum value Female. */ + FEMALE("Female"); + + /** The actual serialized value for a Gender instance. */ + private String value; + + Gender(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a Gender instance. + * + * @param value the serialized value to parse. + * @return the parsed Gender object, or null if unable to parse. 
+ */ + @JsonCreator + public static Gender fromString(String value) { + Gender[] items = Gender.values(); + for (Gender item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ImageCaption.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ImageCaption.java new file mode 100644 index 00000000000..fc46449db0e --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ImageCaption.java @@ -0,0 +1,69 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * An image caption, i.e. a brief description of what the image depicts. + */ +public class ImageCaption { + /** + * The text of the caption. + */ + @JsonProperty(value = "text") + private String text; + + /** + * The level of confidence the service has in the caption. + */ + @JsonProperty(value = "confidence") + private Double confidence; + + /** + * Get the text of the caption. + * + * @return the text value + */ + public String text() { + return this.text; + } + + /** + * Set the text of the caption. + * + * @param text the text value to set + * @return the ImageCaption object itself. + */ + public ImageCaption withText(String text) { + this.text = text; + return this; + } + + /** + * Get the level of confidence the service has in the caption. + * + * @return the confidence value + */ + public Double confidence() { + return this.confidence; + } + + /** + * Set the level of confidence the service has in the caption. + * + * @param confidence the confidence value to set + * @return the ImageCaption object itself. + */ + public ImageCaption withConfidence(Double confidence) { + this.confidence = confidence; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ImageDescriptionDetails.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ImageDescriptionDetails.java new file mode 100644 index 00000000000..ccde84db93d --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ImageDescriptionDetails.java @@ -0,0 +1,123 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import java.util.List; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * A collection of content tags, along with a list of captions sorted by + * confidence level, and image metadata. + */ +public class ImageDescriptionDetails { + /** + * A collection of image tags. 
+ */ + @JsonProperty(value = "tags") + private List tags; + + /** + * A list of captions, sorted by confidence level. + */ + @JsonProperty(value = "captions") + private List captions; + + /** + * Id of the REST API request. + */ + @JsonProperty(value = "requestId") + private String requestId; + + /** + * The metadata property. + */ + @JsonProperty(value = "metadata") + private ImageMetadata metadata; + + /** + * Get a collection of image tags. + * + * @return the tags value + */ + public List tags() { + return this.tags; + } + + /** + * Set a collection of image tags. + * + * @param tags the tags value to set + * @return the ImageDescriptionDetails object itself. + */ + public ImageDescriptionDetails withTags(List tags) { + this.tags = tags; + return this; + } + + /** + * Get a list of captions, sorted by confidence level. + * + * @return the captions value + */ + public List captions() { + return this.captions; + } + + /** + * Set a list of captions, sorted by confidence level. + * + * @param captions the captions value to set + * @return the ImageDescriptionDetails object itself. + */ + public ImageDescriptionDetails withCaptions(List captions) { + this.captions = captions; + return this; + } + + /** + * Get id of the REST API request. + * + * @return the requestId value + */ + public String requestId() { + return this.requestId; + } + + /** + * Set id of the REST API request. + * + * @param requestId the requestId value to set + * @return the ImageDescriptionDetails object itself. + */ + public ImageDescriptionDetails withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the metadata value. + * + * @return the metadata value + */ + public ImageMetadata metadata() { + return this.metadata; + } + + /** + * Set the metadata value. + * + * @param metadata the metadata value to set + * @return the ImageDescriptionDetails object itself. + */ + public ImageDescriptionDetails withMetadata(ImageMetadata metadata) { + this.metadata = metadata; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ImageMetadata.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ImageMetadata.java new file mode 100644 index 00000000000..6b170165552 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ImageMetadata.java @@ -0,0 +1,95 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * Image metadata. + */ +public class ImageMetadata { + /** + * Image width. + */ + @JsonProperty(value = "width") + private Integer width; + + /** + * Image height. + */ + @JsonProperty(value = "height") + private Integer height; + + /** + * Image format. + */ + @JsonProperty(value = "format") + private String format; + + /** + * Get image width. + * + * @return the width value + */ + public Integer width() { + return this.width; + } + + /** + * Set image width. + * + * @param width the width value to set + * @return the ImageMetadata object itself. 
+ */ + public ImageMetadata withWidth(Integer width) { + this.width = width; + return this; + } + + /** + * Get image height. + * + * @return the height value + */ + public Integer height() { + return this.height; + } + + /** + * Set image height. + * + * @param height the height value to set + * @return the ImageMetadata object itself. + */ + public ImageMetadata withHeight(Integer height) { + this.height = height; + return this; + } + + /** + * Get image format. + * + * @return the format value + */ + public String format() { + return this.format; + } + + /** + * Set image format. + * + * @param format the format value to set + * @return the ImageMetadata object itself. + */ + public ImageMetadata withFormat(String format) { + this.format = format; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ImageTag.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ImageTag.java new file mode 100644 index 00000000000..71a0084407f --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ImageTag.java @@ -0,0 +1,69 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * An image tag, i.e. a label describing the content of the image. + */ +public class ImageTag { + /** + * The tag value. + */ + @JsonProperty(value = "name") + private String name; + + /** + * The level of confidence the service has in the tag. + */ + @JsonProperty(value = "confidence") + private Double confidence; + + /** + * Get the tag value. + * + * @return the name value + */ + public String name() { + return this.name; + } + + /** + * Set the tag value. + * + * @param name the name value to set + * @return the ImageTag object itself. + */ + public ImageTag withName(String name) { + this.name = name; + return this; + } + + /** + * Get the level of confidence the service has in the tag. + * + * @return the confidence value + */ + public Double confidence() { + return this.confidence; + } + + /** + * Set the level of confidence the service has in the tag. + * + * @param confidence the confidence value to set + * @return the ImageTag object itself. + */ + public ImageTag withConfidence(Double confidence) { + this.confidence = confidence; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ImageType.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ImageType.java new file mode 100644 index 00000000000..15b9fc474dd --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ImageType.java @@ -0,0 +1,69 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator.
+ */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * An object providing possible image types and matching confidence levels. + */ +public class ImageType { + /** + * Confidence level that the image is a clip art. + */ + @JsonProperty(value = "clipArtType") + private Double clipArtType; + + /** + * Confidence level that the image is a line drawing. + */ + @JsonProperty(value = "lineDrawingType") + private Double lineDrawingType; + + /** + * Get confidence level that the image is a clip art. + * + * @return the clipArtType value + */ + public Double clipArtType() { + return this.clipArtType; + } + + /** + * Set confidence level that the image is a clip art. + * + * @param clipArtType the clipArtType value to set + * @return the ImageType object itself. + */ + public ImageType withClipArtType(Double clipArtType) { + this.clipArtType = clipArtType; + return this; + } + + /** + * Get confidence level that the image is a line drawing. + * + * @return the lineDrawingType value + */ + public Double lineDrawingType() { + return this.lineDrawingType; + } + + /** + * Set confidence level that the image is a line drawing. + * + * @param lineDrawingType the lineDrawingType value to set + * @return the ImageType object itself. + */ + public ImageType withLineDrawingType(Double lineDrawingType) { + this.lineDrawingType = lineDrawingType; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ImageUrl.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ImageUrl.java new file mode 100644 index 00000000000..9d20b79887d --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ImageUrl.java @@ -0,0 +1,43 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * The ImageUrl model. + */ +public class ImageUrl { + /** + * Publicly reachable URL of an image. + */ + @JsonProperty(value = "url", required = true) + private String url; + + /** + * Get publicly reachable URL of an image. + * + * @return the url value + */ + public String url() { + return this.url; + } + + /** + * Set publicly reachable URL of an image. + * + * @param url the url value to set + * @return the ImageUrl object itself. + */ + public ImageUrl withUrl(String url) { + this.url = url; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/LandmarkResults.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/LandmarkResults.java new file mode 100644 index 00000000000..e85d560290d --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/LandmarkResults.java @@ -0,0 +1,96 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. 
See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import java.util.List; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * List of landmarks recognized in the image. + */ +public class LandmarkResults { + /** + * The landmarks property. + */ + @JsonProperty(value = "landmarks") + private List landmarks; + + /** + * Id of the REST API request. + */ + @JsonProperty(value = "requestId") + private String requestId; + + /** + * The metadata property. + */ + @JsonProperty(value = "metadata") + private ImageMetadata metadata; + + /** + * Get the landmarks value. + * + * @return the landmarks value + */ + public List landmarks() { + return this.landmarks; + } + + /** + * Set the landmarks value. + * + * @param landmarks the landmarks value to set + * @return the LandmarkResults object itself. + */ + public LandmarkResults withLandmarks(List landmarks) { + this.landmarks = landmarks; + return this; + } + + /** + * Get id of the REST API request. + * + * @return the requestId value + */ + public String requestId() { + return this.requestId; + } + + /** + * Set id of the REST API request. + * + * @param requestId the requestId value to set + * @return the LandmarkResults object itself. + */ + public LandmarkResults withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the metadata value. + * + * @return the metadata value + */ + public ImageMetadata metadata() { + return this.metadata; + } + + /** + * Set the metadata value. + * + * @param metadata the metadata value to set + * @return the LandmarkResults object itself. + */ + public LandmarkResults withMetadata(ImageMetadata metadata) { + this.metadata = metadata; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/LandmarkResultsLandmarksItem.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/LandmarkResultsLandmarksItem.java new file mode 100644 index 00000000000..118544faf28 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/LandmarkResultsLandmarksItem.java @@ -0,0 +1,69 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * A landmark recognized in the image. + */ +public class LandmarkResultsLandmarksItem { + /** + * Name of the landmark. + */ + @JsonProperty(value = "name") + private String name; + + /** + * Confidence level for the landmark recognition. + */ + @JsonProperty(value = "confidence") + private Double confidence; + + /** + * Get name of the landmark. + * + * @return the name value + */ + public String name() { + return this.name; + } + + /** + * Set name of the landmark. + * + * @param name the name value to set + * @return the LandmarkResultsLandmarksItem object itself. 
+ */ + public LandmarkResultsLandmarksItem withName(String name) { + this.name = name; + return this; + } + + /** + * Get confidence level for the landmark recognition. + * + * @return the confidence value + */ + public Double confidence() { + return this.confidence; + } + + /** + * Set confidence level for the landmark recognition. + * + * @param confidence the confidence value to set + * @return the LandmarkResultsLandmarksItem object itself. + */ + public LandmarkResultsLandmarksItem withConfidence(Double confidence) { + this.confidence = confidence; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/Line.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/Line.java new file mode 100644 index 00000000000..c26e91f1202 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/Line.java @@ -0,0 +1,96 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import java.util.List; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * The Line model. + */ +public class Line { + /** + * The boundingBox property. + */ + @JsonProperty(value = "boundingBox") + private List boundingBox; + + /** + * The text property. + */ + @JsonProperty(value = "text") + private String text; + + /** + * The words property. + */ + @JsonProperty(value = "words") + private List words; + + /** + * Get the boundingBox value. + * + * @return the boundingBox value + */ + public List boundingBox() { + return this.boundingBox; + } + + /** + * Set the boundingBox value. + * + * @param boundingBox the boundingBox value to set + * @return the Line object itself. + */ + public Line withBoundingBox(List boundingBox) { + this.boundingBox = boundingBox; + return this; + } + + /** + * Get the text value. + * + * @return the text value + */ + public String text() { + return this.text; + } + + /** + * Set the text value. + * + * @param text the text value to set + * @return the Line object itself. + */ + public Line withText(String text) { + this.text = text; + return this; + } + + /** + * Get the words value. + * + * @return the words value + */ + public List words() { + return this.words; + } + + /** + * Set the words value. + * + * @param words the words value to set + * @return the Line object itself. + */ + public Line withWords(List words) { + this.words = words; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ModelDescription.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ModelDescription.java new file mode 100644 index 00000000000..350d36127b2 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/ModelDescription.java @@ -0,0 +1,70 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. 
+ * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import java.util.List; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * An object describing supported model by name and categories. + */ +public class ModelDescription { + /** + * The name property. + */ + @JsonProperty(value = "name") + private String name; + + /** + * The categories property. + */ + @JsonProperty(value = "categories") + private List categories; + + /** + * Get the name value. + * + * @return the name value + */ + public String name() { + return this.name; + } + + /** + * Set the name value. + * + * @param name the name value to set + * @return the ModelDescription object itself. + */ + public ModelDescription withName(String name) { + this.name = name; + return this; + } + + /** + * Get the categories value. + * + * @return the categories value + */ + public List categories() { + return this.categories; + } + + /** + * Set the categories value. + * + * @param categories the categories value to set + * @return the ModelDescription object itself. + */ + public ModelDescription withCategories(List categories) { + this.categories = categories; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/OcrLanguages.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/OcrLanguages.java new file mode 100644 index 00000000000..47aa714f806 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/OcrLanguages.java @@ -0,0 +1,128 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for OcrLanguages. + */ +public enum OcrLanguages { + /** Enum value unk. */ + UNK("unk"), + + /** Enum value zh-Hans. */ + ZH_HANS("zh-Hans"), + + /** Enum value zh-Hant. */ + ZH_HANT("zh-Hant"), + + /** Enum value cs. */ + CS("cs"), + + /** Enum value da. */ + DA("da"), + + /** Enum value nl. */ + NL("nl"), + + /** Enum value en. */ + EN("en"), + + /** Enum value fi. */ + FI("fi"), + + /** Enum value fr. */ + FR("fr"), + + /** Enum value de. */ + DE("de"), + + /** Enum value el. */ + EL("el"), + + /** Enum value hu. */ + HU("hu"), + + /** Enum value it. */ + IT("it"), + + /** Enum value ja. */ + JA("ja"), + + /** Enum value ko. */ + KO("ko"), + + /** Enum value nb. */ + NB("nb"), + + /** Enum value pl. */ + PL("pl"), + + /** Enum value pt. */ + PT("pt"), + + /** Enum value ru. */ + RU("ru"), + + /** Enum value es. */ + ES("es"), + + /** Enum value sv. */ + SV("sv"), + + /** Enum value tr. */ + TR("tr"), + + /** Enum value ar. */ + AR("ar"), + + /** Enum value ro. */ + RO("ro"), + + /** Enum value sr-Cyrl. */ + SR_CYRL("sr-Cyrl"), + + /** Enum value sr-Latn. */ + SR_LATN("sr-Latn"), + + /** Enum value sk. */ + SK("sk"); + + /** The actual serialized value for a OcrLanguages instance. 
*/ + private String value; + + OcrLanguages(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a OcrLanguages instance. + * + * @param value the serialized value to parse. + * @return the parsed OcrLanguages object, or null if unable to parse. + */ + @JsonCreator + public static OcrLanguages fromString(String value) { + OcrLanguages[] items = OcrLanguages.values(); + for (OcrLanguages item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/OcrLine.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/OcrLine.java new file mode 100644 index 00000000000..47e4958c847 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/OcrLine.java @@ -0,0 +1,75 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import java.util.List; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * An object describing a single recognized line of text. + */ +public class OcrLine { + /** + * Bounding box of a recognized line. The four integers represent the + * x-coordinate of the left edge, the y-coordinate of the top edge, width, + * and height of the bounding box, in the coordinate system of the input + * image, after it has been rotated around its center according to the + * detected text angle (see textAngle property), with the origin at the + * top-left corner, and the y-axis pointing down. + */ + @JsonProperty(value = "boundingBox") + private String boundingBox; + + /** + * An array of objects, where each object represents a recognized word. + */ + @JsonProperty(value = "words") + private List words; + + /** + * Get bounding box of a recognized line. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down. + * + * @return the boundingBox value + */ + public String boundingBox() { + return this.boundingBox; + } + + /** + * Set bounding box of a recognized line. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down. + * + * @param boundingBox the boundingBox value to set + * @return the OcrLine object itself. + */ + public OcrLine withBoundingBox(String boundingBox) { + this.boundingBox = boundingBox; + return this; + } + + /** + * Get an array of objects, where each object represents a recognized word. 
+ * + * @return the words value + */ + public List words() { + return this.words; + } + + /** + * Set an array of objects, where each object represents a recognized word. + * + * @param words the words value to set + * @return the OcrLine object itself. + */ + public OcrLine withWords(List words) { + this.words = words; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/OcrRegion.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/OcrRegion.java new file mode 100644 index 00000000000..03fbd4299f0 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/OcrRegion.java @@ -0,0 +1,76 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import java.util.List; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * A region consists of multiple lines (e.g. a column of text in a multi-column + * document). + */ +public class OcrRegion { + /** + * Bounding box of a recognized region. The four integers represent the + * x-coordinate of the left edge, the y-coordinate of the top edge, width, + * and height of the bounding box, in the coordinate system of the input + * image, after it has been rotated around its center according to the + * detected text angle (see textAngle property), with the origin at the + * top-left corner, and the y-axis pointing down. + */ + @JsonProperty(value = "boundingBox") + private String boundingBox; + + /** + * The lines property. + */ + @JsonProperty(value = "lines") + private List lines; + + /** + * Get bounding box of a recognized region. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down. + * + * @return the boundingBox value + */ + public String boundingBox() { + return this.boundingBox; + } + + /** + * Set bounding box of a recognized region. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down. + * + * @param boundingBox the boundingBox value to set + * @return the OcrRegion object itself. + */ + public OcrRegion withBoundingBox(String boundingBox) { + this.boundingBox = boundingBox; + return this; + } + + /** + * Get the lines value. + * + * @return the lines value + */ + public List lines() { + return this.lines; + } + + /** + * Set the lines value. + * + * @param lines the lines value to set + * @return the OcrRegion object itself. 
+ */ + public OcrRegion withLines(List lines) { + this.lines = lines; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/OcrWord.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/OcrWord.java new file mode 100644 index 00000000000..947bbd1b10c --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/OcrWord.java @@ -0,0 +1,74 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * Information on a recognized word. + */ +public class OcrWord { + /** + * Bounding box of a recognized word. The four integers represent the + * x-coordinate of the left edge, the y-coordinate of the top edge, width, + * and height of the bounding box, in the coordinate system of the input + * image, after it has been rotated around its center according to the + * detected text angle (see textAngle property), with the origin at the + * top-left corner, and the y-axis pointing down. + */ + @JsonProperty(value = "boundingBox") + private String boundingBox; + + /** + * String value of a recognized word. + */ + @JsonProperty(value = "text") + private String text; + + /** + * Get bounding box of a recognized word. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down. + * + * @return the boundingBox value + */ + public String boundingBox() { + return this.boundingBox; + } + + /** + * Set bounding box of a recognized word. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down. + * + * @param boundingBox the boundingBox value to set + * @return the OcrWord object itself. + */ + public OcrWord withBoundingBox(String boundingBox) { + this.boundingBox = boundingBox; + return this; + } + + /** + * Get string value of a recognized word. + * + * @return the text value + */ + public String text() { + return this.text; + } + + /** + * Set string value of a recognized word. + * + * @param text the text value to set + * @return the OcrWord object itself. 
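Note that the OCR models (OcrRegion, OcrLine, OcrWord) expose boundingBox as a single comma-separated string of four integers (left, top, width, height) rather than a structured type. A small helper such as the following, which is not part of the generated SDK, can turn it into numbers, assuming the documented "x,y,width,height" encoding (e.g. "462,379,497,258"):

    // Hypothetical helper, not in this diff: splits OcrWord/OcrLine/OcrRegion.boundingBox.
    static int[] parseBoundingBox(String boundingBox) {
        String[] parts = boundingBox.split(",");
        int[] box = new int[4];
        for (int i = 0; i < 4; i++) {
            box[i] = Integer.parseInt(parts[i].trim());
        }
        return box;   // {left, top, width, height}
    }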
+ */ + public OcrWord withText(String text) { + this.text = text; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/RecognitionResult.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/RecognitionResult.java new file mode 100644 index 00000000000..0bbcb770a7f --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/RecognitionResult.java @@ -0,0 +1,44 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import java.util.List; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * The RecognitionResult model. + */ +public class RecognitionResult { + /** + * The lines property. + */ + @JsonProperty(value = "lines") + private List lines; + + /** + * Get the lines value. + * + * @return the lines value + */ + public List lines() { + return this.lines; + } + + /** + * Set the lines value. + * + * @param lines the lines value to set + * @return the RecognitionResult object itself. + */ + public RecognitionResult withLines(List lines) { + this.lines = lines; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/RecognizeTextHeaders.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/RecognizeTextHeaders.java new file mode 100644 index 00000000000..8c45f1bfffd --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/RecognizeTextHeaders.java @@ -0,0 +1,44 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * Defines headers for RecognizeText operation. + */ +public class RecognizeTextHeaders { + /** + * URL to query for status of the operation. The operation ID will expire + * in 48 hours. + */ + @JsonProperty(value = "Operation-Location") + private String operationLocation; + + /** + * Get uRL to query for status of the operation. The operation ID will expire in 48 hours. + * + * @return the operationLocation value + */ + public String operationLocation() { + return this.operationLocation; + } + + /** + * Set uRL to query for status of the operation. The operation ID will expire in 48 hours. + * + * @param operationLocation the operationLocation value to set + * @return the RecognizeTextHeaders object itself. 
+ */ + public RecognizeTextHeaders withOperationLocation(String operationLocation) { + this.operationLocation = operationLocation; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/RecognizeTextInStreamHeaders.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/RecognizeTextInStreamHeaders.java new file mode 100644 index 00000000000..6f46e2aefdd --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/RecognizeTextInStreamHeaders.java @@ -0,0 +1,44 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * Defines headers for RecognizeTextInStream operation. + */ +public class RecognizeTextInStreamHeaders { + /** + * URL to query for status of the operation. The operation ID will expire + * in 48 hours. + */ + @JsonProperty(value = "Operation-Location") + private String operationLocation; + + /** + * Get uRL to query for status of the operation. The operation ID will expire in 48 hours. + * + * @return the operationLocation value + */ + public String operationLocation() { + return this.operationLocation; + } + + /** + * Set uRL to query for status of the operation. The operation ID will expire in 48 hours. + * + * @param operationLocation the operationLocation value to set + * @return the RecognizeTextInStreamHeaders object itself. + */ + public RecognizeTextInStreamHeaders withOperationLocation(String operationLocation) { + this.operationLocation = operationLocation; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/TextOperationStatusCodes.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/TextOperationStatusCodes.java new file mode 100644 index 00000000000..e5876856d3e --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/TextOperationStatusCodes.java @@ -0,0 +1,59 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for TextOperationStatusCodes. + */ +public enum TextOperationStatusCodes { + /** Enum value Not Started. */ + NOT_STARTED("Not Started"), + + /** Enum value Running. */ + RUNNING("Running"), + + /** Enum value Failed. */ + FAILED("Failed"), + + /** Enum value Succeeded. */ + SUCCEEDED("Succeeded"); + + /** The actual serialized value for a TextOperationStatusCodes instance. */ + private String value; + + TextOperationStatusCodes(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a TextOperationStatusCodes instance. 
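RecognizeTextHeaders only carries the Operation-Location URL; the recognized text itself has to be fetched by polling until the TextOperationStatusCodes value reaches SUCCEEDED or FAILED. A rough sketch of that loop, assuming the client exposes a getTextOperationResult(operationId) wrapper for the textOperations/{operationId} route and that its result type has a status() accessor (neither is shown in this part of the diff):

    // Sketch only; the poll interval and accessor names are assumptions, and the
    // enclosing method would declare InterruptedException for Thread.sleep.
    String operationLocation = recognizeTextHeaders.operationLocation();
    String operationId = operationLocation.substring(operationLocation.lastIndexOf('/') + 1);

    TextOperationStatusCodes status = TextOperationStatusCodes.RUNNING;
    while (status == TextOperationStatusCodes.NOT_STARTED || status == TextOperationStatusCodes.RUNNING) {
        Thread.sleep(1000);                                            // fixed 1s poll
        status = client.getTextOperationResult(operationId).status();  // accessor name assumed
    }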
+ * + * @param value the serialized value to parse. + * @return the parsed TextOperationStatusCodes object, or null if unable to parse. + */ + @JsonCreator + public static TextOperationStatusCodes fromString(String value) { + TextOperationStatusCodes[] items = TextOperationStatusCodes.values(); + for (TextOperationStatusCodes item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/VisualFeatureTypes.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/VisualFeatureTypes.java new file mode 100644 index 00000000000..f8e10d93ba4 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/VisualFeatureTypes.java @@ -0,0 +1,68 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonValue; + +/** + * Defines values for VisualFeatureTypes. + */ +public enum VisualFeatureTypes { + /** Enum value ImageType. */ + IMAGE_TYPE("ImageType"), + + /** Enum value Faces. */ + FACES("Faces"), + + /** Enum value Adult. */ + ADULT("Adult"), + + /** Enum value Categories. */ + CATEGORIES("Categories"), + + /** Enum value Color. */ + COLOR("Color"), + + /** Enum value Tags. */ + TAGS("Tags"), + + /** Enum value Description. */ + DESCRIPTION("Description"); + + /** The actual serialized value for a VisualFeatureTypes instance. */ + private String value; + + VisualFeatureTypes(String value) { + this.value = value; + } + + /** + * Parses a serialized value to a VisualFeatureTypes instance. + * + * @param value the serialized value to parse. + * @return the parsed VisualFeatureTypes object, or null if unable to parse. + */ + @JsonCreator + public static VisualFeatureTypes fromString(String value) { + VisualFeatureTypes[] items = VisualFeatureTypes.values(); + for (VisualFeatureTypes item : items) { + if (item.toString().equalsIgnoreCase(value)) { + return item; + } + } + return null; + } + + @JsonValue + @Override + public String toString() { + return this.value; + } +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/Word.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/Word.java new file mode 100644 index 00000000000..a3fa7995332 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/Word.java @@ -0,0 +1,70 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. 
+ */ + +package com.microsoft.azure.cognitiveservices.vision.computervision; + +import java.util.List; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * The Word model. + */ +public class Word { + /** + * The boundingBox property. + */ + @JsonProperty(value = "boundingBox") + private List boundingBox; + + /** + * The text property. + */ + @JsonProperty(value = "text") + private String text; + + /** + * Get the boundingBox value. + * + * @return the boundingBox value + */ + public List boundingBox() { + return this.boundingBox; + } + + /** + * Set the boundingBox value. + * + * @param boundingBox the boundingBox value to set + * @return the Word object itself. + */ + public Word withBoundingBox(List boundingBox) { + this.boundingBox = boundingBox; + return this; + } + + /** + * Get the text value. + * + * @return the text value + */ + public String text() { + return this.text; + } + + /** + * Set the text value. + * + * @param text the text value to set + * @return the Word object itself. + */ + public Word withText(String text) { + this.text = text; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ComputerVisionAPIImpl.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ComputerVisionAPIImpl.java new file mode 100644 index 00000000000..222611254b6 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ComputerVisionAPIImpl.java @@ -0,0 +1,2708 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. 
+ */ + +package com.microsoft.azure.cognitiveservices.vision.computervision.implementation; + +import com.google.common.base.Joiner; +import com.google.common.reflect.TypeToken; +import com.microsoft.azure.AzureClient; +import com.microsoft.azure.AzureServiceClient; +import com.microsoft.azure.CloudException; +import com.microsoft.azure.cognitiveservices.vision.computervision.AzureRegions; +import com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionErrorException; +import com.microsoft.azure.cognitiveservices.vision.computervision.Details; +import com.microsoft.azure.cognitiveservices.vision.computervision.ImageUrl; +import com.microsoft.azure.cognitiveservices.vision.computervision.OcrLanguages; +import com.microsoft.azure.cognitiveservices.vision.computervision.RecognizeTextHeaders; +import com.microsoft.azure.cognitiveservices.vision.computervision.RecognizeTextInStreamHeaders; +import com.microsoft.azure.cognitiveservices.vision.computervision.VisualFeatureTypes; +import com.microsoft.rest.CollectionFormat; +import com.microsoft.rest.credentials.ServiceClientCredentials; +import com.microsoft.rest.RestClient; +import com.microsoft.rest.ServiceCallback; +import com.microsoft.rest.ServiceFuture; +import com.microsoft.rest.ServiceResponse; +import com.microsoft.rest.ServiceResponseWithHeaders; +import com.microsoft.rest.Validator; +import java.io.InputStream; +import java.io.IOException; +import java.util.List; +import okhttp3.MediaType; +import okhttp3.RequestBody; +import okhttp3.ResponseBody; +import retrofit2.http.Body; +import retrofit2.http.GET; +import retrofit2.http.Header; +import retrofit2.http.Headers; +import retrofit2.http.Path; +import retrofit2.http.POST; +import retrofit2.http.Query; +import retrofit2.http.Streaming; +import retrofit2.Response; +import rx.functions.Func1; +import rx.Observable; + +/** + * Initializes a new instance of the ComputerVisionAPIImpl class. + */ +public class ComputerVisionAPIImpl extends AzureServiceClient { + /** The Retrofit service to perform REST calls. */ + private ComputerVisionAPIService service; + /** the {@link AzureClient} used for long running operations. */ + private AzureClient azureClient; + + /** + * Gets the {@link AzureClient} used for long running operations. + * @return the azure client; + */ + public AzureClient getAzureClient() { + return this.azureClient; + } + + /** Supported Azure regions for Cognitive Services endpoints. Possible values include: 'westus', 'westeurope', 'southeastasia', 'eastus2', 'westcentralus', 'westus2', 'eastus', 'southcentralus', 'northeurope', 'eastasia', 'australiaeast', 'brazilsouth'. */ + private AzureRegions azureRegion; + + /** + * Gets Supported Azure regions for Cognitive Services endpoints. Possible values include: 'westus', 'westeurope', 'southeastasia', 'eastus2', 'westcentralus', 'westus2', 'eastus', 'southcentralus', 'northeurope', 'eastasia', 'australiaeast', 'brazilsouth'. + * + * @return the azureRegion value. + */ + public AzureRegions azureRegion() { + return this.azureRegion; + } + + /** + * Sets Supported Azure regions for Cognitive Services endpoints. Possible values include: 'westus', 'westeurope', 'southeastasia', 'eastus2', 'westcentralus', 'westus2', 'eastus', 'southcentralus', 'northeurope', 'eastasia', 'australiaeast', 'brazilsouth'. + * + * @param azureRegion the azureRegion value. 
+ * @return the service client itself + */ + public ComputerVisionAPIImpl withAzureRegion(AzureRegions azureRegion) { + this.azureRegion = azureRegion; + return this; + } + + /** Gets or sets the preferred language for the response. */ + private String acceptLanguage; + + /** + * Gets Gets or sets the preferred language for the response. + * + * @return the acceptLanguage value. + */ + public String acceptLanguage() { + return this.acceptLanguage; + } + + /** + * Sets Gets or sets the preferred language for the response. + * + * @param acceptLanguage the acceptLanguage value. + * @return the service client itself + */ + public ComputerVisionAPIImpl withAcceptLanguage(String acceptLanguage) { + this.acceptLanguage = acceptLanguage; + return this; + } + + /** Gets or sets the retry timeout in seconds for Long Running Operations. Default value is 30. */ + private int longRunningOperationRetryTimeout; + + /** + * Gets Gets or sets the retry timeout in seconds for Long Running Operations. Default value is 30. + * + * @return the longRunningOperationRetryTimeout value. + */ + public int longRunningOperationRetryTimeout() { + return this.longRunningOperationRetryTimeout; + } + + /** + * Sets Gets or sets the retry timeout in seconds for Long Running Operations. Default value is 30. + * + * @param longRunningOperationRetryTimeout the longRunningOperationRetryTimeout value. + * @return the service client itself + */ + public ComputerVisionAPIImpl withLongRunningOperationRetryTimeout(int longRunningOperationRetryTimeout) { + this.longRunningOperationRetryTimeout = longRunningOperationRetryTimeout; + return this; + } + + /** When set to true a unique x-ms-client-request-id value is generated and included in each request. Default is true. */ + private boolean generateClientRequestId; + + /** + * Gets When set to true a unique x-ms-client-request-id value is generated and included in each request. Default is true. + * + * @return the generateClientRequestId value. + */ + public boolean generateClientRequestId() { + return this.generateClientRequestId; + } + + /** + * Sets When set to true a unique x-ms-client-request-id value is generated and included in each request. Default is true. + * + * @param generateClientRequestId the generateClientRequestId value. + * @return the service client itself + */ + public ComputerVisionAPIImpl withGenerateClientRequestId(boolean generateClientRequestId) { + this.generateClientRequestId = generateClientRequestId; + return this; + } + + /** + * Initializes an instance of ComputerVisionAPI client. + * + * @param credentials the management credentials for Azure + */ + public ComputerVisionAPIImpl(ServiceClientCredentials credentials) { + this("https://{AzureRegion}.api.cognitive.microsoft.com/vision/v1.0", credentials); + } + + /** + * Initializes an instance of ComputerVisionAPI client. + * + * @param baseUrl the base URL of the host + * @param credentials the management credentials for Azure + */ + private ComputerVisionAPIImpl(String baseUrl, ServiceClientCredentials credentials) { + super(baseUrl, credentials); + initialize(); + } + + /** + * Initializes an instance of ComputerVisionAPI client. + * + * @param restClient the REST client to connect to Azure. 
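Because the client targets the parameterized host "https://{AzureRegion}.api.cognitive.microsoft.com/vision/v1.0", a region must be set before any operation is invoked. A minimal configuration sketch, assuming keyCredentials is some ServiceClientCredentials implementation that attaches the Ocp-Apim-Subscription-Key header (such an implementation is not part of this change):

    ComputerVisionAPIImpl client = new ComputerVisionAPIImpl(keyCredentials)
            .withAzureRegion(AzureRegions.WESTUS)    // substituted into {AzureRegion} in the base URL
            .withAcceptLanguage("en-US");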
+ */ + public ComputerVisionAPIImpl(RestClient restClient) { + super(restClient); + initialize(); + } + + protected void initialize() { + this.acceptLanguage = "en-US"; + this.longRunningOperationRetryTimeout = 30; + this.generateClientRequestId = true; + this.azureClient = new AzureClient(this); + initializeService(); + } + + /** + * Gets the User-Agent header for the client. + * + * @return the user agent string. + */ + @Override + public String userAgent() { + return String.format("%s (%s, %s)", super.userAgent(), "ComputerVisionAPI", "1.0"); + } + + private void initializeService() { + service = restClient().retrofit().create(ComputerVisionAPIService.class); + } + + /** + * The interface defining all the services for ComputerVisionAPI to be + * used by Retrofit to perform actually REST calls. + */ + interface ComputerVisionAPIService { + @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionAPI listModels" }) + @GET("models") + Observable> listModels(@Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionAPI analyzeImage" }) + @POST("analyze") + Observable> analyzeImage(@Query("visualFeatures") String visualFeatures, @Query("details") String details, @Query("language") String language, @Header("accept-language") String acceptLanguage, @Body ImageUrl imageUrl, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionAPI generateThumbnail" }) + @POST("generateThumbnail") + @Streaming + Observable> generateThumbnail(@Query("width") int width, @Query("height") int height, @Query("smartCropping") Boolean smartCropping, @Header("accept-language") String acceptLanguage, @Body ImageUrl imageUrl, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionAPI recognizePrintedText" }) + @POST("ocr") + Observable> recognizePrintedText(@Query("detectOrientation") boolean detectOrientation, @Query("language") OcrLanguages language, @Header("accept-language") String acceptLanguage, @Body ImageUrl imageUrl, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionAPI describeImage" }) + @POST("describe") + Observable> describeImage(@Query("maxCandidates") String maxCandidates, @Query("language") String language, @Header("accept-language") String acceptLanguage, @Body ImageUrl imageUrl, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionAPI tagImage" }) + @POST("tag") + Observable> tagImage(@Query("language") String language, 
@Header("accept-language") String acceptLanguage, @Body ImageUrl imageUrl, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionAPI analyzeImageByDomain" }) + @POST("models/{model}/analyze") + Observable> analyzeImageByDomain(@Path("model") String model, @Query("language") String language, @Header("accept-language") String acceptLanguage, @Body ImageUrl imageUrl, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionAPI recognizeText" }) + @POST("recognizeText") + Observable> recognizeText(@Query("detectHandwriting") Boolean detectHandwriting, @Header("accept-language") String acceptLanguage, @Body ImageUrl imageUrl, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/json; charset=utf-8", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionAPI getTextOperationResult" }) + @GET("textOperations/{operationId}") + Observable> getTextOperationResult(@Path("operationId") String operationId, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/octet-stream", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionAPI analyzeImageInStream" }) + @POST("analyze") + Observable> analyzeImageInStream(@Query("visualFeatures") String visualFeatures, @Query("details") String details, @Query("language") String language, @Body RequestBody image, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/octet-stream", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionAPI generateThumbnailInStream" }) + @POST("generateThumbnail") + @Streaming + Observable> generateThumbnailInStream(@Query("width") int width, @Query("height") int height, @Body RequestBody image, @Query("smartCropping") Boolean smartCropping, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/octet-stream", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionAPI recognizePrintedTextInStream" }) + @POST("ocr") + Observable> recognizePrintedTextInStream(@Query("language") OcrLanguages language, @Query("detectOrientation") boolean detectOrientation, @Body RequestBody image, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/octet-stream", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionAPI describeImageInStream" }) + @POST("describe") + Observable> describeImageInStream(@Query("maxCandidates") String maxCandidates, @Query("language") String language, @Body RequestBody 
image, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/octet-stream", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionAPI tagImageInStream" }) + @POST("tag") + Observable> tagImageInStream(@Query("language") String language, @Body RequestBody image, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/octet-stream", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionAPI analyzeImageByDomainInStream" }) + @POST("models/{model}/analyze") + Observable> analyzeImageByDomainInStream(@Path("model") String model, @Query("language") String language, @Body RequestBody image, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + @Headers({ "Content-Type: application/octet-stream", "x-ms-logging-context: com.microsoft.azure.cognitiveservices.vision.computervision.ComputerVisionAPI recognizeTextInStream" }) + @POST("recognizeText") + Observable> recognizeTextInStream(@Query("detectHandwriting") Boolean detectHandwriting, @Body RequestBody image, @Header("accept-language") String acceptLanguage, @Header("x-ms-parameterized-host") String parameterizedHost, @Header("User-Agent") String userAgent); + + } + + /** + * This operation returns the list of domain-specific models that are supported by the Computer Vision API. Currently, the API only supports one domain-specific model: a celebrity recognizer. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ListModelsResultInner object if successful. + */ + public ListModelsResultInner listModels() { + return listModelsWithServiceResponseAsync().toBlocking().single().body(); + } + + /** + * This operation returns the list of domain-specific models that are supported by the Computer Vision API. Currently, the API only supports one domain-specific model: a celebrity recognizer. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture listModelsAsync(final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(listModelsWithServiceResponseAsync(), serviceCallback); + } + + /** + * This operation returns the list of domain-specific models that are supported by the Computer Vision API. Currently, the API only supports one domain-specific model: a celebrity recognizer. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. 
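For orientation, listModels is exposed both as a blocking call and as an RxJava 1.x Observable; a brief usage sketch against a configured client (output handling is illustrative):

    // Blocking variant.
    ListModelsResultInner models = client.listModels();

    // Observable variant.
    client.listModelsAsync().subscribe(new rx.functions.Action1<ListModelsResultInner>() {
        @Override
        public void call(ListModelsResultInner result) {
            System.out.println("Retrieved the list of domain-specific models.");
        }
    });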
+ * + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ListModelsResultInner object + */ + public Observable listModelsAsync() { + return listModelsWithServiceResponseAsync().map(new Func1, ListModelsResultInner>() { + @Override + public ListModelsResultInner call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation returns the list of domain-specific models that are supported by the Computer Vision API. Currently, the API only supports one domain-specific model: a celebrity recognizer. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ListModelsResultInner object + */ + public Observable> listModelsWithServiceResponseAsync() { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + return service.listModels(this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = listModelsDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse listModelsDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .build(response); + } + + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageAnalysisInner object if successful. + */ + public ImageAnalysisInner analyzeImage(String url) { + return analyzeImageWithServiceResponseAsync(url).toBlocking().single().body(); + } + + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. + * + * @param url Publicly reachable URL of an image + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture analyzeImageAsync(String url, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(analyzeImageWithServiceResponseAsync(url), serviceCallback); + } + + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageAnalysisInner object + */ + public Observable analyzeImageAsync(String url) { + return analyzeImageWithServiceResponseAsync(url).map(new Func1, ImageAnalysisInner>() { + @Override + public ImageAnalysisInner call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageAnalysisInner object + */ + public Observable> analyzeImageWithServiceResponseAsync(String url) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + final List visualFeatures = null; + final List
details = null; + final String language = null; + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + String visualFeaturesConverted = this.serializerAdapter().serializeList(visualFeatures, CollectionFormat.CSV);String detailsConverted = this.serializerAdapter().serializeList(details, CollectionFormat.CSV); + return service.analyzeImage(visualFeaturesConverted, detailsConverted, language, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = analyzeImageDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. + * + * @param url Publicly reachable URL of an image + * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageAnalysisInner object if successful. + */ + public ImageAnalysisInner analyzeImage(String url, List visualFeatures, List
details, String language) { + return analyzeImageWithServiceResponseAsync(url, visualFeatures, details, language).toBlocking().single().body(); + } + + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. + * + * @param url Publicly reachable URL of an image + * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture analyzeImageAsync(String url, List visualFeatures, List
details, String language, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(analyzeImageWithServiceResponseAsync(url, visualFeatures, details, language), serviceCallback); + } + + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. + * + * @param url Publicly reachable URL of an image + * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageAnalysisInner object + */ + public Observable analyzeImageAsync(String url, List visualFeatures, List
details, String language) { + return analyzeImageWithServiceResponseAsync(url, visualFeatures, details, language).map(new Func1, ImageAnalysisInner>() { + @Override + public ImageAnalysisInner call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation extracts a rich set of visual features based on the image content. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response. + * + * @param url Publicly reachable URL of an image + * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageAnalysisInner object + */ + public Observable> analyzeImageWithServiceResponseAsync(String url, List visualFeatures, List
details, String language) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + Validator.validate(visualFeatures); + Validator.validate(details); + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + String visualFeaturesConverted = this.serializerAdapter().serializeList(visualFeatures, CollectionFormat.CSV); + String detailsConverted = this.serializerAdapter().serializeList(details, CollectionFormat.CSV); + return service.analyzeImage(visualFeaturesConverted, detailsConverted, language, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = analyzeImageDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse analyzeImageDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .build(response); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws CloudException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the InputStream object if successful. + */ + public InputStream generateThumbnail(int width, int height, String url) { + return generateThumbnailWithServiceResponseAsync(width, height, url).toBlocking().single().body(); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50.
+ * @param url Publicly reachable URL of an image + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture generateThumbnailAsync(int width, int height, String url, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(generateThumbnailWithServiceResponseAsync(width, height, url), serviceCallback); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + public Observable generateThumbnailAsync(int width, int height, String url) { + return generateThumbnailWithServiceResponseAsync(width, height, url).map(new Func1, InputStream>() { + @Override + public InputStream call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. 
+ * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + public Observable> generateThumbnailWithServiceResponseAsync(int width, int height, String url) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + final Boolean smartCropping = null; + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + return service.generateThumbnail(width, height, smartCropping, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = generateThumbnailDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param url Publicly reachable URL of an image + * @param smartCropping Boolean flag for enabling smart cropping. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws CloudException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the InputStream object if successful. + */ + public InputStream generateThumbnail(int width, int height, String url, Boolean smartCropping) { + return generateThumbnailWithServiceResponseAsync(width, height, url, smartCropping).toBlocking().single().body(); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param url Publicly reachable URL of an image + * @param smartCropping Boolean flag for enabling smart cropping. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture generateThumbnailAsync(int width, int height, String url, Boolean smartCropping, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(generateThumbnailWithServiceResponseAsync(width, height, url, smartCropping), serviceCallback); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param url Publicly reachable URL of an image + * @param smartCropping Boolean flag for enabling smart cropping. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + public Observable generateThumbnailAsync(int width, int height, String url, Boolean smartCropping) { + return generateThumbnailWithServiceResponseAsync(width, height, url, smartCropping).map(new Func1, InputStream>() { + @Override + public InputStream call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param url Publicly reachable URL of an image + * @param smartCropping Boolean flag for enabling smart cropping. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + public Observable> generateThumbnailWithServiceResponseAsync(int width, int height, String url, Boolean smartCropping) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + return service.generateThumbnail(width, height, smartCropping, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = generateThumbnailDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse generateThumbnailDelegate(Response response) throws CloudException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(CloudException.class) + .build(response); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the OcrResultInner object if successful. + */ + public OcrResultInner recognizePrintedText(boolean detectOrientation, String url) { + return recognizePrintedTextWithServiceResponseAsync(detectOrientation, url).toBlocking().single().body(); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param url Publicly reachable URL of an image + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture recognizePrintedTextAsync(boolean detectOrientation, String url, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(recognizePrintedTextWithServiceResponseAsync(detectOrientation, url), serviceCallback); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResultInner object + */ + public Observable recognizePrintedTextAsync(boolean detectOrientation, String url) { + return recognizePrintedTextWithServiceResponseAsync(detectOrientation, url).map(new Func1, OcrResultInner>() { + @Override + public OcrResultInner call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResultInner object + */ + public Observable> recognizePrintedTextWithServiceResponseAsync(boolean detectOrientation, String url) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + final OcrLanguages language = null; + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + return service.recognizePrintedText(detectOrientation, language, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = recognizePrintedTextDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. 
Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param url Publicly reachable URL of an image + * @param language The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the OcrResultInner object if successful. + */ + public OcrResultInner recognizePrintedText(boolean detectOrientation, String url, OcrLanguages language) { + return recognizePrintedTextWithServiceResponseAsync(detectOrientation, url, language).toBlocking().single().body(); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param url Publicly reachable URL of an image + * @param language The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture recognizePrintedTextAsync(boolean detectOrientation, String url, OcrLanguages language, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(recognizePrintedTextWithServiceResponseAsync(detectOrientation, url, language), serviceCallback); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. 
With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param url Publicly reachable URL of an image + * @param language The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResultInner object + */ + public Observable recognizePrintedTextAsync(boolean detectOrientation, String url, OcrLanguages language) { + return recognizePrintedTextWithServiceResponseAsync(detectOrientation, url, language).map(new Func1, OcrResultInner>() { + @Override + public OcrResultInner call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param url Publicly reachable URL of an image + * @param language The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. 
Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResultInner object + */ + public Observable> recognizePrintedTextWithServiceResponseAsync(boolean detectOrientation, String url, OcrLanguages language) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + return service.recognizePrintedText(detectOrientation, language, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = recognizePrintedTextDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse recognizePrintedTextDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .build(response); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageDescriptionInner object if successful. + */ + public ImageDescriptionInner describeImage(String url) { + return describeImageWithServiceResponseAsync(url).toBlocking().single().body(); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. 
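+ * <p>
+ * A minimal usage sketch (assuming {@code client} is an already-configured instance of this class; the image URL and the callback body are placeholders):
+ * <pre>
+ * {@code
+ * client.describeImageAsync("https://example.com/sample.jpg", new ServiceCallback<ImageDescriptionInner>() {
+ *     public void success(ImageDescriptionInner result) {
+ *         // inspect the generated descriptions and tags here
+ *     }
+ *     public void failure(Throwable t) {
+ *         // handle the error here
+ *     }
+ * });
+ * }
+ * </pre>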
+ * + * @param url Publicly reachable URL of an image + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture describeImageAsync(String url, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(describeImageWithServiceResponseAsync(url), serviceCallback); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescriptionInner object + */ + public Observable describeImageAsync(String url) { + return describeImageWithServiceResponseAsync(url).map(new Func1, ImageDescriptionInner>() { + @Override + public ImageDescriptionInner call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescriptionInner object + */ + public Observable> describeImageWithServiceResponseAsync(String url) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + final String maxCandidates = null; + final String language = null; + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + return service.describeImage(maxCandidates, language, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = describeImageDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. 
More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param url Publicly reachable URL of an image + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageDescriptionInner object if successful. + */ + public ImageDescriptionInner describeImage(String url, String maxCandidates, String language) { + return describeImageWithServiceResponseAsync(url, maxCandidates, language).toBlocking().single().body(); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param url Publicly reachable URL of an image + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture describeImageAsync(String url, String maxCandidates, String language, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(describeImageWithServiceResponseAsync(url, maxCandidates, language), serviceCallback); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. 
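+ * <p>
+ * A minimal usage sketch (assuming {@code client} is an already-configured instance of this class; the URL, candidate count and language values are placeholders, and the subscriber uses RxJava's {@code Action1}):
+ * <pre>
+ * {@code
+ * client.describeImageAsync("https://example.com/sample.jpg", "3", "en")
+ *     .subscribe(new Action1<ImageDescriptionInner>() {
+ *         public void call(ImageDescriptionInner description) {
+ *             // up to three candidate descriptions, ordered by confidence score
+ *         }
+ *     });
+ * }
+ * </pre>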
+ * + * @param url Publicly reachable URL of an image + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescriptionInner object + */ + public Observable describeImageAsync(String url, String maxCandidates, String language) { + return describeImageWithServiceResponseAsync(url, maxCandidates, language).map(new Func1, ImageDescriptionInner>() { + @Override + public ImageDescriptionInner call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param url Publicly reachable URL of an image + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescriptionInner object + */ + public Observable> describeImageWithServiceResponseAsync(String url, String maxCandidates, String language) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + return service.describeImage(maxCandidates, language, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = describeImageDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse describeImageDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .build(response); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. 
The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the TagResultInner object if successful. + */ + public TagResultInner tagImage(String url) { + return tagImageWithServiceResponseAsync(url).toBlocking().single().body(); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param url Publicly reachable URL of an image + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture tagImageAsync(String url, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(tagImageWithServiceResponseAsync(url), serviceCallback); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResultInner object + */ + public Observable tagImageAsync(String url) { + return tagImageWithServiceResponseAsync(url).map(new Func1, TagResultInner>() { + @Override + public TagResultInner call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. 
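+ * <p>
+ * A minimal usage sketch (assuming {@code client} is an already-configured instance of this class; the image URL is a placeholder):
+ * <pre>
+ * {@code
+ * ServiceResponse<TagResultInner> response =
+ *     client.tagImageWithServiceResponseAsync("https://example.com/sample.jpg").toBlocking().single();
+ * TagResultInner tags = response.body();   // the recognized tags for the image
+ * }
+ * </pre>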
+ * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResultInner object + */ + public Observable> tagImageWithServiceResponseAsync(String url) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + final String language = null; + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + return service.tagImage(language, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = tagImageDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the TagResultInner object if successful. + */ + public TagResultInner tagImage(String url, String language) { + return tagImageWithServiceResponseAsync(url, language).toBlocking().single().body(); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture tagImageAsync(String url, String language, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(tagImageWithServiceResponseAsync(url, language), serviceCallback); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResultInner object + */ + public Observable tagImageAsync(String url, String language) { + return tagImageWithServiceResponseAsync(url, language).map(new Func1, TagResultInner>() { + @Override + public TagResultInner call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. 
Possible values include: 'en', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResultInner object + */ + public Observable> tagImageWithServiceResponseAsync(String url, String language) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + return service.tagImage(language, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = tagImageDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse tagImageDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .build(response); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the DomainModelResultsInner object if successful. + */ + public DomainModelResultsInner analyzeImageByDomain(String model, String url) { + return analyzeImageByDomainWithServiceResponseAsync(model, url).toBlocking().single().body(); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param url Publicly reachable URL of an image + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture analyzeImageByDomainAsync(String model, String url, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(analyzeImageByDomainWithServiceResponseAsync(model, url), serviceCallback); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResultsInner object + */ + public Observable analyzeImageByDomainAsync(String model, String url) { + return analyzeImageByDomainWithServiceResponseAsync(model, url).map(new Func1, DomainModelResultsInner>() { + @Override + public DomainModelResultsInner call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResultsInner object + */ + public Observable> analyzeImageByDomainWithServiceResponseAsync(String model, String url) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (model == null) { + throw new IllegalArgumentException("Parameter model is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + final String language = null; + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + return service.analyzeImageByDomain(model, language, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = analyzeImageByDomainDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. 
The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the DomainModelResultsInner object if successful. + */ + public DomainModelResultsInner analyzeImageByDomain(String model, String url, String language) { + return analyzeImageByDomainWithServiceResponseAsync(model, url, language).toBlocking().single().body(); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture analyzeImageByDomainAsync(String model, String url, String language, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(analyzeImageByDomainWithServiceResponseAsync(model, url, language), serviceCallback); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. 
If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResultsInner object + */ + public Observable analyzeImageByDomainAsync(String model, String url, String language) { + return analyzeImageByDomainWithServiceResponseAsync(model, url, language).map(new Func1, DomainModelResultsInner>() { + @Override + public DomainModelResultsInner call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param url Publicly reachable URL of an image + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResultsInner object + */ + public Observable> analyzeImageByDomainWithServiceResponseAsync(String model, String url, String language) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (model == null) { + throw new IllegalArgumentException("Parameter model is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + return service.analyzeImageByDomain(model, language, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = analyzeImageByDomainDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse analyzeImageByDomainDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .build(response); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Handwritten Text Operation Result operation. 
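+     * <p>Illustrative usage sketch only, not generated code; {@code client} and the URL below are
+     * placeholders for a configured instance of this class and a real image location.</p>
+     * <pre>{@code
+     * // Placeholder client/URL: submit the image for text recognition; the result is retrieved
+     * // later by polling the URL returned in the Operation-Location response header.
+     * client.recognizeText("https://example.com/printed-page.jpg");
+     * }</pre>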
+ * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + */ + public void recognizeText(String url) { + recognizeTextWithServiceResponseAsync(url).toBlocking().single().body(); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Handwritten Text Operation Result operation. + * + * @param url Publicly reachable URL of an image + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture recognizeTextAsync(String url, final ServiceCallback serviceCallback) { + return ServiceFuture.fromHeaderResponse(recognizeTextWithServiceResponseAsync(url), serviceCallback); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Handwritten Text Operation Result operation. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceResponseWithHeaders} object if successful. + */ + public Observable recognizeTextAsync(String url) { + return recognizeTextWithServiceResponseAsync(url).map(new Func1, Void>() { + @Override + public Void call(ServiceResponseWithHeaders response) { + return response.body(); + } + }); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Handwritten Text Operation Result operation. + * + * @param url Publicly reachable URL of an image + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceResponseWithHeaders} object if successful. + */ + public Observable> recognizeTextWithServiceResponseAsync(String url) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + final Boolean detectHandwriting = null; + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + return service.recognizeText(detectHandwriting, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponseWithHeaders clientResponse = recognizeTextDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. 
The 'Operation-Location' field contains the URL that you must use for your Get Handwritten Text Operation Result operation. + * + * @param url Publicly reachable URL of an image + * @param detectHandwriting If 'true' is specified, handwriting recognition is performed. If this parameter is set to 'false' or is not specified, printed text recognition is performed. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + */ + public void recognizeText(String url, Boolean detectHandwriting) { + recognizeTextWithServiceResponseAsync(url, detectHandwriting).toBlocking().single().body(); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Handwritten Text Operation Result operation. + * + * @param url Publicly reachable URL of an image + * @param detectHandwriting If 'true' is specified, handwriting recognition is performed. If this parameter is set to 'false' or is not specified, printed text recognition is performed. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture recognizeTextAsync(String url, Boolean detectHandwriting, final ServiceCallback serviceCallback) { + return ServiceFuture.fromHeaderResponse(recognizeTextWithServiceResponseAsync(url, detectHandwriting), serviceCallback); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Handwritten Text Operation Result operation. + * + * @param url Publicly reachable URL of an image + * @param detectHandwriting If 'true' is specified, handwriting recognition is performed. If this parameter is set to 'false' or is not specified, printed text recognition is performed. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceResponseWithHeaders} object if successful. + */ + public Observable recognizeTextAsync(String url, Boolean detectHandwriting) { + return recognizeTextWithServiceResponseAsync(url, detectHandwriting).map(new Func1, Void>() { + @Override + public Void call(ServiceResponseWithHeaders response) { + return response.body(); + } + }); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Handwritten Text Operation Result operation. + * + * @param url Publicly reachable URL of an image + * @param detectHandwriting If 'true' is specified, handwriting recognition is performed. If this parameter is set to 'false' or is not specified, printed text recognition is performed. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceResponseWithHeaders} object if successful. 
+ */ + public Observable> recognizeTextWithServiceResponseAsync(String url, Boolean detectHandwriting) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (url == null) { + throw new IllegalArgumentException("Parameter url is required and cannot be null."); + } + ImageUrl imageUrl = new ImageUrl(); + imageUrl.withUrl(url); + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + return service.recognizeText(detectHandwriting, this.acceptLanguage(), imageUrl, parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponseWithHeaders clientResponse = recognizeTextDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponseWithHeaders recognizeTextDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(202, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .buildWithHeaders(response, RecognizeTextHeaders.class); + } + + /** + * This interface is used for getting text operation result. The URL to this interface should be retrieved from 'Operation-Location' field returned from Recognize Text interface. + * + * @param operationId Id of the text operation returned in the response of the 'Recognize Handwritten Text' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the TextOperationResultInner object if successful. + */ + public TextOperationResultInner getTextOperationResult(String operationId) { + return getTextOperationResultWithServiceResponseAsync(operationId).toBlocking().single().body(); + } + + /** + * This interface is used for getting text operation result. The URL to this interface should be retrieved from 'Operation-Location' field returned from Recognize Text interface. + * + * @param operationId Id of the text operation returned in the response of the 'Recognize Handwritten Text' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture getTextOperationResultAsync(String operationId, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(getTextOperationResultWithServiceResponseAsync(operationId), serviceCallback); + } + + /** + * This interface is used for getting text operation result. The URL to this interface should be retrieved from 'Operation-Location' field returned from Recognize Text interface. 
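+     * <p>Illustrative usage sketch only, not generated code; {@code client} and {@code operationId}
+     * are placeholders, the id being the trailing segment of the Operation-Location header returned
+     * by the Recognize Text call.</p>
+     * <pre>{@code
+     * // Placeholder client/operationId: poll for the outcome of a submitted Recognize Text request.
+     * TextOperationResultInner result = client.getTextOperationResult(operationId);
+     * }</pre>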
+ * + * @param operationId Id of the text operation returned in the response of the 'Recognize Handwritten Text' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TextOperationResultInner object + */ + public Observable getTextOperationResultAsync(String operationId) { + return getTextOperationResultWithServiceResponseAsync(operationId).map(new Func1, TextOperationResultInner>() { + @Override + public TextOperationResultInner call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This interface is used for getting text operation result. The URL to this interface should be retrieved from 'Operation-Location' field returned from Recognize Text interface. + * + * @param operationId Id of the text operation returned in the response of the 'Recognize Handwritten Text' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TextOperationResultInner object + */ + public Observable> getTextOperationResultWithServiceResponseAsync(String operationId) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (operationId == null) { + throw new IllegalArgumentException("Parameter operationId is required and cannot be null."); + } + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + return service.getTextOperationResult(operationId, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = getTextOperationResultDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse getTextOperationResultDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .build(response); + } + + /** + * This operation extracts a rich set of visual features based on the image content. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageAnalysisInner object if successful. + */ + public ImageAnalysisInner analyzeImageInStream(byte[] image) { + return analyzeImageInStreamWithServiceResponseAsync(image).toBlocking().single().body(); + } + + /** + * This operation extracts a rich set of visual features based on the image content. + * + * @param image An image stream. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture analyzeImageInStreamAsync(byte[] image, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(analyzeImageInStreamWithServiceResponseAsync(image), serviceCallback); + } + + /** + * This operation extracts a rich set of visual features based on the image content. 
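+     * <p>Illustrative usage sketch only, not generated code; {@code client} and the file path are
+     * placeholders.</p>
+     * <pre>{@code
+     * // Placeholder client/path: read the raw image bytes and request the default visual features.
+     * byte[] image = java.nio.file.Files.readAllBytes(java.nio.file.Paths.get("photo.jpg"));
+     * ImageAnalysisInner analysis = client.analyzeImageInStream(image);
+     * }</pre>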
+     *
+     * @param image An image stream.
+     * @throws IllegalArgumentException thrown if parameters fail the validation
+     * @return the observable to the ImageAnalysisInner object
+     */
+    public Observable<ImageAnalysisInner> analyzeImageInStreamAsync(byte[] image) {
+        return analyzeImageInStreamWithServiceResponseAsync(image).map(new Func1<ServiceResponse<ImageAnalysisInner>, ImageAnalysisInner>() {
+            @Override
+            public ImageAnalysisInner call(ServiceResponse<ImageAnalysisInner> response) {
+                return response.body();
+            }
+        });
+    }
+
+    /**
+     * This operation extracts a rich set of visual features based on the image content.
+     *
+     * @param image An image stream.
+     * @throws IllegalArgumentException thrown if parameters fail the validation
+     * @return the observable to the ImageAnalysisInner object
+     */
+    public Observable<ServiceResponse<ImageAnalysisInner>> analyzeImageInStreamWithServiceResponseAsync(byte[] image) {
+        if (this.azureRegion() == null) {
+            throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null.");
+        }
+        if (image == null) {
+            throw new IllegalArgumentException("Parameter image is required and cannot be null.");
+        }
+        final List<VisualFeatureTypes> visualFeatures = null;
+        final String details = null;
+        final String language = null;
+        String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion());
+        String visualFeaturesConverted = this.serializerAdapter().serializeList(visualFeatures, CollectionFormat.CSV);
+        RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image);
+        return service.analyzeImageInStream(visualFeaturesConverted, details, language, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent())
+            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<ImageAnalysisInner>>>() {
+                @Override
+                public Observable<ServiceResponse<ImageAnalysisInner>> call(Response<ResponseBody> response) {
+                    try {
+                        ServiceResponse<ImageAnalysisInner> clientResponse = analyzeImageInStreamDelegate(response);
+                        return Observable.just(clientResponse);
+                    } catch (Throwable t) {
+                        return Observable.error(t);
+                    }
+                }
+            });
+    }
+
+    /**
+     * This operation extracts a rich set of visual features based on the image content.
+     *
+     * @param image An image stream.
+     * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected.
+     * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. Possible values include: 'Celebrities', 'Landmarks'
+     * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese.
Possible values include: 'en', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageAnalysisInner object if successful. + */ + public ImageAnalysisInner analyzeImageInStream(byte[] image, List visualFeatures, String details, String language) { + return analyzeImageInStreamWithServiceResponseAsync(image, visualFeatures, details, language).toBlocking().single().body(); + } + + /** + * This operation extracts a rich set of visual features based on the image content. + * + * @param image An image stream. + * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. Possible values include: 'Celebrities', 'Landmarks' + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture analyzeImageInStreamAsync(byte[] image, List visualFeatures, String details, String language, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(analyzeImageInStreamWithServiceResponseAsync(image, visualFeatures, details, language), serviceCallback); + } + + /** + * This operation extracts a rich set of visual features based on the image content. + * + * @param image An image stream. + * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. 
+ * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. Possible values include: 'Celebrities', 'Landmarks' + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageAnalysisInner object + */ + public Observable analyzeImageInStreamAsync(byte[] image, List visualFeatures, String details, String language) { + return analyzeImageInStreamWithServiceResponseAsync(image, visualFeatures, details, language).map(new Func1, ImageAnalysisInner>() { + @Override + public ImageAnalysisInner call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation extracts a rich set of visual features based on the image content. + * + * @param image An image stream. + * @param visualFeatures A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include:Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white.Adult - detects if the image is pornographic in nature (depicts nudity or a sex act). Sexually suggestive content is also detected. + * @param details A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include:Celebrities - identifies celebrities if detected in the image. Possible values include: 'Celebrities', 'Landmarks' + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. 
Possible values include: 'en', 'ja', 'pt', 'zh'
+     * @throws IllegalArgumentException thrown if parameters fail the validation
+     * @return the observable to the ImageAnalysisInner object
+     */
+    public Observable<ServiceResponse<ImageAnalysisInner>> analyzeImageInStreamWithServiceResponseAsync(byte[] image, List<VisualFeatureTypes> visualFeatures, String details, String language) {
+        if (this.azureRegion() == null) {
+            throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null.");
+        }
+        if (image == null) {
+            throw new IllegalArgumentException("Parameter image is required and cannot be null.");
+        }
+        Validator.validate(visualFeatures);
+        String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion());
+        String visualFeaturesConverted = this.serializerAdapter().serializeList(visualFeatures, CollectionFormat.CSV);
+        RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image);
+        return service.analyzeImageInStream(visualFeaturesConverted, details, language, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent())
+            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<ImageAnalysisInner>>>() {
+                @Override
+                public Observable<ServiceResponse<ImageAnalysisInner>> call(Response<ResponseBody> response) {
+                    try {
+                        ServiceResponse<ImageAnalysisInner> clientResponse = analyzeImageInStreamDelegate(response);
+                        return Observable.just(clientResponse);
+                    } catch (Throwable t) {
+                        return Observable.error(t);
+                    }
+                }
+            });
+    }
+
+    private ServiceResponse<ImageAnalysisInner> analyzeImageInStreamDelegate(Response<ResponseBody> response) throws ComputerVisionErrorException, IOException, IllegalArgumentException {
+        return this.restClient().responseBuilderFactory().<ImageAnalysisInner, ComputerVisionErrorException>newInstance(this.serializerAdapter())
+                .register(200, new TypeToken<ImageAnalysisInner>() { }.getType())
+                .registerError(ComputerVisionErrorException.class)
+                .build(response);
+    }
+
+    /**
+     * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong.
+     *
+     * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50.
+     * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50.
+     * @param image An image stream.
+     * @throws IllegalArgumentException thrown if parameters fail the validation
+     * @throws CloudException thrown if the request is rejected by server
+     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
+     * @return the InputStream object if successful.
+     */
+    public InputStream generateThumbnailInStream(int width, int height, byte[] image) {
+        return generateThumbnailInStreamWithServiceResponseAsync(width, height, image).toBlocking().single().body();
+    }
+
+    /**
+     * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong.
+     *
+     * @param width Width of the thumbnail.
It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param image An image stream. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture generateThumbnailInStreamAsync(int width, int height, byte[] image, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(generateThumbnailInStreamWithServiceResponseAsync(width, height, image), serviceCallback); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + public Observable generateThumbnailInStreamAsync(int width, int height, byte[] image) { + return generateThumbnailInStreamWithServiceResponseAsync(width, height, image).map(new Func1, InputStream>() { + @Override + public InputStream call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param image An image stream. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + public Observable> generateThumbnailInStreamWithServiceResponseAsync(int width, int height, byte[] image) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + final Boolean smartCropping = null; + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.generateThumbnailInStream(width, height, imageConverted, smartCropping, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = generateThumbnailInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param image An image stream. + * @param smartCropping Boolean flag for enabling smart cropping. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws CloudException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the InputStream object if successful. + */ + public InputStream generateThumbnailInStream(int width, int height, byte[] image, Boolean smartCropping) { + return generateThumbnailInStreamWithServiceResponseAsync(width, height, image, smartCropping).toBlocking().single().body(); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param image An image stream. + * @param smartCropping Boolean flag for enabling smart cropping. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture generateThumbnailInStreamAsync(int width, int height, byte[] image, Boolean smartCropping, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(generateThumbnailInStreamWithServiceResponseAsync(width, height, image, smartCropping), serviceCallback); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param image An image stream. + * @param smartCropping Boolean flag for enabling smart cropping. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + public Observable generateThumbnailInStreamAsync(int width, int height, byte[] image, Boolean smartCropping) { + return generateThumbnailInStreamWithServiceResponseAsync(width, height, image, smartCropping).map(new Func1, InputStream>() { + @Override + public InputStream call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image. A successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong. + * + * @param width Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param height Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50. + * @param image An image stream. + * @param smartCropping Boolean flag for enabling smart cropping. 
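+     * <p>Illustrative usage sketch only, not generated code; {@code client} and {@code image} are
+     * placeholders for a configured instance of this class and the raw source image bytes.</p>
+     * <pre>{@code
+     * // Placeholder client/image: request a 200x200 thumbnail with smart cropping enabled.
+     * InputStream thumbnail = client.generateThumbnailInStream(200, 200, image, true);
+     * }</pre>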
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the InputStream object + */ + public Observable> generateThumbnailInStreamWithServiceResponseAsync(int width, int height, byte[] image, Boolean smartCropping) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.generateThumbnailInStream(width, height, imageConverted, smartCropping, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = generateThumbnailInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse generateThumbnailInStreamDelegate(Response response) throws CloudException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(CloudException.class) + .build(response); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the OcrResultInner object if successful. + */ + public OcrResultInner recognizePrintedTextInStream(boolean detectOrientation, byte[] image) { + return recognizePrintedTextInStreamWithServiceResponseAsync(detectOrientation, image).toBlocking().single().body(); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture recognizePrintedTextInStreamAsync(boolean detectOrientation, byte[] image, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(recognizePrintedTextInStreamWithServiceResponseAsync(detectOrientation, image), serviceCallback); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResultInner object + */ + public Observable recognizePrintedTextInStreamAsync(boolean detectOrientation, byte[] image) { + return recognizePrintedTextInStreamWithServiceResponseAsync(detectOrientation, image).map(new Func1, OcrResultInner>() { + @Override + public OcrResultInner call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. 
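+     * <p>Illustrative usage sketch only, not generated code; {@code client} and {@code image} are
+     * placeholders for a configured instance of this class and the raw image bytes.</p>
+     * <pre>{@code
+     * // Placeholder client/image: run printed-text OCR, letting the service fix the orientation first.
+     * OcrResultInner ocr = client.recognizePrintedTextInStream(true, image);
+     * }</pre>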
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResultInner object + */ + public Observable> recognizePrintedTextInStreamWithServiceResponseAsync(boolean detectOrientation, byte[] image) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + final OcrLanguages language = null; + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.recognizePrintedTextInStream(language, detectOrientation, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = recognizePrintedTextInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. + * @param language The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the OcrResultInner object if successful. + */ + public OcrResultInner recognizePrintedTextInStream(boolean detectOrientation, byte[] image, OcrLanguages language) { + return recognizePrintedTextInStreamWithServiceResponseAsync(detectOrientation, image, language).toBlocking().single().body(); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. 
+ * @param language The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture recognizePrintedTextInStreamAsync(boolean detectOrientation, byte[] image, OcrLanguages language, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(recognizePrintedTextInStreamWithServiceResponseAsync(detectOrientation, image, language), serviceCallback); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. + * @param language The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResultInner object + */ + public Observable recognizePrintedTextInStreamAsync(boolean detectOrientation, byte[] image, OcrLanguages language) { + return recognizePrintedTextInStreamWithServiceResponseAsync(detectOrientation, image, language).map(new Func1, OcrResultInner>() { + @Override + public OcrResultInner call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * Optical Character Recognition (OCR) detects printed text in an image and extracts the recognized characters into a machine-usable character stream. Upon success, the OCR results will be returned. Upon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError. + * + * @param detectOrientation Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down). + * @param image An image stream. + * @param language The BCP-47 language code of the text to be detected in the image. The default value is 'unk'. 
Possible values include: 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr-Latn', 'sk' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the OcrResultInner object + */ + public Observable> recognizePrintedTextInStreamWithServiceResponseAsync(boolean detectOrientation, byte[] image, OcrLanguages language) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.recognizePrintedTextInStream(language, detectOrientation, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = recognizePrintedTextInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse recognizePrintedTextInStreamDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .build(response); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageDescriptionInner object if successful. + */ + public ImageDescriptionInner describeImageInStream(byte[] image) { + return describeImageInStreamWithServiceResponseAsync(image).toBlocking().single().body(); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param image An image stream. 
+ * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture describeImageInStreamAsync(byte[] image, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(describeImageInStreamWithServiceResponseAsync(image), serviceCallback); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescriptionInner object + */ + public Observable describeImageInStreamAsync(byte[] image) { + return describeImageInStreamWithServiceResponseAsync(image).map(new Func1, ImageDescriptionInner>() { + @Override + public ImageDescriptionInner call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescriptionInner object + */ + public Observable> describeImageInStreamWithServiceResponseAsync(byte[] image) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + final String maxCandidates = null; + final String language = null; + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.describeImageInStream(maxCandidates, language, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = describeImageInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. 
The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param image An image stream. + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the ImageDescriptionInner object if successful. + */ + public ImageDescriptionInner describeImageInStream(byte[] image, String maxCandidates, String language) { + return describeImageInStreamWithServiceResponseAsync(image, maxCandidates, language).toBlocking().single().body(); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param image An image stream. + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture describeImageInStreamAsync(byte[] image, String maxCandidates, String language, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(describeImageInStreamWithServiceResponseAsync(image, maxCandidates, language), serviceCallback); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL.A successful response will be returned in JSON. 
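+ * <p>(Editorial note: a hypothetical callback-style sketch, not part of the generated sources; it assumes a client instance {@code client} and image bytes {@code imageBytes}; {@code ServiceCallback} is the Azure client-runtime callback type.)</p>
+ * <pre>{@code
+ * client.describeImageInStreamAsync(imageBytes, "1", "en", new ServiceCallback<ImageDescriptionInner>() {
+ *     public void success(ImageDescriptionInner result) {
+ *         // inspect result.captions() here
+ *     }
+ *     public void failure(Throwable t) {
+ *         t.printStackTrace();
+ *     }
+ * });
+ * }</pre>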
If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param image An image stream. + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en". Supported languages: en - English (default), ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescriptionInner object + */ + public Observable<ImageDescriptionInner> describeImageInStreamAsync(byte[] image, String maxCandidates, String language) { + return describeImageInStreamWithServiceResponseAsync(image, maxCandidates, language).map(new Func1<ServiceResponse<ImageDescriptionInner>, ImageDescriptionInner>() { + @Override + public ImageDescriptionInner call(ServiceResponse<ImageDescriptionInner> response) { + return response.body(); + } + }); + } + + /** + * This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. All descriptions are in English. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param image An image stream. + * @param maxCandidates Maximum number of candidate descriptions to be returned. The default is 1. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en". Supported languages: en - English (default), ja - Japanese, pt - Portuguese, zh - Simplified Chinese.
Possible values include: 'en', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the ImageDescriptionInner object + */ + public Observable> describeImageInStreamWithServiceResponseAsync(byte[] image, String maxCandidates, String language) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.describeImageInStream(maxCandidates, language, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = describeImageInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse describeImageInStreamDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .build(response); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the TagResultInner object if successful. + */ + public TagResultInner tagImageInStream(byte[] image) { + return tagImageInStreamWithServiceResponseAsync(image).toBlocking().single().body(); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture tagImageInStreamAsync(byte[] image, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(tagImageInStreamWithServiceResponseAsync(image), serviceCallback); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResultInner object + */ + public Observable tagImageInStreamAsync(byte[] image) { + return tagImageInStreamWithServiceResponseAsync(image).map(new Func1, TagResultInner>() { + @Override + public TagResultInner call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResultInner object + */ + public Observable> tagImageInStreamWithServiceResponseAsync(byte[] image) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + final String language = null; + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.tagImageInStream(language, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = tagImageInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. 
+ * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the TagResultInner object if successful. + */ + public TagResultInner tagImageInStream(byte[] image, String language) { + return tagImageInStreamWithServiceResponseAsync(image, language).toBlocking().single().body(); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture tagImageInStreamAsync(byte[] image, String language, final ServiceCallback serviceCallback) { + return ServiceFuture.fromResponse(tagImageInStreamWithServiceResponseAsync(image, language), serviceCallback); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResultInner object + */ + public Observable tagImageInStreamAsync(byte[] image, String language) { + return tagImageInStreamWithServiceResponseAsync(image, language).map(new Func1, TagResultInner>() { + @Override + public TagResultInner call(ServiceResponse response) { + return response.body(); + } + }); + } + + /** + * This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. 
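+ * <p>(Editorial note: a hypothetical reactive sketch, not part of the generated sources; it assumes an initialized client {@code client}, image bytes {@code imageBytes}, and the generated {@code name()} and {@code confidence()} accessors on {@code ImageTag}.)</p>
+ * <pre>{@code
+ * client.tagImageInStreamAsync(imageBytes, "en")
+ *     .subscribe(new Action1<TagResultInner>() {
+ *         public void call(TagResultInner result) {
+ *             for (ImageTag tag : result.tags()) {
+ *                 System.out.println(tag.name() + " - " + tag.confidence());
+ *             }
+ *         }
+ *     });
+ * }</pre>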
Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag 'cello' may be accompanied by the hint 'musical instrument'. All tags are in English. + * + * @param image An image stream. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the TagResultInner object + */ + public Observable> tagImageInStreamWithServiceResponseAsync(byte[] image, String language) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.tagImageInStream(language, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = tagImageInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse tagImageInStreamDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .build(response); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the DomainModelResultsInner object if successful. + */ + public DomainModelResultsInner analyzeImageByDomainInStream(String model, byte[] image) { + return analyzeImageByDomainInStreamWithServiceResponseAsync(model, image).toBlocking().single().body(); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. 
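+ * <p>(Editorial note: an illustrative sketch only, not part of the generated sources; it assumes a client instance {@code client} and image bytes {@code imageBytes}.)</p>
+ * <pre>{@code
+ * // Blocking variant of the same operation, applied to the 'celebrities' model.
+ * DomainModelResultsInner results = client.analyzeImageByDomainInStream("celebrities", imageBytes);
+ * Object payload = results.result(); // model-specific response payload
+ * }</pre>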
Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture<DomainModelResultsInner> analyzeImageByDomainInStreamAsync(String model, byte[] image, final ServiceCallback<DomainModelResultsInner> serviceCallback) { + return ServiceFuture.fromResponse(analyzeImageByDomainInStreamWithServiceResponseAsync(model, image), serviceCallback); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResultsInner object + */ + public Observable<DomainModelResultsInner> analyzeImageByDomainInStreamAsync(String model, byte[] image) { + return analyzeImageByDomainInStreamWithServiceResponseAsync(model, image).map(new Func1<ServiceResponse<DomainModelResultsInner>, DomainModelResultsInner>() { + @Override + public DomainModelResultsInner call(ServiceResponse<DomainModelResultsInner> response) { + return response.body(); + } + }); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream.
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResultsInner object + */ + public Observable> analyzeImageByDomainInStreamWithServiceResponseAsync(String model, byte[] image) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (model == null) { + throw new IllegalArgumentException("Parameter model is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + final String language = null; + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.analyzeImageByDomainInStream(model, language, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = analyzeImageByDomainInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default.ja - Japanese pt - Portuguese zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + * @return the DomainModelResultsInner object if successful. + */ + public DomainModelResultsInner analyzeImageByDomainInStream(String model, byte[] image, String language) { + return analyzeImageByDomainInStreamWithServiceResponseAsync(model, image, language).toBlocking().single().body(); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream. + * @param language The desired language for output generation. 
If this parameter is not specified, the default value is "en". Supported languages: en - English (default), ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture<DomainModelResultsInner> analyzeImageByDomainInStreamAsync(String model, byte[] image, String language, final ServiceCallback<DomainModelResultsInner> serviceCallback) { + return ServiceFuture.fromResponse(analyzeImageByDomainInStreamWithServiceResponseAsync(model, image, language), serviceCallback); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en". Supported languages: en - English (default), ja - Japanese, pt - Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResultsInner object + */ + public Observable<DomainModelResultsInner> analyzeImageByDomainInStreamAsync(String model, byte[] image, String language) { + return analyzeImageByDomainInStreamWithServiceResponseAsync(model, image, language).map(new Func1<ServiceResponse<DomainModelResultsInner>, DomainModelResultsInner>() { + @Override + public DomainModelResultsInner call(ServiceResponse<DomainModelResultsInner> response) { + return response.body(); + } + }); + } + + /** + * This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. Currently, the API only provides a single domain-specific model: celebrities. Two input methods are supported -- (1) Uploading an image or (2) specifying an image URL. A successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong. + * + * @param model The domain-specific content to recognize. + * @param image An image stream. + * @param language The desired language for output generation. If this parameter is not specified, the default value is "en". Supported languages: en - English (default), ja - Japanese, pt - Portuguese, zh - Simplified Chinese.
Possible values include: 'en', 'ja', 'pt', 'zh' + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the observable to the DomainModelResultsInner object + */ + public Observable> analyzeImageByDomainInStreamWithServiceResponseAsync(String model, byte[] image, String language) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (model == null) { + throw new IllegalArgumentException("Parameter model is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.analyzeImageByDomainInStream(model, language, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponse clientResponse = analyzeImageByDomainInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponse analyzeImageByDomainInStreamDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(200, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .build(response); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Handwritten Text Operation Result operation. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + */ + public void recognizeTextInStream(byte[] image) { + recognizeTextInStreamWithServiceResponseAsync(image).toBlocking().single().body(); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Handwritten Text Operation Result operation. + * + * @param image An image stream. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture recognizeTextInStreamAsync(byte[] image, final ServiceCallback serviceCallback) { + return ServiceFuture.fromHeaderResponse(recognizeTextInStreamWithServiceResponseAsync(image), serviceCallback); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Handwritten Text Operation Result operation. + * + * @param image An image stream. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceResponseWithHeaders} object if successful. + */ + public Observable recognizeTextInStreamAsync(byte[] image) { + return recognizeTextInStreamWithServiceResponseAsync(image).map(new Func1, Void>() { + @Override + public Void call(ServiceResponseWithHeaders response) { + return response.body(); + } + }); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Handwritten Text Operation Result operation. + * + * @param image An image stream. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceResponseWithHeaders} object if successful. + */ + public Observable> recognizeTextInStreamWithServiceResponseAsync(byte[] image) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + final Boolean detectHandwriting = null; + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.recognizeTextInStream(detectHandwriting, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponseWithHeaders clientResponse = recognizeTextInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Handwritten Text Operation Result operation. + * + * @param image An image stream. + * @param detectHandwriting If 'true' is specified, handwriting recognition is performed. If this parameter is set to 'false' or is not specified, printed text recognition is performed. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @throws ComputerVisionErrorException thrown if the request is rejected by server + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent + */ + public void recognizeTextInStream(byte[] image, Boolean detectHandwriting) { + recognizeTextInStreamWithServiceResponseAsync(image, detectHandwriting).toBlocking().single().body(); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Handwritten Text Operation Result operation. + * + * @param image An image stream. + * @param detectHandwriting If 'true' is specified, handwriting recognition is performed. If this parameter is set to 'false' or is not specified, printed text recognition is performed. + * @param serviceCallback the async ServiceCallback to handle successful and failed responses. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceFuture} object + */ + public ServiceFuture<Void> recognizeTextInStreamAsync(byte[] image, Boolean detectHandwriting, final ServiceCallback<Void> serviceCallback) { + return ServiceFuture.fromHeaderResponse(recognizeTextInStreamWithServiceResponseAsync(image, detectHandwriting), serviceCallback); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Handwritten Text Operation Result operation. + * + * @param image An image stream. + * @param detectHandwriting If 'true' is specified, handwriting recognition is performed. If this parameter is set to 'false' or is not specified, printed text recognition is performed. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceResponseWithHeaders} object if successful. + */ + public Observable<Void> recognizeTextInStreamAsync(byte[] image, Boolean detectHandwriting) { + return recognizeTextInStreamWithServiceResponseAsync(image, detectHandwriting).map(new Func1<ServiceResponseWithHeaders<Void, RecognizeTextInStreamHeaders>, Void>() { + @Override + public Void call(ServiceResponseWithHeaders<Void, RecognizeTextInStreamHeaders> response) { + return response.body(); + } + }); + } + + /** + * Recognize Text operation. When you use the Recognize Text interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your Get Handwritten Text Operation Result operation. + * + * @param image An image stream. + * @param detectHandwriting If 'true' is specified, handwriting recognition is performed. If this parameter is set to 'false' or is not specified, printed text recognition is performed. + * @throws IllegalArgumentException thrown if parameters fail the validation + * @return the {@link ServiceResponseWithHeaders} object if successful.
+ */ + public Observable> recognizeTextInStreamWithServiceResponseAsync(byte[] image, Boolean detectHandwriting) { + if (this.azureRegion() == null) { + throw new IllegalArgumentException("Parameter this.azureRegion() is required and cannot be null."); + } + if (image == null) { + throw new IllegalArgumentException("Parameter image is required and cannot be null."); + } + String parameterizedHost = Joiner.on(", ").join("{AzureRegion}", this.azureRegion()); + RequestBody imageConverted = RequestBody.create(MediaType.parse("application/octet-stream"), image); + return service.recognizeTextInStream(detectHandwriting, imageConverted, this.acceptLanguage(), parameterizedHost, this.userAgent()) + .flatMap(new Func1, Observable>>() { + @Override + public Observable> call(Response response) { + try { + ServiceResponseWithHeaders clientResponse = recognizeTextInStreamDelegate(response); + return Observable.just(clientResponse); + } catch (Throwable t) { + return Observable.error(t); + } + } + }); + } + + private ServiceResponseWithHeaders recognizeTextInStreamDelegate(Response response) throws ComputerVisionErrorException, IOException, IllegalArgumentException { + return this.restClient().responseBuilderFactory().newInstance(this.serializerAdapter()) + .register(202, new TypeToken() { }.getType()) + .registerError(ComputerVisionErrorException.class) + .buildWithHeaders(response, RecognizeTextInStreamHeaders.class); + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/DomainModelResultsInner.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/DomainModelResultsInner.java new file mode 100644 index 00000000000..c23c8107166 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/DomainModelResultsInner.java @@ -0,0 +1,97 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision.implementation; + +import com.microsoft.azure.cognitiveservices.vision.computervision.ImageMetadata; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * Result of image analysis using a specific domain model including additional + * metadata. + */ +public class DomainModelResultsInner { + /** + * Model-specific response. + */ + @JsonProperty(value = "result") + private Object result; + + /** + * Id of the REST API request. + */ + @JsonProperty(value = "requestId") + private String requestId; + + /** + * The metadata property. + */ + @JsonProperty(value = "metadata") + private ImageMetadata metadata; + + /** + * Get model-specific response. + * + * @return the result value + */ + public Object result() { + return this.result; + } + + /** + * Set model-specific response. + * + * @param result the result value to set + * @return the DomainModelResultsInner object itself. + */ + public DomainModelResultsInner withResult(Object result) { + this.result = result; + return this; + } + + /** + * Get id of the REST API request. + * + * @return the requestId value + */ + public String requestId() { + return this.requestId; + } + + /** + * Set id of the REST API request. 
+ * + * @param requestId the requestId value to set + * @return the DomainModelResultsInner object itself. + */ + public DomainModelResultsInner withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the metadata value. + * + * @return the metadata value + */ + public ImageMetadata metadata() { + return this.metadata; + } + + /** + * Set the metadata value. + * + * @param metadata the metadata value to set + * @return the DomainModelResultsInner object itself. + */ + public DomainModelResultsInner withMetadata(ImageMetadata metadata) { + this.metadata = metadata; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ImageAnalysisInner.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ImageAnalysisInner.java new file mode 100644 index 00000000000..89aa01a14f8 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ImageAnalysisInner.java @@ -0,0 +1,260 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision.implementation; + +import java.util.List; +import com.microsoft.azure.cognitiveservices.vision.computervision.Category; +import com.microsoft.azure.cognitiveservices.vision.computervision.AdultInfo; +import com.microsoft.azure.cognitiveservices.vision.computervision.ColorInfo; +import com.microsoft.azure.cognitiveservices.vision.computervision.ImageType; +import com.microsoft.azure.cognitiveservices.vision.computervision.ImageTag; +import com.microsoft.azure.cognitiveservices.vision.computervision.ImageDescriptionDetails; +import com.microsoft.azure.cognitiveservices.vision.computervision.FaceDescription; +import com.microsoft.azure.cognitiveservices.vision.computervision.ImageMetadata; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * Result of AnalyzeImage operation. + */ +public class ImageAnalysisInner { + /** + * An array indicating identified categories. + */ + @JsonProperty(value = "categories") + private List categories; + + /** + * The adult property. + */ + @JsonProperty(value = "adult") + private AdultInfo adult; + + /** + * The color property. + */ + @JsonProperty(value = "color") + private ColorInfo color; + + /** + * The imageType property. + */ + @JsonProperty(value = "imageType") + private ImageType imageType; + + /** + * A list of tags with confidence level. + */ + @JsonProperty(value = "tags") + private List tags; + + /** + * The description property. + */ + @JsonProperty(value = "description") + private ImageDescriptionDetails description; + + /** + * An array of possible faces within the image. + */ + @JsonProperty(value = "faces") + private List faces; + + /** + * Id of the request for tracking purposes. + */ + @JsonProperty(value = "requestId") + private String requestId; + + /** + * The metadata property. + */ + @JsonProperty(value = "metadata") + private ImageMetadata metadata; + + /** + * Get an array indicating identified categories. 
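+ * <p>(Editorial note: an illustrative sketch, not part of the generated sources; it assumes an {@code ImageAnalysisInner} instance named {@code analysis} and the generated {@code name()} and {@code score()} accessors on {@code Category}.)</p>
+ * <pre>{@code
+ * for (Category category : analysis.categories()) {
+ *     System.out.println(category.name() + " - " + category.score());
+ * }
+ * }</pre>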
+ * + * @return the categories value + */ + public List categories() { + return this.categories; + } + + /** + * Set an array indicating identified categories. + * + * @param categories the categories value to set + * @return the ImageAnalysisInner object itself. + */ + public ImageAnalysisInner withCategories(List categories) { + this.categories = categories; + return this; + } + + /** + * Get the adult value. + * + * @return the adult value + */ + public AdultInfo adult() { + return this.adult; + } + + /** + * Set the adult value. + * + * @param adult the adult value to set + * @return the ImageAnalysisInner object itself. + */ + public ImageAnalysisInner withAdult(AdultInfo adult) { + this.adult = adult; + return this; + } + + /** + * Get the color value. + * + * @return the color value + */ + public ColorInfo color() { + return this.color; + } + + /** + * Set the color value. + * + * @param color the color value to set + * @return the ImageAnalysisInner object itself. + */ + public ImageAnalysisInner withColor(ColorInfo color) { + this.color = color; + return this; + } + + /** + * Get the imageType value. + * + * @return the imageType value + */ + public ImageType imageType() { + return this.imageType; + } + + /** + * Set the imageType value. + * + * @param imageType the imageType value to set + * @return the ImageAnalysisInner object itself. + */ + public ImageAnalysisInner withImageType(ImageType imageType) { + this.imageType = imageType; + return this; + } + + /** + * Get a list of tags with confidence level. + * + * @return the tags value + */ + public List tags() { + return this.tags; + } + + /** + * Set a list of tags with confidence level. + * + * @param tags the tags value to set + * @return the ImageAnalysisInner object itself. + */ + public ImageAnalysisInner withTags(List tags) { + this.tags = tags; + return this; + } + + /** + * Get the description value. + * + * @return the description value + */ + public ImageDescriptionDetails description() { + return this.description; + } + + /** + * Set the description value. + * + * @param description the description value to set + * @return the ImageAnalysisInner object itself. + */ + public ImageAnalysisInner withDescription(ImageDescriptionDetails description) { + this.description = description; + return this; + } + + /** + * Get an array of possible faces within the image. + * + * @return the faces value + */ + public List faces() { + return this.faces; + } + + /** + * Set an array of possible faces within the image. + * + * @param faces the faces value to set + * @return the ImageAnalysisInner object itself. + */ + public ImageAnalysisInner withFaces(List faces) { + this.faces = faces; + return this; + } + + /** + * Get id of the request for tracking purposes. + * + * @return the requestId value + */ + public String requestId() { + return this.requestId; + } + + /** + * Set id of the request for tracking purposes. + * + * @param requestId the requestId value to set + * @return the ImageAnalysisInner object itself. + */ + public ImageAnalysisInner withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the metadata value. + * + * @return the metadata value + */ + public ImageMetadata metadata() { + return this.metadata; + } + + /** + * Set the metadata value. + * + * @param metadata the metadata value to set + * @return the ImageAnalysisInner object itself. 
+ */ + public ImageAnalysisInner withMetadata(ImageMetadata metadata) { + this.metadata = metadata; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ImageDescriptionInner.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ImageDescriptionInner.java new file mode 100644 index 00000000000..ae7921825ae --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ImageDescriptionInner.java @@ -0,0 +1,127 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision.implementation; + +import java.util.List; +import com.microsoft.azure.cognitiveservices.vision.computervision.ImageCaption; +import com.microsoft.azure.cognitiveservices.vision.computervision.ImageMetadata; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.microsoft.rest.serializer.JsonFlatten; + +/** + * A collection of content tags, along with a list of captions sorted by + * confidence level, and image metadata. + */ +@JsonFlatten +public class ImageDescriptionInner { + /** + * A collection of image tags. + */ + @JsonProperty(value = "description.tags") + private List tags; + + /** + * A list of captions, sorted by confidence level. + */ + @JsonProperty(value = "description.captions") + private List captions; + + /** + * Id of the REST API request. + */ + @JsonProperty(value = "description.requestId") + private String requestId; + + /** + * The metadata property. + */ + @JsonProperty(value = "description.metadata") + private ImageMetadata metadata; + + /** + * Get a collection of image tags. + * + * @return the tags value + */ + public List tags() { + return this.tags; + } + + /** + * Set a collection of image tags. + * + * @param tags the tags value to set + * @return the ImageDescriptionInner object itself. + */ + public ImageDescriptionInner withTags(List tags) { + this.tags = tags; + return this; + } + + /** + * Get a list of captions, sorted by confidence level. + * + * @return the captions value + */ + public List captions() { + return this.captions; + } + + /** + * Set a list of captions, sorted by confidence level. + * + * @param captions the captions value to set + * @return the ImageDescriptionInner object itself. + */ + public ImageDescriptionInner withCaptions(List captions) { + this.captions = captions; + return this; + } + + /** + * Get id of the REST API request. + * + * @return the requestId value + */ + public String requestId() { + return this.requestId; + } + + /** + * Set id of the REST API request. + * + * @param requestId the requestId value to set + * @return the ImageDescriptionInner object itself. + */ + public ImageDescriptionInner withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the metadata value. + * + * @return the metadata value + */ + public ImageMetadata metadata() { + return this.metadata; + } + + /** + * Set the metadata value. + * + * @param metadata the metadata value to set + * @return the ImageDescriptionInner object itself. 
+ */ + public ImageDescriptionInner withMetadata(ImageMetadata metadata) { + this.metadata = metadata; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ListModelsResultInner.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ListModelsResultInner.java new file mode 100644 index 00000000000..855626f3d11 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/ListModelsResultInner.java @@ -0,0 +1,34 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision.implementation; + +import java.util.List; +import com.microsoft.azure.cognitiveservices.vision.computervision.ModelDescription; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * Result of the List Domain Models operation. + */ +public class ListModelsResultInner { + /** + * An array of supported models. + */ + @JsonProperty(value = "models", access = JsonProperty.Access.WRITE_ONLY) + private List modelsProperty; + + /** + * Get an array of supported models. + * + * @return the modelsProperty value + */ + public List modelsProperty() { + return this.modelsProperty; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/OcrResultInner.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/OcrResultInner.java new file mode 100644 index 00000000000..329e95aaf79 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/OcrResultInner.java @@ -0,0 +1,136 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision.implementation; + +import java.util.List; +import com.microsoft.azure.cognitiveservices.vision.computervision.OcrRegion; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * The OcrResultInner model. + */ +public class OcrResultInner { + /** + * The BCP-47 language code of the text in the image. + */ + @JsonProperty(value = "language") + private String language; + + /** + * The angle, in degrees, of the detected text with respect to the closest + * horizontal or vertical direction. After rotating the input image + * clockwise by this angle, the recognized text lines become horizontal or + * vertical. In combination with the orientation property it can be used to + * overlay recognition results correctly on the original image, by rotating + * either the original image or recognition results by a suitable angle + * around the center of the original image. If the angle cannot be + * confidently detected, this property is not present. 
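+ * <p>(Editorial note: an illustrative sketch, not part of the generated sources, showing one way a caller might apply this value; {@code ocrResult} is an {@code OcrResultInner} instance, and {@code w} and {@code h} stand for the original image width and height.)</p>
+ * <pre>{@code
+ * if (ocrResult.textAngle() != null) {
+ *     // Rotate the input image clockwise by textAngle degrees about its center
+ *     // so that the recognized text lines become horizontal or vertical.
+ *     java.awt.geom.AffineTransform deskew = java.awt.geom.AffineTransform
+ *         .getRotateInstance(Math.toRadians(ocrResult.textAngle()), w / 2.0, h / 2.0);
+ * }
+ * }</pre>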
If the image + * contains text at different angles, only part of the text will be + * recognized correctly. + */ + @JsonProperty(value = "textAngle") + private Double textAngle; + + /** + * Orientation of the text recognized in the image. The value + * (up,down,left, or right) refers to the direction that the top of the + * recognized text is facing, after the image has been rotated around its + * center according to the detected text angle (see textAngle property). + */ + @JsonProperty(value = "orientation") + private String orientation; + + /** + * An array of objects, where each object represents a region of recognized + * text. + */ + @JsonProperty(value = "regions") + private List regions; + + /** + * Get the BCP-47 language code of the text in the image. + * + * @return the language value + */ + public String language() { + return this.language; + } + + /** + * Set the BCP-47 language code of the text in the image. + * + * @param language the language value to set + * @return the OcrResultInner object itself. + */ + public OcrResultInner withLanguage(String language) { + this.language = language; + return this; + } + + /** + * Get the angle, in degrees, of the detected text with respect to the closest horizontal or vertical direction. After rotating the input image clockwise by this angle, the recognized text lines become horizontal or vertical. In combination with the orientation property it can be used to overlay recognition results correctly on the original image, by rotating either the original image or recognition results by a suitable angle around the center of the original image. If the angle cannot be confidently detected, this property is not present. If the image contains text at different angles, only part of the text will be recognized correctly. + * + * @return the textAngle value + */ + public Double textAngle() { + return this.textAngle; + } + + /** + * Set the angle, in degrees, of the detected text with respect to the closest horizontal or vertical direction. After rotating the input image clockwise by this angle, the recognized text lines become horizontal or vertical. In combination with the orientation property it can be used to overlay recognition results correctly on the original image, by rotating either the original image or recognition results by a suitable angle around the center of the original image. If the angle cannot be confidently detected, this property is not present. If the image contains text at different angles, only part of the text will be recognized correctly. + * + * @param textAngle the textAngle value to set + * @return the OcrResultInner object itself. + */ + public OcrResultInner withTextAngle(Double textAngle) { + this.textAngle = textAngle; + return this; + } + + /** + * Get orientation of the text recognized in the image. The value (up,down,left, or right) refers to the direction that the top of the recognized text is facing, after the image has been rotated around its center according to the detected text angle (see textAngle property). + * + * @return the orientation value + */ + public String orientation() { + return this.orientation; + } + + /** + * Set orientation of the text recognized in the image. The value (up,down,left, or right) refers to the direction that the top of the recognized text is facing, after the image has been rotated around its center according to the detected text angle (see textAngle property). + * + * @param orientation the orientation value to set + * @return the OcrResultInner object itself. 
+ */ + public OcrResultInner withOrientation(String orientation) { + this.orientation = orientation; + return this; + } + + /** + * Get an array of objects, where each object represents a region of recognized text. + * + * @return the regions value + */ + public List<OcrRegion> regions() { + return this.regions; + } + + /** + * Set an array of objects, where each object represents a region of recognized text. + * + * @param regions the regions value to set + * @return the OcrResultInner object itself. + */ + public OcrResultInner withRegions(List<OcrRegion> regions) { + this.regions = regions; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/TagResultInner.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/TagResultInner.java new file mode 100644 index 00000000000..17f15fa4789 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/TagResultInner.java @@ -0,0 +1,98 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision.implementation; + +import java.util.List; +import com.microsoft.azure.cognitiveservices.vision.computervision.ImageTag; +import com.microsoft.azure.cognitiveservices.vision.computervision.ImageMetadata; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * The results of an image tag operation, including any tags and image metadata. + */ +public class TagResultInner { + /** + * A list of tags with confidence level. + */ + @JsonProperty(value = "tags") + private List<ImageTag> tags; + + /** + * Id of the REST API request. + */ + @JsonProperty(value = "requestId") + private String requestId; + + /** + * The metadata property. + */ + @JsonProperty(value = "metadata") + private ImageMetadata metadata; + + /** + * Get a list of tags with confidence level. + * + * @return the tags value + */ + public List<ImageTag> tags() { + return this.tags; + } + + /** + * Set a list of tags with confidence level. + * + * @param tags the tags value to set + * @return the TagResultInner object itself. + */ + public TagResultInner withTags(List<ImageTag> tags) { + this.tags = tags; + return this; + } + + /** + * Get id of the REST API request. + * + * @return the requestId value + */ + public String requestId() { + return this.requestId; + } + + /** + * Set id of the REST API request. + * + * @param requestId the requestId value to set + * @return the TagResultInner object itself. + */ + public TagResultInner withRequestId(String requestId) { + this.requestId = requestId; + return this; + } + + /** + * Get the metadata value. + * + * @return the metadata value + */ + public ImageMetadata metadata() { + return this.metadata; + } + + /** + * Set the metadata value. + * + * @param metadata the metadata value to set + * @return the TagResultInner object itself.
+ */ + public TagResultInner withMetadata(ImageMetadata metadata) { + this.metadata = metadata; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/TextOperationResultInner.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/TextOperationResultInner.java new file mode 100644 index 00000000000..a0bf8afa878 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/TextOperationResultInner.java @@ -0,0 +1,72 @@ +/** + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + */ + +package com.microsoft.azure.cognitiveservices.vision.computervision.implementation; + +import com.microsoft.azure.cognitiveservices.vision.computervision.TextOperationStatusCodes; +import com.microsoft.azure.cognitiveservices.vision.computervision.RecognitionResult; +import com.fasterxml.jackson.annotation.JsonProperty; + +/** + * The TextOperationResultInner model. + */ +public class TextOperationResultInner { + /** + * Status of the text operation. Possible values include: 'Not Started', + * 'Running', 'Failed', 'Succeeded'. + */ + @JsonProperty(value = "status") + private TextOperationStatusCodes status; + + /** + * The recognitionResult property. + */ + @JsonProperty(value = "recognitionResult") + private RecognitionResult recognitionResult; + + /** + * Get status of the text operation. Possible values include: 'Not Started', 'Running', 'Failed', 'Succeeded'. + * + * @return the status value + */ + public TextOperationStatusCodes status() { + return this.status; + } + + /** + * Set status of the text operation. Possible values include: 'Not Started', 'Running', 'Failed', 'Succeeded'. + * + * @param status the status value to set + * @return the TextOperationResultInner object itself. + */ + public TextOperationResultInner withStatus(TextOperationStatusCodes status) { + this.status = status; + return this; + } + + /** + * Get the recognitionResult value. + * + * @return the recognitionResult value + */ + public RecognitionResult recognitionResult() { + return this.recognitionResult; + } + + /** + * Set the recognitionResult value. + * + * @param recognitionResult the recognitionResult value to set + * @return the TextOperationResultInner object itself. + */ + public TextOperationResultInner withRecognitionResult(RecognitionResult recognitionResult) { + this.recognitionResult = recognitionResult; + return this; + } + +} diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/package-info.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/package-info.java new file mode 100644 index 00000000000..70eaef23dc6 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/implementation/package-info.java @@ -0,0 +1,11 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for +// license information. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator. + +/** + * This package contains the implementation classes for ComputerVisionAPI. + * The Computer Vision API provides state-of-the-art algorithms to process images and return information. For example, it can be used to determine if an image contains mature content, or it can be used to find all the faces in an image. It also has other features like estimating dominant and accent colors, categorizing the content of images, and describing an image with complete English sentences. Additionally, it can intelligently generate image thumbnails for displaying large images effectively. + */ +package com.microsoft.azure.cognitiveservices.vision.computervision.implementation; diff --git a/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/package-info.java b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/package-info.java new file mode 100644 index 00000000000..c40a580b7e7 --- /dev/null +++ b/azure-cognitiveservices/vision/computervision/src/main/java/com/microsoft/azure/cognitiveservices/vision/computervision/package-info.java @@ -0,0 +1,11 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for +// license information. +// +// Code generated by Microsoft (R) AutoRest Code Generator. + +/** + * This package contains the classes for ComputerVisionAPI. + * The Computer Vision API provides state-of-the-art algorithms to process images and return information. For example, it can be used to determine if an image contains mature content, or it can be used to find all the faces in an image. It also has other features like estimating dominant and accent colors, categorizing the content of images, and describing an image with complete English sentences. Additionally, it can intelligently generate image thumbnails for displaying large images effectively. + */ +package com.microsoft.azure.cognitiveservices.vision.computervision;
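Reviewer note: a minimal usage sketch of the fluent model pattern used by the generated classes in this diff (not part of the generated code). Every withXxx setter returns the model instance, so results can be built with chained calls and read back through the matching getters. The class name OcrResultSketch, the literal values, and the assumption that OcrRegion has the usual AutoRest-generated public no-argument constructor are illustrative only.

import java.util.Arrays;

import com.microsoft.azure.cognitiveservices.vision.computervision.OcrRegion;
import com.microsoft.azure.cognitiveservices.vision.computervision.implementation.OcrResultInner;

// Hypothetical example class; not part of the generated SDK.
public class OcrResultSketch {
    public static void main(String[] args) {
        // Build an OcrResultInner by chaining the generated "with" setters;
        // each setter returns the model itself, as documented above.
        OcrResultInner result = new OcrResultInner()
                .withLanguage("en")
                .withTextAngle(0.0)                           // Double; may be left null when the angle is not detected
                .withOrientation("Up")
                .withRegions(Arrays.asList(new OcrRegion())); // assumes the usual no-arg constructor on OcrRegion

        // Read the values back through the generated getters.
        System.out.println(result.language());       // en
        System.out.println(result.orientation());    // Up
        System.out.println(result.regions().size()); // 1
    }
}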