diff --git a/specification/cognitiveservices/data-plane/ComputerVision/readme.go.md b/specification/cognitiveservices/data-plane/ComputerVision/readme.go.md
index 97861eeff941..1386894f71a3 100644
--- a/specification/cognitiveservices/data-plane/ComputerVision/readme.go.md
+++ b/specification/cognitiveservices/data-plane/ComputerVision/readme.go.md
@@ -16,6 +16,7 @@ batch:
   - tag: release_2_0
   - tag: release_2_1
   - tag: release_3_0
+  - tag: release_3_1
 ```
 
 ### Tag: release_2_0 and go
@@ -45,11 +46,20 @@ Please also specify `--go-sdk-folder=`.
+
+``` yaml $(tag) == 'release_3_1_preview_2' && $(go)
+output-folder: $(go-sdk-folder)/services/preview/cognitiveservices/v3.1-preview.2/$(namespace)
+```
+
 ### Tag: release_3_1 and go
 
 These settings apply only when `--tag=release_3_1 --go` is specified on the command line.
 Please also specify `--go-sdk-folder=`.
 
 ``` yaml $(tag) == 'release_3_1' && $(go)
-output-folder: $(go-sdk-folder)/services/preview/cognitiveservices/v3.1-preview.2/$(namespace)
+output-folder: $(go-sdk-folder)/services/cognitiveservices/v3.1/$(namespace)
 ```
\ No newline at end of file
diff --git a/specification/cognitiveservices/data-plane/ComputerVision/readme.md b/specification/cognitiveservices/data-plane/ComputerVision/readme.md
index af1edea7aad7..aa285d970a63 100644
--- a/specification/cognitiveservices/data-plane/ComputerVision/readme.md
+++ b/specification/cognitiveservices/data-plane/ComputerVision/readme.md
@@ -4,11 +4,11 @@ Configuration for generating Computer Vision SDK.
 
-The current release is `release_3_0`.
+The current release is `release_3_1`.
 
 ``` yaml
-tag: release_3_0
+tag: release_3_1
 add-credentials: true
 openapi-type: data-plane
 ```
@@ -57,6 +57,15 @@ input-file:
   - preview/v3.1-preview.2/Ocr.json
 ```
 
+### Release 3.1
+These settings apply only when `--tag=release_3_1` is specified on the command line.
+
+``` yaml $(tag) == 'release_3_1'
+input-file:
+  - stable/v3.1/ComputerVision.json
+  - stable/v3.1/Ocr.json
+```
+
 ## Swagger to SDK
 
 This section describes what SDK should be generated by the automatic system.
diff --git a/specification/cognitiveservices/data-plane/ComputerVision/readme.ruby.md b/specification/cognitiveservices/data-plane/ComputerVision/readme.ruby.md
index 019525023fad..b93a320d0992 100644
--- a/specification/cognitiveservices/data-plane/ComputerVision/readme.ruby.md
+++ b/specification/cognitiveservices/data-plane/ComputerVision/readme.ruby.md
@@ -41,4 +41,15 @@ Please also specify `--ruby-sdks-folder=`.
+
+``` yaml $(tag) == 'release_3_1' && $(ruby)
+namespace: "Azure::CognitiveServices::ComputerVision::V3_1"
+output-folder: $(ruby-sdks-folder)/data/azure_cognitiveservices_computervision/lib
+title: "ComputerVisionClient"
 ```
\ No newline at end of file
diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/ComputerVision.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/ComputerVision.json
new file mode 100644
index 000000000000..35ef27a5a632
--- /dev/null
+++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/ComputerVision.json
@@ -0,0 +1,1840 @@
+{ + "swagger": "2.0", + "info": { + "version": "3.1", + "title": "Computer Vision Client", + "description": "The Computer Vision API provides state-of-the-art algorithms to process images and return information. For example, it can be used to determine if an image contains mature content, or it can be used to find all the faces in an image.
It also has other features like estimating dominant and accent colors, categorizing the content of images, and describing an image with complete English sentences. Additionally, it can also intelligently generate images thumbnails for displaying large images effectively." + }, + "securityDefinitions": { + "apim_key": { + "type": "apiKey", + "name": "Ocp-Apim-Subscription-Key", + "in": "header" + } + }, + "security": [ + { + "apim_key": [] + } + ], + "x-ms-parameterized-host": { + "hostTemplate": "{Endpoint}", + "useSchemePrefix": false, + "parameters": [ + { + "$ref": "#/parameters/Endpoint" + } + ] + }, + "host": "westcentralus.api.cognitive.microsoft.com", + "basePath": "/vision/v3.1", + "schemes": [ + "https" + ], + "paths": { + "/analyze": { + "post": { + "description": "This operation extracts a rich set of visual features based on the image content.\r\nTwo input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response.\r\nA successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong.", + "operationId": "AnalyzeImage", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "$ref": "#/parameters/VisualFeatures" + }, + { + "name": "details", + "in": "query", + "description": "A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include: Celebrities - identifies celebrities if detected in the image, Landmarks - identifies notable landmarks in the image.", + "required": false, + "type": "array", + "items": { + "type": "string", + "enum": [ + "Celebrities", + "Landmarks" + ], + "x-nullable": false, + "x-ms-enum": { + "name": "Details", + "modelAsString": false + } + }, + "collectionFormat": "csv", + "x-nullable": true + }, + { + "$ref": "#/parameters/ServiceLanguage" + }, + { + "$ref": "#/parameters/DescriptionExclude" + }, + { + "$ref": "#/parameters/ImageUrl" + } + ], + "responses": { + "200": { + "description": "The response include the extracted features in JSON format. Here is the definitions for enumeration types:\r\n ClipartType\r\n Non - clipart = 0, ambiguous = 1, normal - clipart = 2, good - clipart = 3. LineDrawingTypeNon - LineDrawing = 0, LineDrawing = 1.", + "schema": { + "$ref": "#/definitions/ImageAnalysis" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful AnalyzeImage request": { + "$ref": "./examples/SuccessfulAnalyzeImageWithUrl.json" + } + } + } + }, + "/describe": { + "post": { + "description": "This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. Descriptions may include results from celebrity and landmark domain models, if applicable.\r\nTwo input methods are supported -- (1) Uploading an image or (2) specifying an image URL.\r\nA successful response will be returned in JSON. 
If the request failed, the response will contain an error code and a message to help understand what went wrong.", + "operationId": "DescribeImage", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "name": "maxCandidates", + "in": "query", + "description": "Maximum number of candidate descriptions to be returned. The default is 1.", + "required": false, + "type": "integer", + "format": "int32", + "default": 1, + "x-nullable": true + }, + { + "$ref": "#/parameters/ServiceLanguage" + }, + { + "$ref": "#/parameters/DescriptionExclude" + }, + { + "$ref": "#/parameters/ImageUrl" + } + ], + "responses": { + "200": { + "description": "Image description object.", + "schema": { + "$ref": "#/definitions/ImageDescription" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful DescribeImage request": { + "$ref": "./examples/SuccessfulDescribeImageWithUrl.json" + } + } + } + }, + "/detect": { + "post": { + "description": "Performs object detection on the specified image.\r\nTwo input methods are supported -- (1) Uploading an image or (2) specifying an image URL.\r\nA successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong.", + "operationId": "DetectObjects", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "$ref": "#/parameters/ImageUrl" + } + ], + "responses": { + "200": { + "description": "The response include the detected objects in JSON format.", + "schema": { + "$ref": "#/definitions/DetectResult" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful DetectObjects request": { + "$ref": "./examples/SuccessfulDetectObjectsWithUrl.json" + } + } + } + }, + "/models": { + "get": { + "description": "This operation returns the list of domain-specific models that are supported by the Computer Vision API. Currently, the API supports following domain-specific models: celebrity recognizer, landmark recognizer.\r\nA successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong.", + "operationId": "ListModels", + "consumes": [], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "List of available domain models.", + "schema": { + "$ref": "#/definitions/ListModelsResult" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful ListModels request": { + "$ref": "./examples/SuccessfulListModels.json" + } + } + } + }, + "/models/{model}/analyze": { + "post": { + "description": "This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. 
Currently, the API provides following domain-specific models: celebrities, landmarks.\r\nTwo input methods are supported -- (1) Uploading an image or (2) specifying an image URL.\r\nA successful response will be returned in JSON.\r\nIf the request failed, the response will contain an error code and a message to help understand what went wrong.", + "operationId": "AnalyzeImageByDomain", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "name": "model", + "in": "path", + "description": "The domain-specific content to recognize.", + "required": true, + "type": "string", + "x-nullable": true + }, + { + "$ref": "#/parameters/ServiceLanguage" + }, + { + "$ref": "#/parameters/ImageUrl" + } + ], + "responses": { + "200": { + "description": "Analysis result based on the domain model.", + "schema": { + "$ref": "#/definitions/DomainModelResults" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful AnalyzeImageByDomain request": { + "$ref": "./examples/SuccessfulAnalyzeImageByDomainWithUrl.json" + } + } + } + }, + "/ocr": { + "post": { + "description": "Optical Character Recognition (OCR) detects text in an image and extracts the recognized characters into a machine-usable character stream.\r\nUpon success, the OCR results will be returned.\r\nUpon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError.", + "operationId": "RecognizePrintedText", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "$ref": "#/parameters/DetectOrientation" + }, + { + "$ref": "#/parameters/OcrLanguage" + }, + { + "$ref": "#/parameters/ImageUrl" + } + ], + "responses": { + "200": { + "description": "The OCR results in the hierarchy of region/line/word. The results include text, bounding box for regions, lines and words. The angle, in radians, of the detected text with respect to the closest horizontal or vertical direction. After rotating the input image clockwise by this angle, the recognized text lines become horizontal or vertical. In combination with the orientation property it can be used to overlay recognition results correctly on the original image, by rotating either the original image or recognition results by a suitable angle around the center of the original image. If the angle cannot be confidently detected, this property is not present. If the image contains text at different angles, only part of the text will be recognized correctly.", + "schema": { + "$ref": "#/definitions/OcrResult" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful RecognizePrintedText request": { + "$ref": "./examples/SuccessfulRecognizePrintedTextWithUrl.json" + } + } + } + }, + "/tag": { + "post": { + "description": "This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. 
Tags may contain hints to avoid ambiguity or provide context, for example the tag \"ascomycete\" may be accompanied by the hint \"fungus\".\r\nTwo input methods are supported -- (1) Uploading an image or (2) specifying an image URL.\r\nA successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong.", + "operationId": "TagImage", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "$ref": "#/parameters/ServiceLanguage" + }, + { + "$ref": "#/parameters/ImageUrl" + } + ], + "responses": { + "200": { + "description": "Image tags object.", + "schema": { + "$ref": "#/definitions/TagResult" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful TagImage request": { + "$ref": "./examples/SuccessfulTagImageWithUrl.json" + } + } + } + }, + "/generateThumbnail": { + "post": { + "description": "This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image.\r\nA successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong.\r\nUpon failure, the error code and an error message are returned. The error code could be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, InvalidThumbnailSize, NotSupportedImage, FailedToProcess, Timeout, or InternalServerError.", + "operationId": "GenerateThumbnail", + "consumes": [ + "application/json" + ], + "produces": [ + "application/octet-stream" + ], + "parameters": [ + { + "name": "width", + "in": "query", + "description": "Width of the thumbnail, in pixels. It must be between 1 and 1024. Recommended minimum of 50.", + "required": true, + "type": "integer", + "format": "int32", + "maximum": 1024, + "minimum": 1, + "x-nullable": false + }, + { + "name": "height", + "in": "query", + "description": "Height of the thumbnail, in pixels. It must be between 1 and 1024. Recommended minimum of 50.", + "required": true, + "type": "integer", + "format": "int32", + "maximum": 1024, + "minimum": 1, + "x-nullable": false + }, + { + "name": "smartCropping", + "in": "query", + "description": "Boolean flag for enabling smart cropping.", + "required": false, + "type": "boolean", + "default": false, + "x-nullable": true + }, + { + "$ref": "#/parameters/ImageUrl" + } + ], + "responses": { + "200": { + "description": "The generated thumbnail in binary format.", + "schema": { + "type": "file" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful GenerateThumbnail request": { + "$ref": "./examples/SuccessfulGenerateThumbnailWithUrl.json" + } + } + } + }, + "/areaOfInterest": { + "post": { + "description": "This operation returns a bounding box around the most important area of the image.\r\nA successful response will be returned in JSON. If the request failed, the response contains an error code and a message to help determine what went wrong.\r\nUpon failure, the error code and an error message are returned. 
The error code could be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, FailedToProcess, Timeout, or InternalServerError.", + "operationId": "GetAreaOfInterest", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "$ref": "#/parameters/ImageUrl" + } + ], + "responses": { + "200": { + "description": "The response includes the extracted area of interest in JSON format.", + "schema": { + "$ref": "#/definitions/AreaOfInterestResult" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful GetAreaOfInterest request": { + "$ref": "./examples/SuccessfulGetAreaOfInterestWithUrl.json" + } + } + } + } + }, + "definitions": { + "ImageAnalysis": { + "description": "Result of AnalyzeImage operation.", + "type": "object", + "properties": { + "categories": { + "description": "An array indicating identified categories.", + "type": "array", + "items": { + "$ref": "#/definitions/Category" + }, + "x-nullable": true + }, + "adult": { + "$ref": "#/definitions/AdultInfo", + "description": "An object describing whether the image contains adult-oriented content and/or is racy." + }, + "color": { + "$ref": "#/definitions/ColorInfo", + "description": "An object providing additional metadata describing color attributes." + }, + "imageType": { + "$ref": "#/definitions/ImageType", + "description": "An object providing possible image types and matching confidence levels." + }, + "tags": { + "description": "A list of tags with confidence level.", + "type": "array", + "items": { + "$ref": "#/definitions/ImageTag" + }, + "x-nullable": true + }, + "description": { + "$ref": "#/definitions/ImageDescriptionDetails", + "description": "A collection of content tags, along with a list of captions sorted by confidence level, and image metadata." + }, + "faces": { + "description": "An array of possible faces within the image.", + "type": "array", + "items": { + "$ref": "#/definitions/FaceDescription" + }, + "x-nullable": true + }, + "objects": { + "description": "Array of objects describing what was detected in the image.", + "type": "array", + "items": { + "$ref": "#/definitions/DetectedObject" + }, + "x-nullable": true + }, + "brands": { + "description": "Array of brands detected in the image.", + "type": "array", + "items": { + "$ref": "#/definitions/DetectedBrand" + }, + "x-nullable": true + }, + "requestId": { + "description": "Id of the REST API request.", + "type": "string", + "x-nullable": true + }, + "metadata": { + "$ref": "#/definitions/ImageMetadata" + } + }, + "x-nullable": true + }, + "Category": { + "description": "An object describing identified category.", + "type": "object", + "properties": { + "name": { + "description": "Name of the category.", + "type": "string", + "x-nullable": true + }, + "score": { + "format": "double", + "description": "Scoring of the category.", + "type": "number", + "x-nullable": false + }, + "detail": { + "$ref": "#/definitions/CategoryDetail", + "description": "Details of the identified category." 
+ } + }, + "x-nullable": true + }, + "AdultInfo": { + "description": "An object describing whether the image contains adult-oriented content and/or is racy.", + "type": "object", + "properties": { + "isAdultContent": { + "description": "A value indicating if the image contains adult-oriented content.", + "type": "boolean", + "x-nullable": false + }, + "isRacyContent": { + "description": "A value indicating if the image is racy.", + "type": "boolean", + "x-nullable": false + }, + "isGoryContent": { + "description": "A value indicating if the image is gory.", + "type": "boolean", + "x-nullable": false + }, + "adultScore": { + "format": "double", + "description": "Score from 0 to 1 that indicates how much the content is considered adult-oriented within the image.", + "type": "number", + "x-nullable": false + }, + "racyScore": { + "format": "double", + "description": "Score from 0 to 1 that indicates how suggestive is the image.", + "type": "number", + "x-nullable": false + }, + "goreScore": { + "format": "double", + "description": "Score from 0 to 1 that indicates how gory is the image.", + "type": "number", + "x-nullable": false + } + }, + "x-nullable": true + }, + "ColorInfo": { + "description": "An object providing additional metadata describing color attributes.", + "type": "object", + "properties": { + "dominantColorForeground": { + "description": "Possible dominant foreground color.", + "type": "string", + "x-nullable": true + }, + "dominantColorBackground": { + "description": "Possible dominant background color.", + "type": "string", + "x-nullable": true + }, + "dominantColors": { + "description": "An array of possible dominant colors.", + "type": "array", + "items": { + "type": "string", + "x-nullable": true + }, + "x-nullable": true + }, + "accentColor": { + "description": "Possible accent color.", + "type": "string", + "x-nullable": true + }, + "isBWImg": { + "description": "A value indicating if the image is black and white.", + "type": "boolean", + "x-nullable": false + } + }, + "x-nullable": true + }, + "ImageType": { + "description": "An object providing possible image types and matching confidence levels.", + "type": "object", + "properties": { + "clipArtType": { + "format": "int32", + "description": "Confidence level that the image is a clip art.", + "type": "integer", + "x-nullable": false + }, + "lineDrawingType": { + "format": "int32", + "description": "Confidence level that the image is a line drawing.", + "type": "integer", + "x-nullable": false + } + }, + "x-nullable": true + }, + "ImageTag": { + "description": "An entity observation in the image, along with the confidence score.", + "type": "object", + "properties": { + "name": { + "description": "Name of the entity.", + "type": "string", + "x-nullable": true + }, + "confidence": { + "format": "double", + "description": "The level of confidence that the entity was observed.", + "type": "number", + "x-nullable": false + }, + "hint": { + "description": "Optional hint/details for this tag.", + "type": "string", + "x-nullable": true + } + }, + "x-nullable": true + }, + "ImageDescriptionDetails": { + "description": "A collection of content tags, along with a list of captions sorted by confidence level, and image metadata.", + "type": "object", + "properties": { + "tags": { + "description": "A collection of image tags.", + "type": "array", + "items": { + "type": "string", + "x-nullable": true + }, + "x-nullable": true + }, + "captions": { + "description": "A list of captions, sorted by confidence level.", + "type": "array", + 
"items": { + "$ref": "#/definitions/ImageCaption" + }, + "x-nullable": true + } + }, + "x-nullable": true + }, + "FaceDescription": { + "description": "An object describing a face identified in the image.", + "type": "object", + "properties": { + "age": { + "format": "int32", + "description": "Possible age of the face.", + "type": "integer", + "x-nullable": false + }, + "gender": { + "description": "Possible gender of the face.", + "enum": [ + "Male", + "Female" + ], + "type": "string", + "x-ms-enum": { + "name": "Gender", + "modelAsString": false + }, + "x-nullable": true + }, + "faceRectangle": { + "$ref": "#/definitions/FaceRectangle", + "description": "Rectangle in the image containing the identified face." + } + }, + "x-nullable": true + }, + "DetectedObject": { + "description": "An object detected in an image.", + "type": "object", + "properties": { + "rectangle": { + "$ref": "#/definitions/BoundingRect", + "description": "Approximate location of the detected object.", + "readOnly": true + }, + "object": { + "description": "Label for the object.", + "type": "string", + "x-nullable": true + }, + "confidence": { + "format": "double", + "description": "Confidence score of having observed the object in the image, as a value ranging from 0 to 1.", + "type": "number", + "x-nullable": false + }, + "parent": { + "$ref": "#/definitions/ObjectHierarchy", + "description": "The parent object, from a taxonomy perspective.\r\nThe parent object is a more generic form of this object. For example, a 'bulldog' would have a parent of 'dog'." + } + }, + "x-nullable": true + }, + "DetectedBrand": { + "description": "A brand detected in an image.", + "type": "object", + "properties": { + "name": { + "description": "Label for the brand.", + "type": "string", + "readOnly": true, + "x-nullable": true + }, + "confidence": { + "format": "double", + "description": "Confidence score of having observed the brand in the image, as a value ranging from 0 to 1.", + "type": "number", + "readOnly": true, + "x-nullable": false + }, + "rectangle": { + "$ref": "#/definitions/BoundingRect", + "description": "Approximate location of the detected brand.", + "readOnly": true + } + }, + "x-nullable": true + }, + "ImageMetadata": { + "description": "Image metadata.", + "type": "object", + "properties": { + "width": { + "format": "int32", + "description": "Image width, in pixels.", + "type": "integer", + "x-nullable": false + }, + "height": { + "format": "int32", + "description": "Image height, in pixels.", + "type": "integer", + "x-nullable": false + }, + "format": { + "description": "Image format.", + "type": "string", + "x-nullable": true + } + }, + "x-nullable": true + }, + "CategoryDetail": { + "description": "An object describing additional category details.", + "type": "object", + "properties": { + "celebrities": { + "description": "An array of celebrities if any identified.", + "type": "array", + "items": { + "$ref": "#/definitions/CelebritiesModel" + }, + "x-nullable": true + }, + "landmarks": { + "description": "An array of landmarks if any identified.", + "type": "array", + "items": { + "$ref": "#/definitions/LandmarksModel" + }, + "x-nullable": true + } + }, + "x-nullable": true + }, + "ImageCaption": { + "description": "An image caption, i.e. 
a brief description of what the image depicts.", + "type": "object", + "properties": { + "text": { + "description": "The text of the caption.", + "type": "string", + "x-nullable": true + }, + "confidence": { + "format": "double", + "description": "The level of confidence the service has in the caption.", + "type": "number", + "x-nullable": false + } + }, + "x-nullable": true + }, + "FaceRectangle": { + "description": "An object describing face rectangle.", + "type": "object", + "properties": { + "left": { + "format": "int32", + "description": "X-coordinate of the top left point of the face, in pixels.", + "type": "integer", + "x-nullable": false + }, + "top": { + "format": "int32", + "description": "Y-coordinate of the top left point of the face, in pixels.", + "type": "integer", + "x-nullable": false + }, + "width": { + "format": "int32", + "description": "Width measured from the top-left point of the face, in pixels.", + "type": "integer", + "x-nullable": false + }, + "height": { + "format": "int32", + "description": "Height measured from the top-left point of the face, in pixels.", + "type": "integer", + "x-nullable": false + } + }, + "x-nullable": true + }, + "BoundingRect": { + "description": "A bounding box for an area inside an image.", + "type": "object", + "properties": { + "x": { + "format": "int32", + "description": "X-coordinate of the top left point of the area, in pixels.", + "type": "integer", + "x-nullable": false + }, + "y": { + "format": "int32", + "description": "Y-coordinate of the top left point of the area, in pixels.", + "type": "integer", + "x-nullable": false + }, + "w": { + "format": "int32", + "description": "Width measured from the top-left point of the area, in pixels.", + "type": "integer", + "x-nullable": false + }, + "h": { + "format": "int32", + "description": "Height measured from the top-left point of the area, in pixels.", + "type": "integer", + "x-nullable": false + } + }, + "x-nullable": false + }, + "ObjectHierarchy": { + "description": "An object detected inside an image.", + "type": "object", + "properties": { + "object": { + "description": "Label for the object.", + "type": "string", + "x-nullable": true + }, + "confidence": { + "format": "double", + "description": "Confidence score of having observed the object in the image, as a value ranging from 0 to 1.", + "type": "number", + "x-nullable": false + }, + "parent": { + "$ref": "#/definitions/ObjectHierarchy", + "description": "The parent object, from a taxonomy perspective.\r\nThe parent object is a more generic form of this object. For example, a 'bulldog' would have a parent of 'dog'." + } + }, + "x-nullable": true + }, + "CelebritiesModel": { + "description": "An object describing possible celebrity identification.", + "type": "object", + "properties": { + "name": { + "description": "Name of the celebrity.", + "type": "string", + "x-nullable": true + }, + "confidence": { + "format": "double", + "description": "Confidence level for the celebrity recognition as a value ranging from 0 to 1.", + "type": "number", + "x-nullable": false + }, + "faceRectangle": { + "$ref": "#/definitions/FaceRectangle", + "description": "Location of the identified face in the image." 
+ } + }, + "x-nullable": true + }, + "LandmarksModel": { + "description": "A landmark recognized in the image.", + "type": "object", + "properties": { + "name": { + "description": "Name of the landmark.", + "type": "string", + "x-nullable": true + }, + "confidence": { + "format": "double", + "description": "Confidence level for the landmark recognition as a value ranging from 0 to 1.", + "type": "number", + "x-nullable": false + } + }, + "x-nullable": true + }, + "ImageDescription": { + "description": "A collection of content tags, along with a list of captions sorted by confidence level, and image metadata.", + "type": "object", + "properties": { + "description": { + "$ref": "#/definitions/ImageDescriptionDetails", + "description": "A collection of content tags, along with a list of captions sorted by confidence level, and image metadata.", + "x-ms-client-flatten": true + }, + "requestId": { + "description": "Id of the REST API request.", + "type": "string", + "x-nullable": true + }, + "metadata": { + "$ref": "#/definitions/ImageMetadata" + } + }, + "x-nullable": true + }, + "DetectResult": { + "description": "Result of a DetectImage call.", + "type": "object", + "properties": { + "objects": { + "description": "An array of detected objects.", + "type": "array", + "items": { + "$ref": "#/definitions/DetectedObject" + }, + "readOnly": true, + "x-nullable": true + }, + "requestId": { + "description": "Id of the REST API request.", + "type": "string", + "x-nullable": true + }, + "metadata": { + "$ref": "#/definitions/ImageMetadata" + } + }, + "x-nullable": true + }, + "ListModelsResult": { + "description": "Result of the List Domain Models operation.", + "type": "object", + "properties": { + "models": { + "description": "An array of supported models.", + "type": "array", + "items": { + "$ref": "#/definitions/ModelDescription" + }, + "readOnly": true, + "x-nullable": true + } + }, + "x-nullable": true + }, + "ModelDescription": { + "description": "An object describing supported model by name and categories.", + "type": "object", + "properties": { + "name": { + "description": "The name of the model.", + "type": "string", + "x-nullable": true + }, + "categories": { + "description": "Categories of the model.", + "type": "array", + "items": { + "type": "string", + "x-nullable": true + }, + "x-nullable": true + } + }, + "x-nullable": true + }, + "DomainModelResults": { + "description": "Result of image analysis using a specific domain model including additional metadata.", + "type": "object", + "properties": { + "result": { + "description": "Model-specific response.", + "type": "object", + "x-ms-client-flatten": true + }, + "requestId": { + "description": "Id of the REST API request.", + "type": "string", + "x-nullable": true + }, + "metadata": { + "$ref": "#/definitions/ImageMetadata" + } + }, + "x-nullable": true + }, + "OcrResult": { + "type": "object", + "properties": { + "language": { + "description": "The BCP-47 language code of the text in the image.", + "type": "string", + "x-nullable": true + }, + "textAngle": { + "format": "double", + "description": "The angle, in radians, of the detected text with respect to the closest horizontal or vertical direction. After rotating the input image clockwise by this angle, the recognized text lines become horizontal or vertical. 
In combination with the orientation property it can be used to overlay recognition results correctly on the original image, by rotating either the original image or recognition results by a suitable angle around the center of the original image. If the angle cannot be confidently detected, this property is not present. If the image contains text at different angles, only part of the text will be recognized correctly.", + "type": "number", + "x-nullable": false + }, + "orientation": { + "description": "Orientation of the text recognized in the image, if requested. The value (up, down, left, or right) refers to the direction that the top of the recognized text is facing, after the image has been rotated around its center according to the detected text angle (see textAngle property).\r\nIf detection of the orientation was not requested, or no text is detected, the value is 'NotDetected'.", + "type": "string", + "x-nullable": true + }, + "regions": { + "description": "An array of objects, where each object represents a region of recognized text.", + "type": "array", + "items": { + "$ref": "#/definitions/OcrRegion" + }, + "x-nullable": true + } + }, + "x-nullable": true + }, + "OcrRegion": { + "description": "A region consists of multiple lines (e.g. a column of text in a multi-column document).", + "type": "object", + "properties": { + "boundingBox": { + "description": "Bounding box of a recognized region. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down.", + "type": "string", + "x-nullable": true + }, + "lines": { + "description": "An array of recognized lines of text.", + "type": "array", + "items": { + "$ref": "#/definitions/OcrLine" + }, + "x-nullable": true + } + }, + "x-nullable": true + }, + "OcrLine": { + "description": "An object describing a single recognized line of text.", + "type": "object", + "properties": { + "boundingBox": { + "description": "Bounding box of a recognized line. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down.", + "type": "string", + "x-nullable": true + }, + "words": { + "description": "An array of objects, where each object represents a recognized word.", + "type": "array", + "items": { + "$ref": "#/definitions/OcrWord" + }, + "x-nullable": true + } + }, + "x-nullable": true + }, + "OcrWord": { + "description": "Information on a recognized word.", + "type": "object", + "properties": { + "boundingBox": { + "description": "Bounding box of a recognized word. 
The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down.", + "type": "string", + "x-nullable": true + }, + "text": { + "description": "String value of a recognized word.", + "type": "string", + "x-nullable": true + } + }, + "x-nullable": true + }, + "TagResult": { + "description": "The results of a image tag operation, including any tags and image metadata.", + "type": "object", + "properties": { + "tags": { + "description": "A list of tags with confidence level.", + "type": "array", + "items": { + "$ref": "#/definitions/ImageTag" + }, + "x-nullable": true + }, + "requestId": { + "description": "Id of the REST API request.", + "type": "string", + "x-nullable": true + }, + "metadata": { + "$ref": "#/definitions/ImageMetadata" + } + }, + "x-nullable": true + }, + "AreaOfInterestResult": { + "description": "Result of AreaOfInterest operation.", + "type": "object", + "properties": { + "areaOfInterest": { + "$ref": "#/definitions/BoundingRect", + "description": "A bounding box for an area of interest inside an image.", + "readOnly": true + }, + "requestId": { + "description": "Id of the REST API request.", + "type": "string", + "x-nullable": true + }, + "metadata": { + "$ref": "#/definitions/ImageMetadata" + } + }, + "x-nullable": true + }, + "ImageUrl": { + "required": [ + "url" + ], + "type": "object", + "properties": { + "url": { + "description": "Publicly reachable URL of an image.", + "type": "string" + } + } + }, + "ComputerVisionError": { + "description": "Details about the API request error.", + "required": [ + "code", + "message" + ], + "type": "object", + "properties": { + "code": { + "description": "The error code.", + "enum": [ + "InvalidImageFormat", + "UnsupportedMediaType", + "InvalidImageUrl", + "NotSupportedFeature", + "NotSupportedImage", + "Timeout", + "InternalServerError", + "InvalidImageSize", + "BadArgument", + "DetectFaceError", + "NotSupportedLanguage", + "InvalidThumbnailSize", + "InvalidDetails", + "InvalidModel", + "CancelledRequest", + "NotSupportedVisualFeature", + "FailedToProcess", + "Unspecified", + "StorageException" + ], + "x-ms-enum": { + "name": "ComputerVisionErrorCodes", + "modelAsString": true + } + }, + "message": { + "description": "A message explaining the error reported by the service.", + "type": "string" + }, + "requestId": { + "description": "A unique request identifier.", + "type": "string" + } + } + }, + "LandmarkResults": { + "description": "Result of domain-specific classifications for the domain of landmarks.", + "type": "object", + "properties": { + "landmarks": { + "description": "List of landmarks recognized in the image.", + "type": "array", + "items": { + "$ref": "#/definitions/LandmarksModel" + }, + "x-nullable": true + }, + "requestId": { + "description": "Id of the REST API request.", + "type": "string", + "x-nullable": true + }, + "metadata": { + "$ref": "#/definitions/ImageMetadata" + } + }, + "x-nullable": true + }, + "CelebrityResults": { + "description": "Result of domain-specific classifications for the domain of celebrities.", + "type": "object", + "properties": { + "celebrities": { + "description": "List of celebrities recognized in the image.", + "type": "array", + "items": { + "$ref": "#/definitions/CelebritiesModel" + }, + 
"x-nullable": true + }, + "requestId": { + "description": "Id of the REST API request.", + "type": "string", + "x-nullable": true + }, + "metadata": { + "$ref": "#/definitions/ImageMetadata" + } + }, + "x-nullable": true + } + }, + "parameters": { + "Endpoint": { + "name": "Endpoint", + "in": "path", + "description": "Supported Cognitive Services endpoints.", + "required": true, + "type": "string", + "x-ms-parameter-location": "client", + "x-ms-skip-url-encoding": true + }, + "ImageStream": { + "name": "Image", + "in": "body", + "description": "An image stream.", + "required": true, + "schema": { + "format": "file", + "type": "object" + }, + "x-ms-parameter-location": "method" + }, + "ImageUrl": { + "name": "ImageUrl", + "in": "body", + "description": "A JSON document with a URL pointing to the image that is to be analyzed.", + "required": true, + "schema": { + "$ref": "#/definitions/ImageUrl" + }, + "x-ms-parameter-location": "method", + "x-ms-client-flatten": true + }, + "ServiceLanguage": { + "name": "language", + "in": "query", + "description": "The desired language for output generation. If this parameter is not specified, the default value is "en".Supported languages:en - English, Default. es - Spanish, ja - Japanese, pt - Portuguese, zh - Simplified Chinese.", + "required": false, + "type": "string", + "default": "en", + "enum": [ + "en", + "es", + "ja", + "pt", + "zh" + ], + "x-ms-parameter-location": "method", + "x-nullable": false + }, + "DescriptionExclude": { + "name": "descriptionExclude", + "in": "query", + "description": "Turn off specified domain models when generating the description.", + "required": false, + "type": "array", + "items": { + "type": "string", + "enum": [ + "Celebrities", + "Landmarks" + ], + "x-nullable": false, + "x-ms-enum": { + "name": "DescriptionExclude", + "modelAsString": false + } + }, + "collectionFormat": "csv", + "x-nullable": true, + "x-ms-parameter-location": "method" + }, + "OcrLanguage": { + "name": "language", + "in": "query", + "description": "The BCP-47 language code of the text to be detected in the image. The default value is 'unk'.", + "required": false, + "type": "string", + "default": "unk", + "enum": [ + "unk", + "zh-Hans", + "zh-Hant", + "cs", + "da", + "nl", + "en", + "fi", + "fr", + "de", + "el", + "hu", + "it", + "ja", + "ko", + "nb", + "pl", + "pt", + "ru", + "es", + "sv", + "tr", + "ar", + "ro", + "sr-Cyrl", + "sr-Latn", + "sk" + ], + "x-ms-parameter-location": "method", + "x-nullable": false, + "x-ms-enum": { + "name": "OcrLanguages", + "modelAsString": false + } + }, + "VisualFeatures": { + "name": "visualFeatures", + "in": "query", + "description": "A string indicating what visual feature types to return. Multiple values should be comma-separated. Valid visual feature types include: Categories - categorizes image content according to a taxonomy defined in documentation. Tags - tags the image with a detailed list of words related to the image content. Description - describes the image content with a complete English sentence. Faces - detects if faces are present. If present, generate coordinates, gender and age. ImageType - detects if image is clipart or a line drawing. Color - determines the accent color, dominant color, and whether an image is black&white. Adult - detects if the image is pornographic in nature (depicts nudity or a sex act), or is gory (depicts extreme violence or blood). Sexually suggestive content (aka racy content) is also detected. 
Objects - detects various objects within an image, including the approximate location. The Objects argument is only available in English. Brands - detects various brands within an image, including the approximate location. The Brands argument is only available in English.", + "required": false, + "type": "array", + "items": { + "type": "string", + "enum": [ + "ImageType", + "Faces", + "Adult", + "Categories", + "Color", + "Tags", + "Description", + "Objects", + "Brands" + ], + "x-nullable": false, + "x-ms-enum": { + "name": "VisualFeatureTypes", + "modelAsString": false + } + }, + "collectionFormat": "csv", + "x-ms-parameter-location": "method" + }, + "DetectOrientation": { + "name": "detectOrientation", + "in": "query", + "description": "Whether detect the text orientation in the image. With detectOrientation=true the OCR service tries to detect the image orientation and correct it before further processing (e.g. if it's upside-down).", + "required": true, + "type": "boolean", + "default": true, + "x-ms-parameter-location": "method" + } + }, + "x-ms-paths": { + "/analyze?overload=stream": { + "post": { + "description": "This operation extracts a rich set of visual features based on the image content.\r\nTwo input methods are supported -- (1) Uploading an image or (2) specifying an image URL. Within your request, there is an optional parameter to allow you to choose which features to return. By default, image categories are returned in the response.\r\nA successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong.", + "operationId": "AnalyzeImageInStream", + "consumes": [ + "application/octet-stream", + "multipart/form-data" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "$ref": "#/parameters/VisualFeatures" + }, + { + "name": "details", + "in": "query", + "description": "A string indicating which domain-specific details to return. Multiple values should be comma-separated. Valid visual feature types include: Celebrities - identifies celebrities if detected in the image, Landmarks - identifies notable landmarks in the image.", + "required": false, + "type": "array", + "items": { + "type": "string", + "enum": [ + "Celebrities", + "Landmarks" + ], + "x-nullable": false, + "x-ms-enum": { + "name": "Details", + "modelAsString": false + } + }, + "collectionFormat": "csv", + "x-nullable": true + }, + { + "$ref": "#/parameters/ServiceLanguage" + }, + { + "$ref": "#/parameters/DescriptionExclude" + }, + { + "$ref": "#/parameters/ImageStream" + } + ], + "responses": { + "200": { + "description": "The response include the extracted features in JSON format. Here is the definitions for enumeration types:\r\n ClipartType\r\n Non - clipart = 0, ambiguous = 1, normal - clipart = 2, good - clipart = 3. LineDrawingTypeNon - LineDrawing = 0, LineDrawing = 1.", + "schema": { + "$ref": "#/definitions/ImageAnalysis" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful AnalyzeImage request": { + "$ref": "./examples/SuccessfulAnalyzeImageWithStream.json" + } + } + } + }, + "/areaOfInterest?overload=stream": { + "post": { + "description": "This operation returns a bounding box around the most important area of the image.\r\nA successful response will be returned in JSON. 
If the request failed, the response contains an error code and a message to help determine what went wrong.\r\nUpon failure, the error code and an error message are returned. The error code could be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, FailedToProcess, Timeout, or InternalServerError.", + "operationId": "GetAreaOfInterestInStream", + "consumes": [ + "application/octet-stream", + "multipart/form-data" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "$ref": "#/parameters/ImageStream" + } + ], + "responses": { + "200": { + "description": "The response includes the extracted area of interest in JSON format.", + "schema": { + "$ref": "#/definitions/AreaOfInterestResult" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful GetAreaOfInterest request": { + "$ref": "./examples/SuccessfulGetAreaOfInterestWithStream.json" + } + } + } + }, + "/describe?overload=stream": { + "post": { + "description": "This operation generates a description of an image in human readable language with complete sentences. The description is based on a collection of content tags, which are also returned by the operation. More than one description can be generated for each image. Descriptions are ordered by their confidence score. Descriptions may include results from celebrity and landmark domain models, if applicable.\r\nTwo input methods are supported -- (1) Uploading an image or (2) specifying an image URL.\r\nA successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong.", + "operationId": "DescribeImageInStream", + "consumes": [ + "application/octet-stream", + "multipart/form-data" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "name": "maxCandidates", + "in": "query", + "description": "Maximum number of candidate descriptions to be returned. The default is 1.", + "required": false, + "type": "integer", + "format": "int32", + "default": 1, + "x-nullable": true + }, + { + "$ref": "#/parameters/ServiceLanguage" + }, + { + "$ref": "#/parameters/DescriptionExclude" + }, + { + "$ref": "#/parameters/ImageStream" + } + ], + "responses": { + "200": { + "description": "Image description object.", + "schema": { + "$ref": "#/definitions/ImageDescription" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful DescribeImage request": { + "$ref": "./examples/SuccessfulDescribeImageWithStream.json" + } + } + } + }, + "/detect?overload=stream": { + "post": { + "description": "Performs object detection on the specified image.\r\nTwo input methods are supported -- (1) Uploading an image or (2) specifying an image URL.\r\nA successful response will be returned in JSON. 
If the request failed, the response will contain an error code and a message to help understand what went wrong.", + "operationId": "DetectObjectsInStream", + "consumes": [ + "application/octet-stream", + "multipart/form-data" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "$ref": "#/parameters/ImageStream" + } + ], + "responses": { + "200": { + "description": "The response include the detected objects in JSON format.", + "schema": { + "$ref": "#/definitions/DetectResult" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful DetectObjects request": { + "$ref": "./examples/SuccessfulDetectObjectsWithStream.json" + } + } + } + }, + "/generateThumbnail?overload=stream": { + "post": { + "description": "This operation generates a thumbnail image with the user-specified width and height. By default, the service analyzes the image, identifies the region of interest (ROI), and generates smart cropping coordinates based on the ROI. Smart cropping helps when you specify an aspect ratio that differs from that of the input image.\r\nA successful response contains the thumbnail image binary. If the request failed, the response contains an error code and a message to help determine what went wrong.\r\nUpon failure, the error code and an error message are returned. The error code could be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, InvalidThumbnailSize, NotSupportedImage, FailedToProcess, Timeout, or InternalServerError.", + "operationId": "GenerateThumbnailInStream", + "consumes": [ + "application/octet-stream", + "multipart/form-data" + ], + "produces": [ + "application/octet-stream" + ], + "parameters": [ + { + "name": "width", + "in": "query", + "description": "Width of the thumbnail, in pixels. It must be between 1 and 1024. Recommended minimum of 50.", + "required": true, + "type": "integer", + "format": "int32", + "maximum": 1024, + "minimum": 1, + "x-nullable": false + }, + { + "name": "height", + "in": "query", + "description": "Height of the thumbnail, in pixels. It must be between 1 and 1024. Recommended minimum of 50.", + "required": true, + "type": "integer", + "format": "int32", + "maximum": 1024, + "minimum": 1, + "x-nullable": false + }, + { + "name": "smartCropping", + "in": "query", + "description": "Boolean flag for enabling smart cropping.", + "required": false, + "type": "boolean", + "default": false, + "x-nullable": true + }, + { + "$ref": "#/parameters/ImageStream" + } + ], + "responses": { + "200": { + "description": "The generated thumbnail in binary format.", + "schema": { + "type": "file" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful GenerateThumbnail request": { + "$ref": "./examples/SuccessfulGenerateThumbnailWithStream.json" + } + } + } + }, + "/models/{model}/analyze?overload=stream": { + "post": { + "description": "This operation recognizes content within an image by applying a domain-specific model. The list of domain-specific models that are supported by the Computer Vision API can be retrieved using the /models GET request. 
Currently, the API provides following domain-specific models: celebrities, landmarks.\r\nTwo input methods are supported -- (1) Uploading an image or (2) specifying an image URL.\r\nA successful response will be returned in JSON.\r\nIf the request failed, the response will contain an error code and a message to help understand what went wrong.", + "operationId": "AnalyzeImageByDomainInStream", + "consumes": [ + "application/octet-stream", + "multipart/form-data" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "name": "model", + "in": "path", + "description": "The domain-specific content to recognize.", + "required": true, + "type": "string", + "x-nullable": true + }, + { + "$ref": "#/parameters/ServiceLanguage" + }, + { + "$ref": "#/parameters/ImageStream" + } + ], + "responses": { + "200": { + "description": "Analysis result based on the domain model.", + "schema": { + "$ref": "#/definitions/DomainModelResults" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful AnalyzeImageByDomain request": { + "$ref": "./examples/SuccessfulAnalyzeImageByDomainWithStream.json" + } + } + } + }, + "/ocr?overload=stream": { + "post": { + "description": "Optical Character Recognition (OCR) detects text in an image and extracts the recognized characters into a machine-usable character stream.\r\nUpon success, the OCR results will be returned.\r\nUpon failure, the error code together with an error message will be returned. The error code can be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or InternalServerError.", + "operationId": "RecognizePrintedTextInStream", + "consumes": [ + "application/octet-stream", + "multipart/form-data" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "$ref": "#/parameters/DetectOrientation" + }, + { + "$ref": "#/parameters/OcrLanguage" + }, + { + "$ref": "#/parameters/ImageStream" + } + ], + "responses": { + "200": { + "description": "The OCR results in the hierarchy of region/line/word. The results include text, bounding box for regions, lines and words. The angle, in radians, of the detected text with respect to the closest horizontal or vertical direction. After rotating the input image clockwise by this angle, the recognized text lines become horizontal or vertical. In combination with the orientation property it can be used to overlay recognition results correctly on the original image, by rotating either the original image or recognition results by a suitable angle around the center of the original image. If the angle cannot be confidently detected, this property is not present. If the image contains text at different angles, only part of the text will be recognized correctly.", + "schema": { + "$ref": "#/definitions/OcrResult" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful RecognizePrintedText request": { + "$ref": "./examples/SuccessfulRecognizePrintedTextWithStream.json" + } + } + } + }, + "/tag?overload=stream": { + "post": { + "description": "This operation generates a list of words, or tags, that are relevant to the content of the supplied image. The Computer Vision API can return tags based on objects, living beings, scenery or actions found in images. 
Unlike categories, tags are not organized according to a hierarchical classification system, but correspond to image content. Tags may contain hints to avoid ambiguity or provide context, for example the tag \"ascomycete\" may be accompanied by the hint \"fungus\".\r\nTwo input methods are supported -- (1) Uploading an image or (2) specifying an image URL.\r\nA successful response will be returned in JSON. If the request failed, the response will contain an error code and a message to help understand what went wrong.", + "operationId": "TagImageInStream", + "consumes": [ + "application/octet-stream", + "multipart/form-data" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "$ref": "#/parameters/ServiceLanguage" + }, + { + "$ref": "#/parameters/ImageStream" + } + ], + "responses": { + "200": { + "description": "Image tags object.", + "schema": { + "$ref": "#/definitions/TagResult" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful TagImage request": { + "$ref": "./examples/SuccessfulTagImageWithStream.json" + } + } + } + } + } +} diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/Ocr.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/Ocr.json new file mode 100644 index 000000000000..9d0e2df9700a --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/Ocr.json @@ -0,0 +1,452 @@ +{ + "swagger": "2.0", + "info": { + "version": "3.1", + "title": "Computer Vision Client", + "description": "The Computer Vision API provides state-of-the-art algorithms to process images and return information. For example, it can be used to determine if an image contains mature content, or it can be used to find all the faces in an image. It also has other features like estimating dominant and accent colors, categorizing the content of images, and describing an image with complete English sentences. Additionally, it can also intelligently generate images thumbnails for displaying large images effectively." + }, + "securityDefinitions": { + "apim_key": { + "type": "apiKey", + "name": "Ocp-Apim-Subscription-Key", + "in": "header" + } + }, + "security": [ + { + "apim_key": [] + } + ], + "x-ms-parameterized-host": { + "hostTemplate": "{Endpoint}", + "useSchemePrefix": false, + "parameters": [ + { + "$ref": "#/parameters/Endpoint" + } + ] + }, + "host": "westcentralus.api.cognitive.microsoft.com", + "basePath": "/vision/v3.1", + "schemes": [ + "https" + ], + "paths": { + "/read/analyze": { + "post": { + "description": "Use this interface to get the result of a Read operation, employing the state-of-the-art Optical Character Recognition (OCR) algorithms optimized for text-heavy documents. When you use the Read interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your 'GetReadResult' operation to access OCR results.​", + "operationId": "Read", + "parameters": [ + { + "$ref": "#/parameters/OcrDetectionLanguage" + }, + { + "$ref": "#/parameters/ImageUrl" + } + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "responses": { + "202": { + "description": "The service has accepted the request and will start processing later.", + "headers": { + "Operation-Location": { + "description": "URL to query for status of the operation. The operation ID will expire in 48 hours. 
", + "type": "string" + } + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful Analyze request": { + "$ref": "./examples/SuccessfulReadWithUrl.json" + } + } + } + }, + "/read/analyzeResults/{operationId}": { + "get": { + "description": "This interface is used for getting OCR results of Read operation. The URL to this interface should be retrieved from 'Operation-Location' field returned from Read interface.", + "operationId": "GetReadResult", + "parameters": [ + { + "name": "operationId", + "in": "path", + "description": "Id of read operation returned in the response of the 'Read' interface.", + "required": true, + "type": "string", + "format": "uuid" + } + ], + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "Returns the read operation status.", + "schema": { + "$ref": "#/definitions/ReadOperationResult" + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful Get Read Result request": { + "$ref": "./examples/SuccessfulGetReadResult.json" + } + } + } + } + }, + "x-ms-paths": { + "/read/analyze?overload=stream": { + "post": { + "description": "Use this interface to get the result of a Read operation, employing the state-of-the-art Optical Character Recognition (OCR) algorithms optimized for text-heavy documents. When you use the Read interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your 'GetReadResult' operation to access OCR results.​", + "operationId": "ReadInStream", + "parameters": [ + { + "$ref": "#/parameters/OcrDetectionLanguage" + }, + { + "$ref": "#/parameters/ImageStream" + } + ], + "consumes": [ + "application/octet-stream" + ], + "produces": [ + "application/json" + ], + "responses": { + "202": { + "description": "The service has accepted the request and will start processing later.", + "headers": { + "Operation-Location": { + "description": "URL to query for status of the operation. The operation ID will expire in 48 hours. 
", + "type": "string" + } + } + }, + "default": { + "description": "Error response.", + "schema": { + "$ref": "#/definitions/ComputerVisionError" + } + } + }, + "x-ms-examples": { + "Successful Analyze request": { + "$ref": "./examples/SuccessfulReadWithStream.json" + } + } + } + } + }, + "definitions": { + "ReadOperationResult": { + "type": "object", + "description": "OCR result of the read operation.", + "properties": { + "status": { + "description": "Status of the read operation.", + "$ref": "#/definitions/OperationStatus" + }, + "createdDateTime": { + "type": "string", + "description": "Get UTC date time the batch operation was submitted.", + "x-nullable": false + }, + "lastUpdatedDateTime": { + "type": "string", + "description": "Get last updated UTC date time of this batch operation.", + "x-nullable": false + }, + "analyzeResult": { + "description": "Analyze batch operation result.", + "type": "object", + "$ref": "#/definitions/analyzeResults" + } + } + }, + "OperationStatus": { + "type": "string", + "description": "Status code of the text operation.", + "enum": [ + "notStarted", + "running", + "failed", + "succeeded" + ], + "x-ms-enum": { + "name": "OperationStatusCodes", + "modelAsString": false + }, + "x-nullable": false + }, + "ReadResult": { + "description": "Text extracted from a page in the input document.", + "type": "object", + "required": [ + "page", + "angle", + "width", + "height", + "unit", + "lines" + ], + "properties": { + "page": { + "description": "The 1-based page number of the recognition result.", + "type": "integer" + }, + "language": { + "description": "The BCP-47 language code of the recognized text page.", + "type": "string" + }, + "angle": { + "description": "The orientation of the image in degrees in the clockwise direction. Range between [-180, 180).", + "type": "number" + }, + "width": { + "description": "The width of the image in pixels or the PDF in inches.", + "type": "number" + }, + "height": { + "description": "The height of the image in pixels or the PDF in inches.", + "type": "number" + }, + "unit": { + "description": "The unit used in the Width, Height and BoundingBox. For images, the unit is 'pixel'. For PDF, the unit is 'inch'.", + "type": "string", + "enum": [ + "pixel", + "inch" + ], + "x-ms-enum": { + "name": "TextRecognitionResultDimensionUnit", + "modelAsString": false + }, + "x-nullable": false + }, + "lines": { + "description": "A list of recognized text lines.", + "type": "array", + "items": { + "$ref": "#/definitions/Line" + } + } + } + }, + "analyzeResults": { + "description": "Analyze batch operation result.", + "type": "object", + "required": [ + "version", + "readResults" + ], + "properties": { + "version": { + "description": "Version of schema used for this result.", + "type": "string" + }, + "readResults": { + "description": "Text extracted from the input.", + "type": "array", + "items": { + "$ref": "#/definitions/ReadResult" + } + } + } + }, + "Line": { + "description": "An object representing a recognized text line.", + "type": "object", + "required": [ + "boundingBox", + "text", + "words" + ], + "properties": { + "language": { + "description": "The BCP-47 language code of the recognized text line. 
Only provided where the language of the line differs from the page's.", + "type": "string" + }, + "boundingBox": { + "description": "Bounding box of a recognized line.", + "$ref": "#/definitions/BoundingBox" + }, + "text": { + "description": "The text content of the line.", + "type": "string" + }, + "words": { + "description": "List of words in the text line.", + "type": "array", + "items": { + "$ref": "#/definitions/Word" + } + } + } + }, + "Word": { + "description": "An object representing a recognized word.", + "type": "object", + "required": [ + "boundingBox", + "text", + "confidence" + ], + "properties": { + "boundingBox": { + "description": "Bounding box of a recognized word.", + "$ref": "#/definitions/BoundingBox" + }, + "text": { + "description": "The text content of the word.", + "type": "string" + }, + "confidence": { + "description": "Qualitative confidence measure.", + "type": "number", + "format": "float" + } + } + }, + "BoundingBox": { + "description": "Quadrangle bounding box, with coordinates in original image. The eight numbers represent the four points (x-coordinate, y-coordinate from the left-top corner of the image) of the detected rectangle from the left-top corner in the clockwise direction. For images, coordinates are in pixels. For PDF, coordinates are in inches.", + "type": "array", + "items": { + "type": "number", + "x-nullable": false + } + }, + "ComputerVisionError": { + "description": "Details about the API request error.", + "required": [ + "code", + "message" + ], + "type": "object", + "properties": { + "code": { + "description": "The error code.", + "enum": [ + "InvalidImageFormat", + "UnsupportedMediaType", + "InvalidImageUrl", + "NotSupportedFeature", + "NotSupportedImage", + "Timeout", + "InternalServerError", + "InvalidImageSize", + "BadArgument", + "DetectFaceError", + "NotSupportedLanguage", + "InvalidThumbnailSize", + "InvalidDetails", + "InvalidModel", + "CancelledRequest", + "NotSupportedVisualFeature", + "FailedToProcess", + "Unspecified", + "StorageException" + ], + "x-ms-enum": { + "name": "ComputerVisionErrorCodes", + "modelAsString": true + } + }, + "message": { + "description": "A message explaining the error reported by the service.", + "type": "string" + }, + "requestId": { + "description": "A unique request identifier.", + "type": "string" + } + } + }, + "ImageUrl": { + "type": "object", + "required": [ + "url" + ], + "properties": { + "url": { + "description": "Publicly reachable URL of an image.", + "type": "string" + } + } + } + }, + "parameters": { + "OcrDetectionLanguage": { + "name": "language", + "in": "query", + "description": "The BCP-47 language code of the text in the document. Currently, only English ('en'), Dutch ('nl'), French ('fr'), German ('de'), Italian ('it'), Portuguese ('pt'), and Spanish ('es') are supported.
Read supports auto language identification and multi-language documents, so only provide a language code if you would like to force the document to be processed as that specific language.", + "required": false, + "default": "en", + "x-ms-parameter-location": "method", + "type": "string", + "x-ms-enum": { + "name": "OcrDetectionLanguage", + "modelAsString": true + }, + "enum": [ + "en", + "es", + "fr", + "de", + "it", + "nl", + "pt" + ] + }, + "ImageUrl": { + "name": "ImageUrl", + "in": "body", + "required": true, + "x-ms-parameter-location": "method", + "x-ms-client-flatten": true, + "description": "A JSON document with a URL pointing to the image that is to be analyzed.", + "schema": { + "$ref": "#/definitions/ImageUrl" + } + }, + "ImageStream": { + "name": "Image", + "in": "body", + "required": true, + "x-ms-parameter-location": "method", + "description": "An image stream.", + "schema": { + "type": "object", + "format": "file" + } + }, + "Endpoint": { + "name": "Endpoint", + "description": "Supported Cognitive Services endpoints.", + "x-ms-parameter-location": "client", + "required": true, + "type": "string", + "in": "path", + "x-ms-skip-url-encoding": true + } + } +} diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulAnalyzeImageByDomainWithStream.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulAnalyzeImageByDomainWithStream.json new file mode 100644 index 000000000000..a3cd00555cc6 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulAnalyzeImageByDomainWithStream.json @@ -0,0 +1,34 @@ +{ + "parameters": { + "Endpoint": "https://westus.api.cognitive.microsoft.com", + "Ocp-Apim-Subscription-Key": "{API key}", + "model": "Celebrities", + "Image": "{binary}" + }, + "responses": { + "200": { + "body": { + "requestId": "f0027b4b-dc0d-4082-9228-1545ed246b03", + "metadata": { + "width": 1500, + "height": 1000, + "format": "Jpeg" + }, + "result": { + "celebrities": [ + { + "name": "Satya Nadella", + "faceRectangle": { + "left": 597, + "top": 162, + "width": 248, + "height": 248 + }, + "confidence": 0.999028444 + } + ] + } + } + } + } +} diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulAnalyzeImageByDomainWithUrl.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulAnalyzeImageByDomainWithUrl.json new file mode 100644 index 000000000000..92ead72b5f8f --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulAnalyzeImageByDomainWithUrl.json @@ -0,0 +1,36 @@ +{ + "parameters": { + "Endpoint": "https://westus.api.cognitive.microsoft.com", + "Ocp-Apim-Subscription-Key": "{API key}", + "model": "Celebrities", + "ImageUrl": { + "url": "{url}" + } + }, + "responses": { + "200": { + "body": { + "requestId": "f0027b4b-dc0d-4082-9228-1545ed246b03", + "metadata": { + "width": 1500, + "height": 1000, + "format": "Jpeg" + }, + "result": { + "celebrities": [ + { + "name": "Satya Nadella", + "faceRectangle": { + "left": 597, + "top": 162, + "width": 248, + "height": 248 + }, + "confidence": 0.999028444 + } + ] + } + } + } + } +} diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulAnalyzeImageWithStream.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulAnalyzeImageWithStream.json new file mode 100644 index
000000000000..ba86791a7e36 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulAnalyzeImageWithStream.json @@ -0,0 +1,176 @@ +{ + "parameters": { + "Endpoint": "https://westus.api.cognitive.microsoft.com", + "Ocp-Apim-Subscription-Key": "{API key}", + "visualFeatures": [ + "Categories", + "Adult", + "Tags", + "Description", + "Faces", + "Color", + "ImageType", + "Objects", + "Brands" + ], + "details": [ + "Celebrities", + "Landmarks" + ], + "language": "en", + "Image": "{binary}" + }, + "responses": { + "200": { + "headers": {}, + "body": { + "categories": [ + { + "name": "abstract_", + "score": 0.00390625 + }, + { + "name": "people_", + "score": 0.83984375, + "detail": { + "celebrities": [ + { + "name": "Satya Nadella", + "faceRectangle": { + "left": 597, + "top": 162, + "width": 248, + "height": 248 + }, + "confidence": 0.999028444 + } + ], + "landmarks": [ + { + "name": "Forbidden City", + "confidence": 0.9978346 + } + ] + } + } + ], + "adult": { + "isAdultContent": false, + "isRacyContent": false, + "isGoryContent": false, + "adultScore": 0.0934349000453949, + "racyScore": 0.068613491952419281, + "goreScore": 0.012872257380997575 + }, + "tags": [ + { + "name": "person", + "confidence": 0.98979085683822632 + }, + { + "name": "man", + "confidence": 0.94493889808654785 + }, + { + "name": "outdoor", + "confidence": 0.938492476940155 + }, + { + "name": "window", + "confidence": 0.89513939619064331 + }, + { + "name": "pangolin", + "confidence": 0.7250059783791661, + "hint": "mammal" + } + ], + "description": { + "tags": [ + "person", + "man", + "outdoor", + "window", + "glasses" + ], + "captions": [ + { + "text": "Satya Nadella sitting on a bench", + "confidence": 0.48293603002174407 + } + ] + }, + "requestId": "0dbec5ad-a3d3-4f7e-96b4-dfd57efe967d", + "metadata": { + "width": 1500, + "height": 1000, + "format": "Jpeg" + }, + "faces": [ + { + "age": 44, + "gender": "Male", + "faceRectangle": { + "left": 593, + "top": 160, + "width": 250, + "height": 250 + } + } + ], + "color": { + "dominantColorForeground": "Brown", + "dominantColorBackground": "Brown", + "dominantColors": [ + "Brown", + "Black" + ], + "accentColor": "873B59", + "isBWImg": false + }, + "imageType": { + "clipArtType": 0, + "lineDrawingType": 0 + }, + "objects": [ + { + "rectangle": { + "x": 0, + "y": 0, + "w": 50, + "h": 50 + }, + "object": "tree", + "confidence": 0.9, + "parent": { + "object": "plant", + "confidence": 0.95 + } + } + ], + "brands": [ + { + "name": "Pepsi", + "confidence": 0.857, + "rectangle": { + "x": 489, + "y": 79, + "w": 161, + "h": 177 + } + }, + { + "name": "Coca-Cola", + "confidence": 0.893, + "rectangle": { + "x": 216, + "y": 55, + "w": 171, + "h": 372 + } + } + ] + } + } + } +} diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulAnalyzeImageWithUrl.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulAnalyzeImageWithUrl.json new file mode 100644 index 000000000000..be78226e0e16 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulAnalyzeImageWithUrl.json @@ -0,0 +1,184 @@ +{ + "parameters": { + "Endpoint": "https://westus.api.cognitive.microsoft.com", + "Ocp-Apim-Subscription-Key": "{API key}", + "visualFeatures": [ + "Categories", + "Adult", + "Tags", + "Description", + "Faces", + "Color", + "ImageType", + "Objects", + "Brands" + ], + "details": [ + "Celebrities", + "Landmarks" + ], + "language": 
"en", + "ImageUrl": { + "url": "{url}" + } + }, + "responses": { + "200": { + "headers": {}, + "body": { + "categories": [ + { + "name": "abstract_", + "score": 0.00390625 + }, + { + "name": "people_", + "score": 0.83984375, + "detail": { + "celebrities": [ + { + "name": "Satya Nadella", + "faceRectangle": { + "left": 597, + "top": 162, + "width": 248, + "height": 248 + }, + "confidence": 0.999028444 + } + ] + } + }, + { + "name": "building_", + "score": 0.984375, + "detail": { + "landmarks": [ + { + "name": "Forbidden City", + "confidence": 0.98290169239044189 + } + ] + } + } + ], + "adult": { + "isAdultContent": false, + "isRacyContent": false, + "isGoryContent": false, + "adultScore": 0.0934349000453949, + "racyScore": 0.068613491952419281, + "goreScore": 0.012872257380997575 + }, + "tags": [ + { + "name": "person", + "confidence": 0.98979085683822632 + }, + { + "name": "man", + "confidence": 0.94493889808654785 + }, + { + "name": "outdoor", + "confidence": 0.938492476940155 + }, + { + "name": "window", + "confidence": 0.89513939619064331 + }, + { + "name": "pangolin", + "confidence": 0.7250059783791661, + "hint": "mammal" + } + ], + "description": { + "tags": [ + "person", + "man", + "outdoor", + "window", + "glasses" + ], + "captions": [ + { + "text": "Satya Nadella sitting on a bench", + "confidence": 0.48293603002174407 + } + ] + }, + "requestId": "0dbec5ad-a3d3-4f7e-96b4-dfd57efe967d", + "metadata": { + "width": 1500, + "height": 1000, + "format": "Jpeg" + }, + "faces": [ + { + "age": 44, + "gender": "Male", + "faceRectangle": { + "left": 593, + "top": 160, + "width": 250, + "height": 250 + } + } + ], + "color": { + "dominantColorForeground": "Brown", + "dominantColorBackground": "Brown", + "dominantColors": [ + "Brown", + "Black" + ], + "accentColor": "873B59", + "isBWImg": false + }, + "imageType": { + "clipArtType": 0, + "lineDrawingType": 0 + }, + "objects": [ + { + "rectangle": { + "x": 0, + "y": 0, + "w": 50, + "h": 50 + }, + "object": "tree", + "confidence": 0.9, + "parent": { + "object": "plant", + "confidence": 0.95 + } + } + ], + "brands": [ + { + "name": "Pepsi", + "confidence": 0.857, + "rectangle": { + "x": 489, + "y": 79, + "w": 161, + "h": 177 + } + }, + { + "name": "Coca-Cola", + "confidence": 0.893, + "rectangle": { + "x": 216, + "y": 55, + "w": 171, + "h": 372 + } + } + ] + } + } + } +} diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulDescribeImageWithStream.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulDescribeImageWithStream.json new file mode 100644 index 000000000000..071248140600 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulDescribeImageWithStream.json @@ -0,0 +1,44 @@ +{ + "parameters": { + "Endpoint": "https://westus.api.cognitive.microsoft.com", + "Ocp-Apim-Subscription-Key": "{API key}", + "maxCandidates": 1, + "Image": "{binary}" + }, + "responses": { + "200": { + "headers": {}, + "body": { + "description": { + "tags": [ + "person", + "man", + "outdoor", + "window", + "glasses" + ], + "captions": [ + { + "text": "Satya Nadella sitting on a bench", + "confidence": 0.48293603002174407 + }, + { + "text": "Satya Nadella is sitting on a bench", + "confidence": 0.40037006815422832 + }, + { + "text": "Satya Nadella sitting in front of a building", + "confidence": 0.38035155997373377 + } + ] + }, + "requestId": "ed2de1c6-fb55-4686-b0da-4da6e05d283f", + "metadata": { + "width": 1500, + "height": 
1000, + "format": "Jpeg" + } + } + } + } +} diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulDescribeImageWithUrl.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulDescribeImageWithUrl.json new file mode 100644 index 000000000000..087ac0515231 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulDescribeImageWithUrl.json @@ -0,0 +1,46 @@ +{ + "parameters": { + "Endpoint": "https://westus.api.cognitive.microsoft.com", + "Ocp-Apim-Subscription-Key": "{API key}", + "maxCandidates": 1, + "ImageUrl": { + "url": "{url}" + } + }, + "responses": { + "200": { + "headers": {}, + "body": { + "description": { + "tags": [ + "person", + "man", + "outdoor", + "window", + "glasses" + ], + "captions": [ + { + "text": "Satya Nadella sitting on a bench", + "confidence": 0.48293603002174407 + }, + { + "text": "Satya Nadella is sitting on a bench", + "confidence": 0.40037006815422832 + }, + { + "text": "Satya Nadella sitting in front of a building", + "confidence": 0.38035155997373377 + } + ] + }, + "requestId": "ed2de1c6-fb55-4686-b0da-4da6e05d283f", + "metadata": { + "width": 1500, + "height": 1000, + "format": "Jpeg" + } + } + } + } +} diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulDetectObjectsWithStream.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulDetectObjectsWithStream.json new file mode 100644 index 000000000000..8f4ad7fd73aa --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulDetectObjectsWithStream.json @@ -0,0 +1,35 @@ +{ + "parameters": { + "Endpoint": "https://westus.api.cognitive.microsoft.com", + "Ocp-Apim-Subscription-Key": "{API key}", + "Image": "{binary}" + }, + "responses": { + "200": { + "body": { + "objects": [ + { + "rectangle": { + "x": 0, + "y": 0, + "w": 50, + "h": 50 + }, + "object": "tree", + "confidence": 0.9, + "parent": { + "object": "plant", + "confidence": 0.95 + } + } + ], + "requestId": "1ad0e45e-b7b4-4be3-8042-53be96103337", + "metadata": { + "width": 100, + "height": 100, + "format": "Jpeg" + } + } + } + } +} diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulDetectObjectsWithUrl.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulDetectObjectsWithUrl.json new file mode 100644 index 000000000000..7ea387eb41be --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulDetectObjectsWithUrl.json @@ -0,0 +1,37 @@ +{ + "parameters": { + "Endpoint": "https://westus.api.cognitive.microsoft.com", + "Ocp-Apim-Subscription-Key": "{API key}", + "ImageUrl": { + "url": "{url}" + } + }, + "responses": { + "200": { + "body": { + "objects": [ + { + "rectangle": { + "x": 0, + "y": 0, + "w": 50, + "h": 50 + }, + "object": "tree", + "confidence": 0.9, + "parent": { + "object": "plant", + "confidence": 0.95 + } + } + ], + "requestId": "1ad0e45e-b7b4-4be3-8042-53be96103337", + "metadata": { + "width": 100, + "height": 100, + "format": "Jpeg" + } + } + } + } +} diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulGenerateThumbnailWithStream.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulGenerateThumbnailWithStream.json new file mode 100644 index 
000000000000..22d52fbe7644 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulGenerateThumbnailWithStream.json @@ -0,0 +1,16 @@ +{ + "parameters": { + "Endpoint": "https://westus.api.cognitive.microsoft.com", + "Ocp-Apim-Subscription-Key": "{API key}", + "width": "500", + "height": "500", + "smartCropping": true, + "Image": "{binary}" + }, + "responses": { + "200": { + "headers": {}, + "body": "{binary}" + } + } +} diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulGenerateThumbnailWithUrl.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulGenerateThumbnailWithUrl.json new file mode 100644 index 000000000000..28744179ecb8 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulGenerateThumbnailWithUrl.json @@ -0,0 +1,18 @@ +{ + "parameters": { + "Endpoint": "https://westus.api.cognitive.microsoft.com", + "Ocp-Apim-Subscription-Key": "{API key}", + "width": "500", + "height": "500", + "smartCropping": true, + "ImageUrl": { + "url": "{url}" + } + }, + "responses": { + "200": { + "headers": {}, + "body": "{Binary}" + } + } +} diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulGetAreaOfInterestWithStream.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulGetAreaOfInterestWithStream.json new file mode 100644 index 000000000000..ae32a21a551e --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulGetAreaOfInterestWithStream.json @@ -0,0 +1,26 @@ +{ + "parameters": { + "Endpoint": "https://westus.api.cognitive.microsoft.com", + "Ocp-Apim-Subscription-Key": "{API key}", + "Image": "{binary}" + }, + "responses": { + "200": { + "headers": {}, + "body": { + "areaOfInterest": { + "h": 951, + "w": 950, + "x": 160, + "y": 0 + }, + "requestId": "ed2de1c6-fb55-4686-b0da-4da6e05d283f", + "metadata": { + "format": "Jpeg", + "height": 951, + "width": 1378 + } + } + } + } +} diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulGetAreaOfInterestWithUrl.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulGetAreaOfInterestWithUrl.json new file mode 100644 index 000000000000..7d6d08208836 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulGetAreaOfInterestWithUrl.json @@ -0,0 +1,28 @@ +{ + "parameters": { + "Endpoint": "https://westus.api.cognitive.microsoft.com", + "Ocp-Apim-Subscription-Key": "{API key}", + "ImageUrl": { + "url": "{url}" + } + }, + "responses": { + "200": { + "headers": {}, + "body": { + "areaOfInterest": { + "h": 951, + "w": 950, + "x": 160, + "y": 0 + }, + "requestId": "ed2de1c6-fb55-4686-b0da-4da6e05d283f", + "metadata": { + "format": "Jpeg", + "height": 951, + "width": 1378 + } + } + } + } +} diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulGetReadResult.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulGetReadResult.json new file mode 100644 index 000000000000..944e52b9d113 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulGetReadResult.json @@ -0,0 +1,369 @@ +{ + "parameters": { + "Endpoint": "{Endpoint}", + "Ocp-Apim-Subscription-Key": 
"{API key}", + "operationId": "e56ffa6e-1ee4-4042-bc07-993db706c95f" + }, + "responses": { + "200": { + "headers": {}, + "body": { + "status": "succeeded", + "createdDateTime": "2019-10-03T14:32:04.236Z", + "lastUpdatedDateTime": "2019-10-03T14:38:14.852Z", + "analyzeResult": { + "version": "v3.1", + "readResults": [ + { + "page": 1, + "language": "en", + "angle": 49.59, + "width": 600, + "height": 400, + "unit": "pixel", + "lines": [ + { + "boundingBox": [ + 202, + 618, + 2047, + 643, + 2046, + 840, + 200, + 813 + ], + "text": "Our greatest glory is not", + "words": [ + { + "boundingBox": [ + 204, + 627, + 481, + 628, + 481, + 830, + 204, + 829 + ], + "text": "Our", + "confidence": 0.164 + }, + { + "boundingBox": [ + 519, + 628, + 1057, + 630, + 1057, + 832, + 518, + 830 + ], + "text": "greatest", + "confidence": 0.164 + }, + { + "boundingBox": [ + 1114, + 630, + 1549, + 631, + 1548, + 833, + 1114, + 832 + ], + "text": "glory", + "confidence": 0.164 + }, + { + "boundingBox": [ + 1586, + 631, + 1785, + 632, + 1784, + 834, + 1586, + 833 + ], + "text": "is", + "confidence": 0.164 + }, + { + "boundingBox": [ + 1822, + 632, + 2115, + 633, + 2115, + 835, + 1822, + 834 + ], + "text": "not", + "confidence": 0.164 + } + ] + }, + { + "boundingBox": [ + 420, + 1273, + 2954, + 1250, + 2958, + 1488, + 422, + 1511 + ], + "text": "but in rising every time we fall", + "words": [ + { + "boundingBox": [ + 423, + 1269, + 634, + 1268, + 635, + 1507, + 424, + 1508 + ], + "text": "but", + "confidence": 0.164 + }, + { + "boundingBox": [ + 667, + 1268, + 808, + 1268, + 809, + 1506, + 668, + 1507 + ], + "text": "in", + "confidence": 0.164 + }, + { + "boundingBox": [ + 874, + 1267, + 1289, + 1265, + 1290, + 1504, + 875, + 1506 + ], + "text": "rising", + "confidence": 0.164 + }, + { + "boundingBox": [ + 1331, + 1265, + 1771, + 1263, + 1772, + 1502, + 1332, + 1504 + ], + "text": "every", + "confidence": 0.164 + }, + { + "boundingBox": [ + 1812, + 1263, + 2178, + 1261, + 2179, + 1500, + 1813, + 1502 + ], + "text": "time", + "confidence": 0.164 + }, + { + "boundingBox": [ + 2219, + 1261, + 2510, + 1260, + 2511, + 1498, + 2220, + 1500 + ], + "text": "we", + "confidence": 0.164 + }, + { + "boundingBox": [ + 2551, + 1260, + 3016, + 1258, + 3017, + 1496, + 2552, + 1498 + ], + "text": "fall", + "confidence": 0.164 + } + ] + }, + { + "language": "es", + "boundingBox": [ + 1612, + 903, + 2744, + 935, + 2738, + 1139, + 1607, + 1107 + ], + "text": "Viva la vida", + "words": [ + { + "boundingBox": [ + 323, + 454, + 416, + 449, + 418, + 494, + 325, + 501 + ], + "text": "Viva", + "confidence": 0.164 + }, + { + "boundingBox": [ + 92, + 550, + 429, + 541, + 430, + 591, + 94, + 600 + ], + "text": "la", + "confidence": 0.164 + }, + { + "boundingBox": [ + 58, + 466, + 268, + 458, + 270, + 505, + 161, + 512 + ], + "text": "vida", + "confidence": 0.164 + } + ] + } + ] + }, + { + "page": 2, + "language": "en", + "angle": 1.32, + "width": 600, + "height": 400, + "unit": "pixel", + "lines": [ + { + "boundingBox": [ + 1612, + 903, + 2744, + 935, + 2738, + 1139, + 1607, + 1107 + ], + "text": "in never failing ,", + "words": [ + { + "boundingBox": [ + 1611, + 934, + 1707, + 933, + 1708, + 1147, + 1613, + 1147 + ], + "text": "in", + "confidence": 0.164 + }, + { + "boundingBox": [ + 1753, + 933, + 2132, + 930, + 2133, + 1144, + 1754, + 1146 + ], + "text": "never", + "confidence": 0.999 + }, + { + "boundingBox": [ + 2162, + 930, + 2673, + 927, + 2674, + 1140, + 2164, + 1144 + ], + "text": "failing", + "confidence": 0.164 + }, + { + 
"boundingBox": [ + 2703, + 926, + 2788, + 926, + 2790, + 1139, + 2705, + 1140 + ], + "text": ",", + "confidence": 0.164 + } + ] + } + ] + } + ] + } + } + } + } +} diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulListModels.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulListModels.json new file mode 100644 index 000000000000..54e72d376431 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulListModels.json @@ -0,0 +1,27 @@ +{ + "parameters": { + "Endpoint": "https://westus.api.cognitive.microsoft.com", + "Ocp-Apim-Subscription-Key": "{API key}" + }, + "responses": { + "200": { + "headers": {}, + "body": { + "models": [ + { + "name": "celebrities", + "categories": [ + "people_" + ] + }, + { + "name": "landmarks", + "categories": [ + "building_" + ] + } + ] + } + } + } +} diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulReadWithStream.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulReadWithStream.json new file mode 100644 index 000000000000..a9728b640867 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulReadWithStream.json @@ -0,0 +1,15 @@ +{ + "parameters": { + "Endpoint": "{Endpoint}", + "Ocp-Apim-Subscription-Key": "{API key}", + "language": "en", + "Image": "{binary}" + }, + "responses": { + "202": { + "header": { + "location": "https://{domain}/vision/v3.1/read/e56ffa6e-1ee4-4042-bc07-993db706c95f" + } + } + } +} diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulReadWithUrl.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulReadWithUrl.json new file mode 100644 index 000000000000..60c4dc56a96c --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulReadWithUrl.json @@ -0,0 +1,17 @@ +{ + "parameters": { + "Endpoint": "{Endpoint}", + "Ocp-Apim-Subscription-Key": "{API key}", + "language": "en", + "ImageUrl": { + "url": "{url}" + } + }, + "responses": { + "202": { + "header": { + "Operation-Location": "https://{domain}/vision/v3.1/read/e56ffa6e-1ee4-4042-bc07-993db706c95f" + } + } + } +} diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulRecognizePrintedTextWithStream.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulRecognizePrintedTextWithStream.json new file mode 100644 index 000000000000..201d0fe48254 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulRecognizePrintedTextWithStream.json @@ -0,0 +1,77 @@ +{ + "parameters": { + "Endpoint": "https://westus.api.cognitive.microsoft.com", + "Ocp-Apim-Subscription-Key": "{API key}", + "detectOrientation": "true", + "language": "en", + "Image": "{binary}" + }, + "responses": { + "200": { + "headers": {}, + "body": { + "language": "en", + "textAngle": -2.0000000000000338, + "orientation": "Up", + "regions": [ + { + "boundingBox": "462,379,497,258", + "lines": [ + { + "boundingBox": "462,379,497,74", + "words": [ + { + "boundingBox": "462,379,41,73", + "text": "A" + }, + { + "boundingBox": "523,379,153,73", + "text": "GOAL" + }, + { + "boundingBox": "694,379,265,74", + "text": "WITHOUT" + } + ] + }, + { + "boundingBox": 
"565,471,289,74", + "words": [ + { + "boundingBox": "565,471,41,73", + "text": "A" + }, + { + "boundingBox": "626,471,150,73", + "text": "PLAN" + }, + { + "boundingBox": "801,472,53,73", + "text": "IS" + } + ] + }, + { + "boundingBox": "519,563,375,74", + "words": [ + { + "boundingBox": "519,563,149,74", + "text": "JUST" + }, + { + "boundingBox": "683,564,41,72", + "text": "A" + }, + { + "boundingBox": "741,564,153,73", + "text": "WISH" + } + ] + } + ] + } + ] + } + } + } +} diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulRecognizePrintedTextWithUrl.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulRecognizePrintedTextWithUrl.json new file mode 100644 index 000000000000..a0b8bc84e37b --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulRecognizePrintedTextWithUrl.json @@ -0,0 +1,79 @@ +{ + "parameters": { + "Endpoint": "https://westus.api.cognitive.microsoft.com", + "Ocp-Apim-Subscription-Key": "{API key}", + "detectOrientation": "true", + "language": "en", + "ImageUrl": { + "url": "{url}" + } + }, + "responses": { + "200": { + "headers": {}, + "body": { + "language": "en", + "textAngle": -2.0000000000000338, + "orientation": "Up", + "regions": [ + { + "boundingBox": "462,379,497,258", + "lines": [ + { + "boundingBox": "462,379,497,74", + "words": [ + { + "boundingBox": "462,379,41,73", + "text": "A" + }, + { + "boundingBox": "523,379,153,73", + "text": "GOAL" + }, + { + "boundingBox": "694,379,265,74", + "text": "WITHOUT" + } + ] + }, + { + "boundingBox": "565,471,289,74", + "words": [ + { + "boundingBox": "565,471,41,73", + "text": "A" + }, + { + "boundingBox": "626,471,150,73", + "text": "PLAN" + }, + { + "boundingBox": "801,472,53,73", + "text": "IS" + } + ] + }, + { + "boundingBox": "519,563,375,74", + "words": [ + { + "boundingBox": "519,563,149,74", + "text": "JUST" + }, + { + "boundingBox": "683,564,41,72", + "text": "A" + }, + { + "boundingBox": "741,564,153,73", + "text": "WISH" + } + ] + } + ] + } + ] + } + } + } +} diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulTagImageWithStream.json b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulTagImageWithStream.json new file mode 100644 index 000000000000..813a645d0086 --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulTagImageWithStream.json @@ -0,0 +1,53 @@ +{ + "parameters": { + "Endpoint": "https://westus.api.cognitive.microsoft.com", + "Ocp-Apim-Subscription-Key": "{API key}", + "Image": "{binary}" + }, + "responses": { + "200": { + "body": { + "tags": [ + { + "name": "grass", + "confidence": 0.9999997615814209 + }, + { + "name": "outdoor", + "confidence": 0.99997067451477051 + }, + { + "name": "sky", + "confidence": 0.99928975105285645 + }, + { + "name": "building", + "confidence": 0.99646323919296265 + }, + { + "name": "house", + "confidence": 0.99279803037643433 + }, + { + "name": "lawn", + "confidence": 0.82268029451370239 + }, + { + "name": "green", + "confidence": 0.64122253656387329 + }, + { + "name": "residential", + "confidence": 0.31403225660324097 + } + ], + "requestId": "1ad0e45e-b7b4-4be3-8042-53be96103337", + "metadata": { + "width": 400, + "height": 400, + "format": "Jpeg" + } + } + } + } +} diff --git a/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulTagImageWithUrl.json 
b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulTagImageWithUrl.json new file mode 100644 index 000000000000..0dcf0304a33a --- /dev/null +++ b/specification/cognitiveservices/data-plane/ComputerVision/stable/v3.1/examples/SuccessfulTagImageWithUrl.json @@ -0,0 +1,55 @@ +{ + "parameters": { + "Endpoint": "https://westus.api.cognitive.microsoft.com", + "Ocp-Apim-Subscription-Key": "{API key}", + "ImageUrl": { + "url": "{url}" + } + }, + "responses": { + "200": { + "body": { + "tags": [ + { + "name": "grass", + "confidence": 0.9999997615814209 + }, + { + "name": "outdoor", + "confidence": 0.99997067451477051 + }, + { + "name": "sky", + "confidence": 0.99928975105285645 + }, + { + "name": "building", + "confidence": 0.99646323919296265 + }, + { + "name": "house", + "confidence": 0.99279803037643433 + }, + { + "name": "lawn", + "confidence": 0.82268029451370239 + }, + { + "name": "green", + "confidence": 0.64122253656387329 + }, + { + "name": "residential", + "confidence": 0.31403225660324097 + } + ], + "requestId": "1ad0e45e-b7b4-4be3-8042-53be96103337", + "metadata": { + "width": 400, + "height": 400, + "format": "Jpeg" + } + } + } + } +}
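A note for reviewers on the new stable Read endpoints: the Ocr.json spec above defines an asynchronous submit-then-poll flow. `POST /vision/v3.1/read/analyze` returns 202 with an `Operation-Location` header, and `GET /read/analyzeResults/{operationId}` reports `notStarted`/`running`/`failed`/`succeeded`. Below is a minimal sketch of that flow in Python; the endpoint, key, image URL, `requests` dependency, and one-second polling interval are illustrative choices, not part of the spec.

``` python
import time

import requests  # assumed third-party dependency, not mandated by the spec

# Illustrative placeholders -- substitute your own resource's values.
ENDPOINT = "https://westus.api.cognitive.microsoft.com"
KEY = "{API key}"


def read_image(image_url: str) -> dict:
    """Submit an image URL to the v3.1 Read operation and poll until done."""
    submit = requests.post(
        f"{ENDPOINT}/vision/v3.1/read/analyze",
        headers={"Ocp-Apim-Subscription-Key": KEY},
        json={"url": image_url},  # body matches the ImageUrl parameter
    )
    submit.raise_for_status()  # the spec's success status is 202 Accepted
    # Operation-Location carries the URL for GetReadResult; the operation ID
    # embedded in it expires in 48 hours per the header description above.
    operation_url = submit.headers["Operation-Location"]

    while True:
        result = requests.get(
            operation_url, headers={"Ocp-Apim-Subscription-Key": KEY}
        ).json()
        if result["status"] in ("succeeded", "failed"):
            return result
        time.sleep(1)  # polling interval is an arbitrary choice


result = read_image("https://example.com/sample.jpg")
if result["status"] == "succeeded":
    # readResults holds one entry per page; lines carry text and boundingBox.
    for page in result["analyzeResult"]["readResults"]:
        for line in page["lines"]:
            print(line["text"])
```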
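The `BoundingBox` definition above is an eight-number array: four (x, y) vertices of the detected quadrangle, listed clockwise from the top-left corner, in pixels for images and inches for PDFs. Pairing consecutive values recovers the corners; a small helper, purely illustrative:

``` python
def corners(bounding_box: list) -> list:
    """Pair an eight-number Read boundingBox into four (x, y) vertices."""
    return list(zip(bounding_box[0::2], bounding_box[1::2]))


# Using the first line of the SuccessfulGetReadResult example above:
# corners([202, 618, 2047, 643, 2046, 840, 200, 813])
# -> [(202, 618), (2047, 643), (2046, 840), (200, 813)]
```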
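By contrast, the legacy OCR path in ComputerVision.json is synchronous and shapes its results differently: the 200 response nests regions, lines, and words, and each `boundingBox` is a comma-separated `x,y,w,h` string rather than an eight-point array. A sketch under the same illustrative assumptions (endpoint, key, and image URL are placeholders; the non-overloaded `/ocr` path is the URL counterpart of the `/ocr?overload=stream` operation shown in the diff):

``` python
import requests  # assumed third-party dependency

ENDPOINT = "https://westus.api.cognitive.microsoft.com"
KEY = "{API key}"

resp = requests.post(
    f"{ENDPOINT}/vision/v3.1/ocr",
    params={"detectOrientation": "true", "language": "en"},
    headers={"Ocp-Apim-Subscription-Key": KEY},
    json={"url": "https://example.com/sign.jpg"},  # or POST octet-stream bytes
)
resp.raise_for_status()
ocr = resp.json()

# Walk the region/line/word hierarchy described in the 200 response.
for region in ocr["regions"]:
    for line in region["lines"]:
        print(" ".join(word["text"] for word in line["words"]))
```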