From 0df2f59504e61a3514ee54c78454b95ae28c8d47 Mon Sep 17 00:00:00 2001 From: SDKAuto Date: Tue, 10 Aug 2021 22:38:09 +0000 Subject: [PATCH] CodeGen from PR 15187 in Azure/azure-rest-api-specs Fixes for largefacelist and identify (#15187) Co-authored-by: Yang Chen --- .../cognitiveservices/v1.0/face/CHANGELOG.md | 68 ++++++- .../cognitiveservices/v1.0/face/_meta.json | 2 +- services/cognitiveservices/v1.0/face/enums.go | 45 ++++- services/cognitiveservices/v1.0/face/face.go | 175 +++++++++--------- .../v1.0/face/faceapi/interfaces.go | 6 +- .../v1.0/face/largefacelist.go | 91 +++++---- .../v1.0/face/largepersongroup.go | 26 +-- .../v1.0/face/largepersongroupperson.go | 46 ++--- services/cognitiveservices/v1.0/face/list.go | 68 +++---- .../cognitiveservices/v1.0/face/models.go | 47 +++-- .../v1.0/face/persongroup.go | 22 +-- .../v1.0/face/persongroupperson.go | 56 ++---- 12 files changed, 341 insertions(+), 311 deletions(-) diff --git a/services/cognitiveservices/v1.0/face/CHANGELOG.md b/services/cognitiveservices/v1.0/face/CHANGELOG.md index 52911e4cc5e4..656089d0d1c4 100644 --- a/services/cognitiveservices/v1.0/face/CHANGELOG.md +++ b/services/cognitiveservices/v1.0/face/CHANGELOG.md @@ -1,2 +1,68 @@ -# Change History +# Unreleased +## Breaking Changes + +### Removed Constants + +1. AccessoryType.Glasses +1. AccessoryType.HeadWear +1. AccessoryType.Mask + +### Signature Changes + +#### Funcs + +1. Client.DetectWithStream + - Params + - From: context.Context, io.ReadCloser, *bool, *bool, []AttributeType, RecognitionModel, *bool, DetectionModel + - To: context.Context, io.ReadCloser, *bool, *bool, []AttributeType, RecognitionModel, *bool, DetectionModel, *int32 +1. Client.DetectWithStreamPreparer + - Params + - From: context.Context, io.ReadCloser, *bool, *bool, []AttributeType, RecognitionModel, *bool, DetectionModel + - To: context.Context, io.ReadCloser, *bool, *bool, []AttributeType, RecognitionModel, *bool, DetectionModel, *int32 +1. 
Client.DetectWithURL + - Params + - From: context.Context, ImageURL, *bool, *bool, []AttributeType, RecognitionModel, *bool, DetectionModel + - To: context.Context, ImageURL, *bool, *bool, []AttributeType, RecognitionModel, *bool, DetectionModel, *int32 +1. Client.DetectWithURLPreparer + - Params + - From: context.Context, ImageURL, *bool, *bool, []AttributeType, RecognitionModel, *bool, DetectionModel + - To: context.Context, ImageURL, *bool, *bool, []AttributeType, RecognitionModel, *bool, DetectionModel, *int32 +1. LargeFaceListClient.List + - Params + - From: context.Context, *bool + - To: context.Context, *bool, string, *int32 +1. LargeFaceListClient.ListPreparer + - Params + - From: context.Context, *bool + - To: context.Context, *bool, string, *int32 + +## Additive Changes + +### New Constants + +1. AccessoryType.AccessoryTypeGlasses +1. AccessoryType.AccessoryTypeHeadWear +1. AccessoryType.AccessoryTypeMask +1. AttributeType.AttributeTypeMask +1. DetectionModel.Detection03 +1. MaskType.FaceMask +1. MaskType.NoMask +1. MaskType.OtherMaskOrOcclusion +1. MaskType.Uncertain +1. RecognitionModel.Recognition04 + +### New Funcs + +1. PossibleMaskTypeValues() []MaskType + +### Struct Changes + +#### New Structs + +1. Mask +1. NonNullableNameAndNullableUserDataContract + +#### New Struct Fields + +1. 
Attributes.Mask diff --git a/services/cognitiveservices/v1.0/face/_meta.json b/services/cognitiveservices/v1.0/face/_meta.json index ae089d339db0..a7ac7ebc41c3 100644 --- a/services/cognitiveservices/v1.0/face/_meta.json +++ b/services/cognitiveservices/v1.0/face/_meta.json @@ -1,5 +1,5 @@ { - "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", + "commit": "be7b604843af31340425287488cba84e62dba855", "readme": "/_/azure-rest-api-specs/specification/cognitiveservices/data-plane/Face/readme.md", "tag": "release_1_0", "use": "@microsoft.azure/autorest.go@2.1.183", diff --git a/services/cognitiveservices/v1.0/face/enums.go b/services/cognitiveservices/v1.0/face/enums.go index 29b48133eae0..2e465c5ffec5 100644 --- a/services/cognitiveservices/v1.0/face/enums.go +++ b/services/cognitiveservices/v1.0/face/enums.go @@ -10,17 +10,17 @@ package face type AccessoryType string const ( - // Glasses ... - Glasses AccessoryType = "glasses" - // HeadWear ... - HeadWear AccessoryType = "headWear" - // Mask ... - Mask AccessoryType = "mask" + // AccessoryTypeGlasses ... + AccessoryTypeGlasses AccessoryType = "glasses" + // AccessoryTypeHeadWear ... + AccessoryTypeHeadWear AccessoryType = "headWear" + // AccessoryTypeMask ... + AccessoryTypeMask AccessoryType = "mask" ) // PossibleAccessoryTypeValues returns an array of possible values for the AccessoryType const type. func PossibleAccessoryTypeValues() []AccessoryType { - return []AccessoryType{Glasses, HeadWear, Mask} + return []AccessoryType{AccessoryTypeGlasses, AccessoryTypeHeadWear, AccessoryTypeMask} } // AttributeType enumerates the values for attribute type. @@ -49,6 +49,8 @@ const ( AttributeTypeHeadPose AttributeType = "headPose" // AttributeTypeMakeup ... AttributeTypeMakeup AttributeType = "makeup" + // AttributeTypeMask ... + AttributeTypeMask AttributeType = "mask" // AttributeTypeNoise ... AttributeTypeNoise AttributeType = "noise" // AttributeTypeOcclusion ... 
@@ -59,7 +61,7 @@ const ( // PossibleAttributeTypeValues returns an array of possible values for the AttributeType const type. func PossibleAttributeTypeValues() []AttributeType { - return []AttributeType{AttributeTypeAccessories, AttributeTypeAge, AttributeTypeBlur, AttributeTypeEmotion, AttributeTypeExposure, AttributeTypeFacialHair, AttributeTypeGender, AttributeTypeGlasses, AttributeTypeHair, AttributeTypeHeadPose, AttributeTypeMakeup, AttributeTypeNoise, AttributeTypeOcclusion, AttributeTypeSmile} + return []AttributeType{AttributeTypeAccessories, AttributeTypeAge, AttributeTypeBlur, AttributeTypeEmotion, AttributeTypeExposure, AttributeTypeFacialHair, AttributeTypeGender, AttributeTypeGlasses, AttributeTypeHair, AttributeTypeHeadPose, AttributeTypeMakeup, AttributeTypeMask, AttributeTypeNoise, AttributeTypeOcclusion, AttributeTypeSmile} } // BlurLevel enumerates the values for blur level. @@ -87,11 +89,13 @@ const ( Detection01 DetectionModel = "detection_01" // Detection02 ... Detection02 DetectionModel = "detection_02" + // Detection03 ... + Detection03 DetectionModel = "detection_03" ) // PossibleDetectionModelValues returns an array of possible values for the DetectionModel const type. func PossibleDetectionModelValues() []DetectionModel { - return []DetectionModel{Detection01, Detection02} + return []DetectionModel{Detection01, Detection02, Detection03} } // ExposureLevel enumerates the values for exposure level. @@ -187,6 +191,25 @@ func PossibleHairColorTypeValues() []HairColorType { return []HairColorType{Black, Blond, Brown, Gray, Other, Red, Unknown, White} } +// MaskType enumerates the values for mask type. +type MaskType string + +const ( + // FaceMask ... + FaceMask MaskType = "faceMask" + // NoMask ... + NoMask MaskType = "noMask" + // OtherMaskOrOcclusion ... + OtherMaskOrOcclusion MaskType = "otherMaskOrOcclusion" + // Uncertain ... 
+ Uncertain MaskType = "uncertain" +) + +// PossibleMaskTypeValues returns an array of possible values for the MaskType const type. +func PossibleMaskTypeValues() []MaskType { + return []MaskType{FaceMask, NoMask, OtherMaskOrOcclusion, Uncertain} +} + // NoiseLevel enumerates the values for noise level. type NoiseLevel string @@ -233,11 +256,13 @@ const ( Recognition02 RecognitionModel = "recognition_02" // Recognition03 ... Recognition03 RecognitionModel = "recognition_03" + // Recognition04 ... + Recognition04 RecognitionModel = "recognition_04" ) // PossibleRecognitionModelValues returns an array of possible values for the RecognitionModel const type. func PossibleRecognitionModelValues() []RecognitionModel { - return []RecognitionModel{Recognition01, Recognition02, Recognition03} + return []RecognitionModel{Recognition01, Recognition02, Recognition03, Recognition04} } // SnapshotApplyMode enumerates the values for snapshot apply mode. diff --git a/services/cognitiveservices/v1.0/face/face.go b/services/cognitiveservices/v1.0/face/face.go index d09005843282..b4350fad7d75 100644 --- a/services/cognitiveservices/v1.0/face/face.go +++ b/services/cognitiveservices/v1.0/face/face.go @@ -29,58 +29,41 @@ func NewClient(endpoint string) Client { // DetectWithStream detect human faces in an image, return face rectangles, and optionally with faceIds, landmarks, and // attributes.
// * No image will be stored. Only the extracted face feature will be stored on server. The faceId is an identifier of -// the face feature and will be used in [Face - -// Identify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/identify), [Face - -// Verify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/verifyfacetoface), and [Face - Find -// Similar](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/findsimilar). The stored face feature(s) -// will expire and be deleted 24 hours after the original detection call. +// the face feature and will be used in [Face - Identify](https://docs.microsoft.com/rest/api/faceapi/face/identify), +// [Face - Verify](https://docs.microsoft.com/rest/api/faceapi/face/verifyfacetoface), and [Face - Find +// Similar](https://docs.microsoft.com/rest/api/faceapi/face/findsimilar). The stored face feature(s) will expire and +// be deleted at the time specified by faceIdTimeToLive after the original detection call. // * Optional parameters include faceId, landmarks, and attributes. Attributes include age, gender, headPose, smile, -// facialHair, glasses, emotion, hair, makeup, occlusion, accessories, blur, exposure and noise. Some of the results -// returned for specific attributes may not be highly accurate. +// facialHair, glasses, emotion, hair, makeup, occlusion, accessories, blur, exposure, noise, and mask. Some of the +// results returned for specific attributes may not be highly accurate. // * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB. // * Up to 100 faces can be returned for an image. Faces are ranked by face rectangle size from large to small. 
-// * For optimal results when querying [Face - -// Identify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/identify), [Face - -// Verify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/verifyfacetoface), and [Face - Find -// Similar](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/findsimilar) ('returnFaceId' is true), -// please use faces that are: frontal, clear, and with a minimum size of 200x200 pixels (100 pixels between eyes). +// * For optimal results when querying [Face - Identify](https://docs.microsoft.com/rest/api/faceapi/face/identify), +// [Face - Verify](https://docs.microsoft.com/rest/api/faceapi/face/verifyfacetoface), and [Face - Find +// Similar](https://docs.microsoft.com/rest/api/faceapi/face/findsimilar) ('returnFaceId' is true), please use faces +// that are: frontal, clear, and with a minimum size of 200x200 pixels (100 pixels between eyes). // * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with // dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size. // * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to // [How to specify a detection // model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model) -// | Model | Recommended use-case(s) | -// | ---------- | -------- | -// | 'detection_01': | The default detection model for [Face - -// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl). Recommend for near frontal -// face detection. For scenarios with exceptionally large angle (head-pose) faces, occluded faces or wrong image -// orientation, the faces in such cases may not be detected. | -// | 'detection_02': | Detection model released in 2019 May with improved accuracy especially on small, side and blurry -// faces. 
| -// // * Different 'recognitionModel' values are provided. If follow-up operations like Verify, Identify, Find Similar are // needed, please specify the recognition model with 'recognitionModel' parameter. The default value for // 'recognitionModel' is 'recognition_01', if latest model needed, please explicitly specify the model you need in this // parameter. Once specified, the detected faceIds will be associated with the specified recognition model. More -// details, please refer to [How to specify a recognition -// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model) -// | Model | Recommended use-case(s) | -// | ---------- | -------- | -// | 'recognition_01': | The default recognition model for [Face - -// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl). All those faceIds created -// before 2019 March are bonded with this recognition model. | -// | 'recognition_02': | Recognition model released in 2019 March. | -// | 'recognition_03': | Recognition model released in 2020 May. 'recognition_03' is recommended since its overall -// accuracy is improved compared with 'recognition_01' and 'recognition_02'. | +// details, please refer to [Specify a recognition +// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model). // Parameters: // imageParameter - an image stream. // returnFaceID - a value indicating whether the operation should return faceIds of detected faces. // returnFaceLandmarks - a value indicating whether the operation should return landmarks of the detected // faces. // returnFaceAttributes - analyze and return the one or more specified face attributes in the comma-separated -// string like "returnFaceAttributes=age,gender". Supported face attributes include age, gender, headPose, -// smile, facialHair, glasses and emotion. 
Note that each face attribute analysis has additional computational -// and time cost. +// string like "returnFaceAttributes=age,gender". The available attributes depends on the 'detectionModel' +// specified. 'detection_01' supports age, gender, headPose, smile, facialHair, glasses, emotion, hair, makeup, +// occlusion, accessories, blur, exposure, and noise. While 'detection_02' does not support any attributes and +// 'detection_03' only supports mask. Note that each face attribute analysis has additional computational and +// time cost. // recognitionModel - name of recognition model. Recognition model is used when the face features are extracted // and associated with detected faceIds, (Large)FaceList or (Large)PersonGroup. A recognition model name can be // provided when performing Face - Detect or (Large)FaceList - Create or (Large)PersonGroup - Create. The @@ -91,7 +74,9 @@ func NewClient(endpoint string) Client { // detection model name can be provided when performing Face - Detect or (Large)FaceList - Add Face or // (Large)PersonGroup - Add Face. The default value is 'detection_01', if another model is needed, please // explicitly specify it. -func (client Client) DetectWithStream(ctx context.Context, imageParameter io.ReadCloser, returnFaceID *bool, returnFaceLandmarks *bool, returnFaceAttributes []AttributeType, recognitionModel RecognitionModel, returnRecognitionModel *bool, detectionModel DetectionModel) (result ListDetectedFace, err error) { +// faceIDTimeToLive - the number of seconds for the faceId being cached. Supported range from 60 seconds up to +// 86400 seconds. The default value is 86400 (24 hours). 
+func (client Client) DetectWithStream(ctx context.Context, imageParameter io.ReadCloser, returnFaceID *bool, returnFaceLandmarks *bool, returnFaceAttributes []AttributeType, recognitionModel RecognitionModel, returnRecognitionModel *bool, detectionModel DetectionModel, faceIDTimeToLive *int32) (result ListDetectedFace, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/Client.DetectWithStream") defer func() { @@ -102,7 +87,16 @@ func (client Client) DetectWithStream(ctx context.Context, imageParameter io.Rea tracing.EndSpan(ctx, sc, err) }() } - req, err := client.DetectWithStreamPreparer(ctx, imageParameter, returnFaceID, returnFaceLandmarks, returnFaceAttributes, recognitionModel, returnRecognitionModel, detectionModel) + if err := validation.Validate([]validation.Validation{ + {TargetValue: faceIDTimeToLive, + Constraints: []validation.Constraint{{Target: "faceIDTimeToLive", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "faceIDTimeToLive", Name: validation.InclusiveMaximum, Rule: int64(86400), Chain: nil}, + {Target: "faceIDTimeToLive", Name: validation.InclusiveMinimum, Rule: int64(60), Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("face.Client", "DetectWithStream", err.Error()) + } + + req, err := client.DetectWithStreamPreparer(ctx, imageParameter, returnFaceID, returnFaceLandmarks, returnFaceAttributes, recognitionModel, returnRecognitionModel, detectionModel, faceIDTimeToLive) if err != nil { err = autorest.NewErrorWithError(err, "face.Client", "DetectWithStream", nil, "Failure preparing request") return @@ -125,7 +119,7 @@ func (client Client) DetectWithStream(ctx context.Context, imageParameter io.Rea } // DetectWithStreamPreparer prepares the DetectWithStream request. 
-func (client Client) DetectWithStreamPreparer(ctx context.Context, imageParameter io.ReadCloser, returnFaceID *bool, returnFaceLandmarks *bool, returnFaceAttributes []AttributeType, recognitionModel RecognitionModel, returnRecognitionModel *bool, detectionModel DetectionModel) (*http.Request, error) { +func (client Client) DetectWithStreamPreparer(ctx context.Context, imageParameter io.ReadCloser, returnFaceID *bool, returnFaceLandmarks *bool, returnFaceAttributes []AttributeType, recognitionModel RecognitionModel, returnRecognitionModel *bool, detectionModel DetectionModel, faceIDTimeToLive *int32) (*http.Request, error) { urlParameters := map[string]interface{}{ "Endpoint": client.Endpoint, } @@ -159,6 +153,11 @@ func (client Client) DetectWithStreamPreparer(ctx context.Context, imageParamete } else { queryParameters["detectionModel"] = autorest.Encode("query", "detection_01") } + if faceIDTimeToLive != nil { + queryParameters["faceIdTimeToLive"] = autorest.Encode("query", *faceIDTimeToLive) + } else { + queryParameters["faceIdTimeToLive"] = autorest.Encode("query", 86400) + } preparer := autorest.CreatePreparer( autorest.AsContentType("application/octet-stream"), @@ -191,58 +190,42 @@ func (client Client) DetectWithStreamResponder(resp *http.Response) (result List // DetectWithURL detect human faces in an image, return face rectangles, and optionally with faceIds, landmarks, and // attributes.
// * No image will be stored. Only the extracted face feature will be stored on server. The faceId is an identifier of -// the face feature and will be used in [Face - -// Identify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/identify), [Face - -// Verify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/verifyfacetoface), and [Face - Find -// Similar](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/findsimilar). The stored face feature(s) -// will expire and be deleted 24 hours after the original detection call. +// the face feature and will be used in [Face - Identify](https://docs.microsoft.com/rest/api/faceapi/face/identify), +// [Face - Verify](https://docs.microsoft.com/rest/api/faceapi/face/verifyfacetoface), and [Face - Find +// Similar](https://docs.microsoft.com/rest/api/faceapi/face/findsimilar). The stored face feature(s) will expire and +// be deleted at the time specified by faceIdTimeToLive after the original detection call. // * Optional parameters include faceId, landmarks, and attributes. Attributes include age, gender, headPose, smile, -// facialHair, glasses, emotion, hair, makeup, occlusion, accessories, blur, exposure and noise. Some of the results -// returned for specific attributes may not be highly accurate. +// facialHair, glasses, emotion, hair, makeup, occlusion, accessories, blur, exposure, noise, and mask. Some of the +// results returned for specific attributes may not be highly accurate. // * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB. // * Up to 100 faces can be returned for an image. Faces are ranked by face rectangle size from large to small. 
-// * For optimal results when querying [Face - -// Identify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/identify), [Face - -// Verify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/verifyfacetoface), and [Face - Find -// Similar](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/findsimilar) ('returnFaceId' is true), -// please use faces that are: frontal, clear, and with a minimum size of 200x200 pixels (100 pixels between eyes). +// * For optimal results when querying [Face - Identify](https://docs.microsoft.com/rest/api/faceapi/face/identify), +// [Face - Verify](https://docs.microsoft.com/rest/api/faceapi/face/verifyfacetoface), and [Face - Find +// Similar](https://docs.microsoft.com/rest/api/faceapi/face/findsimilar) ('returnFaceId' is true), please use faces +// that are: frontal, clear, and with a minimum size of 200x200 pixels (100 pixels between eyes). // * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with // dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size. // * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to // [How to specify a detection -// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model) -// | Model | Recommended use-case(s) | -// | ---------- | -------- | -// | 'detection_01': | The default detection model for [Face - -// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl). Recommend for near frontal -// face detection. For scenarios with exceptionally large angle (head-pose) faces, occluded faces or wrong image -// orientation, the faces in such cases may not be detected. | -// | 'detection_02': | Detection model released in 2019 May with improved accuracy especially on small, side and blurry -// faces. 
| +// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model). // // * Different 'recognitionModel' values are provided. If follow-up operations like Verify, Identify, Find Similar are // needed, please specify the recognition model with 'recognitionModel' parameter. The default value for // 'recognitionModel' is 'recognition_01', if latest model needed, please explicitly specify the model you need in this // parameter. Once specified, the detected faceIds will be associated with the specified recognition model. More -// details, please refer to [How to specify a recognition -// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model) -// | Model | Recommended use-case(s) | -// | ---------- | -------- | -// | 'recognition_01': | The default recognition model for [Face - -// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl). All those faceIds created -// before 2019 March are bonded with this recognition model. | -// | 'recognition_02': | Recognition model released in 2019 March. | -// | 'recognition_03': | Recognition model released in 2020 May. 'recognition_03' is recommended since its overall -// accuracy is improved compared with 'recognition_01' and 'recognition_02'. | +// details, please refer to [Specify a recognition +// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model). // Parameters: // imageURL - a JSON document with a URL pointing to the image that is to be analyzed. // returnFaceID - a value indicating whether the operation should return faceIds of detected faces. // returnFaceLandmarks - a value indicating whether the operation should return landmarks of the detected // faces. // returnFaceAttributes - analyze and return the one or more specified face attributes in the comma-separated -// string like "returnFaceAttributes=age,gender". 
Supported face attributes include age, gender, headPose, -// smile, facialHair, glasses and emotion. Note that each face attribute analysis has additional computational -// and time cost. +// string like "returnFaceAttributes=age,gender". The available attributes depends on the 'detectionModel' +// specified. 'detection_01' supports age, gender, headPose, smile, facialHair, glasses, emotion, hair, makeup, +// occlusion, accessories, blur, exposure, and noise. While 'detection_02' does not support any attributes and +// 'detection_03' only supports mask. Note that each face attribute analysis has additional computational and +// time cost. // recognitionModel - name of recognition model. Recognition model is used when the face features are extracted // and associated with detected faceIds, (Large)FaceList or (Large)PersonGroup. A recognition model name can be // provided when performing Face - Detect or (Large)FaceList - Create or (Large)PersonGroup - Create. The @@ -253,7 +236,9 @@ func (client Client) DetectWithStreamResponder(resp *http.Response) (result List // detection model name can be provided when performing Face - Detect or (Large)FaceList - Add Face or // (Large)PersonGroup - Add Face. The default value is 'detection_01', if another model is needed, please // explicitly specify it. -func (client Client) DetectWithURL(ctx context.Context, imageURL ImageURL, returnFaceID *bool, returnFaceLandmarks *bool, returnFaceAttributes []AttributeType, recognitionModel RecognitionModel, returnRecognitionModel *bool, detectionModel DetectionModel) (result ListDetectedFace, err error) { +// faceIDTimeToLive - the number of seconds for the faceId being cached. Supported range from 60 seconds up to +// 86400 seconds. The default value is 86400 (24 hours). 
+func (client Client) DetectWithURL(ctx context.Context, imageURL ImageURL, returnFaceID *bool, returnFaceLandmarks *bool, returnFaceAttributes []AttributeType, recognitionModel RecognitionModel, returnRecognitionModel *bool, detectionModel DetectionModel, faceIDTimeToLive *int32) (result ListDetectedFace, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/Client.DetectWithURL") defer func() { @@ -266,11 +251,16 @@ func (client Client) DetectWithURL(ctx context.Context, imageURL ImageURL, retur } if err := validation.Validate([]validation.Validation{ {TargetValue: imageURL, - Constraints: []validation.Constraint{{Target: "imageURL.URL", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + Constraints: []validation.Constraint{{Target: "imageURL.URL", Name: validation.Null, Rule: true, Chain: nil}}}, + {TargetValue: faceIDTimeToLive, + Constraints: []validation.Constraint{{Target: "faceIDTimeToLive", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "faceIDTimeToLive", Name: validation.InclusiveMaximum, Rule: int64(86400), Chain: nil}, + {Target: "faceIDTimeToLive", Name: validation.InclusiveMinimum, Rule: int64(60), Chain: nil}, + }}}}}); err != nil { return result, validation.NewError("face.Client", "DetectWithURL", err.Error()) } - req, err := client.DetectWithURLPreparer(ctx, imageURL, returnFaceID, returnFaceLandmarks, returnFaceAttributes, recognitionModel, returnRecognitionModel, detectionModel) + req, err := client.DetectWithURLPreparer(ctx, imageURL, returnFaceID, returnFaceLandmarks, returnFaceAttributes, recognitionModel, returnRecognitionModel, detectionModel, faceIDTimeToLive) if err != nil { err = autorest.NewErrorWithError(err, "face.Client", "DetectWithURL", nil, "Failure preparing request") return @@ -293,7 +283,7 @@ func (client Client) DetectWithURL(ctx context.Context, imageURL ImageURL, retur } // DetectWithURLPreparer prepares the DetectWithURL request. 
-func (client Client) DetectWithURLPreparer(ctx context.Context, imageURL ImageURL, returnFaceID *bool, returnFaceLandmarks *bool, returnFaceAttributes []AttributeType, recognitionModel RecognitionModel, returnRecognitionModel *bool, detectionModel DetectionModel) (*http.Request, error) { +func (client Client) DetectWithURLPreparer(ctx context.Context, imageURL ImageURL, returnFaceID *bool, returnFaceLandmarks *bool, returnFaceAttributes []AttributeType, recognitionModel RecognitionModel, returnRecognitionModel *bool, detectionModel DetectionModel, faceIDTimeToLive *int32) (*http.Request, error) { urlParameters := map[string]interface{}{ "Endpoint": client.Endpoint, } @@ -327,6 +317,11 @@ func (client Client) DetectWithURLPreparer(ctx context.Context, imageURL ImageUR } else { queryParameters["detectionModel"] = autorest.Encode("query", "detection_01") } + if faceIDTimeToLive != nil { + queryParameters["faceIdTimeToLive"] = autorest.Encode("query", *faceIDTimeToLive) + } else { + queryParameters["faceIdTimeToLive"] = autorest.Encode("query", 86400) + } preparer := autorest.CreatePreparer( autorest.AsContentType("application/json; charset=utf-8"), @@ -357,14 +352,15 @@ func (client Client) DetectWithURLResponder(resp *http.Response) (result ListDet } // FindSimilar given query face's faceId, to search the similar-looking faces from a faceId array, a face list or a -// large face list. faceId array contains the faces created by [Face - -// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl), which will expire 24 hours -// after creation. A "faceListId" is created by [FaceList - -// Create](https://docs.microsoft.com/rest/api/cognitiveservices/face/facelist/create) containing persistedFaceIds that -// will not expire. And a "largeFaceListId" is created by [LargeFaceList - -// Create](https://docs.microsoft.com/rest/api/cognitiveservices/face/largefacelist/create) containing persistedFaceIds -// that will also not expire. 
Depending on the input the returned similar faces list contains faceIds or -// persistedFaceIds ranked by similarity. +// large face list. faceId array contains the faces created by [Face - Detect With +// Url](https://docs.microsoft.com/rest/api/faceapi/face/detectwithurl) or [Face - Detect With +// Stream](https://docs.microsoft.com/rest/api/faceapi/face/detectwithstream), which will expire at the time specified +// by faceIdTimeToLive after creation. A "faceListId" is created by [FaceList - +// Create](https://docs.microsoft.com/rest/api/faceapi/facelist/create) containing persistedFaceIds that will not +// expire. And a "largeFaceListId" is created by [LargeFaceList - +// Create](https://docs.microsoft.com/rest/api/faceapi/largefacelist/create) containing persistedFaceIds that will also +// not expire. Depending on the input the returned similar faces list contains faceIds or persistedFaceIds ranked by +// similarity. //
Find similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default mode that it // tries to find faces of the same person as possible by using internal same-person thresholds. It is useful to find a // known person's other photos. Note that an empty list will be returned if no faces pass the internal thresholds. @@ -467,8 +463,7 @@ func (client Client) FindSimilarResponder(resp *http.Response) (result ListSimil // * MessyGroup is a special face group containing faces that cannot find any similar counterpart face from original // faces. The messyGroup will not appear in the result if all faces found their counterparts. // * Group API needs at least 2 candidate faces and 1000 at most. We suggest to try [Face - -// Verify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/verifyfacetoface) when you only have 2 -// candidate faces. +// Verify](https://docs.microsoft.com/rest/api/faceapi/face/verifyfacetoface) when you only have 2 candidate faces. // * The 'recognitionModel' associated with the query faces' faceIds should be the same. // Parameters: // body - request body for grouping. @@ -551,8 +546,8 @@ func (client Client) GroupResponder(resp *http.Response) (result GroupResult, er // faces in the person group (given by personGroupId) or large person group (given by largePersonGroupId), and return // candidate person(s) for that face ranked by similarity confidence. The person group/large person group should be // trained to make it ready for identification. See more in [PersonGroup - -// Train](https://docs.microsoft.com/rest/api/cognitiveservices/face/persongroup/train) and [LargePersonGroup - -// Train](https://docs.microsoft.com/rest/api/cognitiveservices/face/largepersongroup/train). +// Train](https://docs.microsoft.com/rest/api/faceapi/persongroup/train) and [LargePersonGroup - +// Train](https://docs.microsoft.com/rest/api/faceapi/largepersongroup/train). //
// // Remarks:
@@ -563,8 +558,8 @@ func (client Client) GroupResponder(resp *http.Response) (result GroupResult, er // clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. // * Number of candidates returned is restricted by maxNumOfCandidatesReturned and confidenceThreshold. If no person is // identified, the returned candidates will be an empty array. -// * Try [Face - Find Similar](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/findsimilar) when you -// need to find similar faces from a face list/large face list instead of a person group/large person group. +// * Try [Face - Find Similar](https://docs.microsoft.com/rest/api/faceapi/face/findsimilar) when you need to find +// similar faces from a face list/large face list instead of a person group/large person group. // * The 'recognitionModel' associated with the query faces' faceIds should be the same as the 'recognitionModel' used // by the target person group or large person group. // Parameters: @@ -593,7 +588,7 @@ func (client Client) Identify(ctx context.Context, body IdentifyRequest) (result {Target: "body.LargePersonGroupID", Name: validation.Pattern, Rule: `^[a-z0-9-_]+$`, Chain: nil}, }}, {Target: "body.MaxNumOfCandidatesReturned", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "body.MaxNumOfCandidatesReturned", Name: validation.InclusiveMaximum, Rule: int64(5), Chain: nil}, + Chain: []validation.Constraint{{Target: "body.MaxNumOfCandidatesReturned", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil}, {Target: "body.MaxNumOfCandidatesReturned", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, }}}}}); err != nil { return result, validation.NewError("face.Client", "Identify", err.Error()) diff --git a/services/cognitiveservices/v1.0/face/faceapi/interfaces.go b/services/cognitiveservices/v1.0/face/faceapi/interfaces.go index 3772d229198b..46caa7e65710 100644 --- a/services/cognitiveservices/v1.0/face/faceapi/interfaces.go 
+++ b/services/cognitiveservices/v1.0/face/faceapi/interfaces.go @@ -16,8 +16,8 @@ import ( // ClientAPI contains the set of methods on the Client type. type ClientAPI interface { - DetectWithStream(ctx context.Context, imageParameter io.ReadCloser, returnFaceID *bool, returnFaceLandmarks *bool, returnFaceAttributes []face.AttributeType, recognitionModel face.RecognitionModel, returnRecognitionModel *bool, detectionModel face.DetectionModel) (result face.ListDetectedFace, err error) - DetectWithURL(ctx context.Context, imageURL face.ImageURL, returnFaceID *bool, returnFaceLandmarks *bool, returnFaceAttributes []face.AttributeType, recognitionModel face.RecognitionModel, returnRecognitionModel *bool, detectionModel face.DetectionModel) (result face.ListDetectedFace, err error) + DetectWithStream(ctx context.Context, imageParameter io.ReadCloser, returnFaceID *bool, returnFaceLandmarks *bool, returnFaceAttributes []face.AttributeType, recognitionModel face.RecognitionModel, returnRecognitionModel *bool, detectionModel face.DetectionModel, faceIDTimeToLive *int32) (result face.ListDetectedFace, err error) + DetectWithURL(ctx context.Context, imageURL face.ImageURL, returnFaceID *bool, returnFaceLandmarks *bool, returnFaceAttributes []face.AttributeType, recognitionModel face.RecognitionModel, returnRecognitionModel *bool, detectionModel face.DetectionModel, faceIDTimeToLive *int32) (result face.ListDetectedFace, err error) FindSimilar(ctx context.Context, body face.FindSimilarRequest) (result face.ListSimilarFace, err error) Group(ctx context.Context, body face.GroupRequest) (result face.GroupResult, err error) Identify(ctx context.Context, body face.IdentifyRequest) (result face.ListIdentifyResult, err error) @@ -109,7 +109,7 @@ type LargeFaceListClientAPI interface { Get(ctx context.Context, largeFaceListID string, returnRecognitionModel *bool) (result face.LargeFaceList, err error) GetFace(ctx context.Context, largeFaceListID string, persistedFaceID uuid.UUID) 
(result face.PersistedFace, err error) GetTrainingStatus(ctx context.Context, largeFaceListID string) (result face.TrainingStatus, err error) - List(ctx context.Context, returnRecognitionModel *bool) (result face.ListLargeFaceList, err error) + List(ctx context.Context, returnRecognitionModel *bool, start string, top *int32) (result face.ListLargeFaceList, err error) ListFaces(ctx context.Context, largeFaceListID string, start string, top *int32) (result face.ListPersistedFace, err error) Train(ctx context.Context, largeFaceListID string) (result autorest.Response, err error) Update(ctx context.Context, largeFaceListID string, body face.NameAndUserDataContract) (result autorest.Response, err error) diff --git a/services/cognitiveservices/v1.0/face/largefacelist.go b/services/cognitiveservices/v1.0/face/largefacelist.go index 4293e941a1e7..2e0e1df1a6e5 100644 --- a/services/cognitiveservices/v1.0/face/largefacelist.go +++ b/services/cognitiveservices/v1.0/face/largefacelist.go @@ -31,17 +31,17 @@ func NewLargeFaceListClient(endpoint string) LargeFaceListClient { //
To deal with an image contains multiple faces, input face can be specified as an image with a targetFace // rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted // face feature will be stored on server until [LargeFaceList Face - -// Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/largefacelist/deleteface) or [LargeFaceList - -// Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/largefacelist/delete) is called. +// Delete](https://docs.microsoft.com/rest/api/faceapi/largefacelist/deleteface) or [LargeFaceList - +// Delete](https://docs.microsoft.com/rest/api/faceapi/largefacelist/delete) is called. //
Note persistedFaceId is different from faceId generated by [Face - -// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl). +// Detect](https://docs.microsoft.com/rest/api/faceapi/face/detectwithurl). // * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, // and face size is 200x200 pixels (100 pixels between eyes) or bigger. // * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB. // * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the // provided "targetFace" rectangle is not returned from [Face - -// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl), there’s no guarantee to -// detect and add the face successfully. +// Detect](https://docs.microsoft.com/rest/api/faceapi/face/detectwithurl), there’s no guarantee to detect and add the +// face successfully. // * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures. // * Adding/deleting faces to/from a same face list are processed sequentially and to/from different face lists are in // parallel. @@ -49,15 +49,7 @@ func NewLargeFaceListClient(endpoint string) LargeFaceListClient { // dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size. // * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to // [How to specify a detection -// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model) -// | Model | Recommended use-case(s) | -// | ---------- | -------- | -// | 'detection_01': | The default detection model for [LargeFaceList - Add -// Face](https://docs.microsoft.com/rest/api/cognitiveservices/face/largefacelist/addfacefromurl). 
Recommend for near -// frontal face detection. For scenarios with exceptionally large angle (head-pose) faces, occluded faces or wrong -// image orientation, the faces in such cases may not be detected. | -// | 'detection_02': | Detection model released in 2019 May with improved accuracy especially on small, side and blurry -// faces. | +// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model). // // Quota: // * Free-tier subscription quota: 1,000 faces per large face list. @@ -172,17 +164,17 @@ func (client LargeFaceListClient) AddFaceFromStreamResponder(resp *http.Response //
To deal with an image contains multiple faces, input face can be specified as an image with a targetFace // rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted // face feature will be stored on server until [LargeFaceList Face - -// Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/largefacelist/deleteface) or [LargeFaceList - -// Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/largefacelist/delete) is called. +// Delete](https://docs.microsoft.com/rest/api/faceapi/largefacelist/deleteface) or [LargeFaceList - +// Delete](https://docs.microsoft.com/rest/api/faceapi/largefacelist/delete) is called. //
Note persistedFaceId is different from faceId generated by [Face - -// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl). +// Detect](https://docs.microsoft.com/rest/api/faceapi/face/detectwithurl). // * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, // and face size is 200x200 pixels (100 pixels between eyes) or bigger. // * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB. // * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the // provided "targetFace" rectangle is not returned from [Face - -// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl), there’s no guarantee to -// detect and add the face successfully. +// Detect](https://docs.microsoft.com/rest/api/faceapi/face/detectwithurl), there’s no guarantee to detect and add the +// face successfully. // * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures. // * Adding/deleting faces to/from a same face list are processed sequentially and to/from different face lists are in // parallel. @@ -191,14 +183,6 @@ func (client LargeFaceListClient) AddFaceFromStreamResponder(resp *http.Response // * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to // [How to specify a detection // model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model) -// | Model | Recommended use-case(s) | -// | ---------- | -------- | -// | 'detection_01': | The default detection model for [LargeFaceList - Add -// Face](https://docs.microsoft.com/rest/api/cognitiveservices/face/largefacelist/addfacefromurl). Recommend for near -// frontal face detection. 
For scenarios with exceptionally large angle (head-pose) faces, occluded faces or wrong -// image orientation, the faces in such cases may not be detected. | -// | 'detection_02': | Detection model released in 2019 May with improved accuracy especially on small, side and blurry -// faces. | // // Quota: // * Free-tier subscription quota: 1,000 faces per large face list. @@ -314,29 +298,24 @@ func (client LargeFaceListClient) AddFaceFromURLResponder(resp *http.Response) ( // Create create an empty large face list with user-specified largeFaceListId, name, an optional userData and // recognitionModel. //
Large face list is a list of faces, up to 1,000,000 faces, and used by [Face - Find -// Similar](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/findsimilar). +// Similar](https://docs.microsoft.com/rest/api/faceapi/face/findsimilar). //
After creation, user should use [LargeFaceList Face - -// Add](https://docs.microsoft.com/rest/api/cognitiveservices/face/largefacelist/addfacefromurl) to import the faces -// and [LargeFaceList - Train](https://docs.microsoft.com/rest/api/cognitiveservices/face/largefacelist/train) to make -// it ready for [Face - Find Similar](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/findsimilar). No -// image will be stored. Only the extracted face features are stored on server until [LargeFaceList - -// Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/largefacelist/delete) is called. +// Add](https://docs.microsoft.com/rest/api/faceapi/largefacelist/addfacefromurl) to import the faces and +// [LargeFaceList - Train](https://docs.microsoft.com/rest/api/faceapi/largefacelist/train) to make it ready for [Face +// - Find Similar](https://docs.microsoft.com/rest/api/faceapi/face/findsimilar). No image will be stored. Only the +// extracted face features are stored on server until [LargeFaceList - +// Delete](https://docs.microsoft.com/rest/api/faceapi/largefacelist/delete) is called. //
Find Similar is used for scenario like finding celebrity-like faces, similar face filtering, or as a light // way face identification. But if the actual use is to identify person, please use -// [PersonGroup](https://docs.microsoft.com/rest/api/cognitiveservices/face/persongroup) / -// [LargePersonGroup](https://docs.microsoft.com/rest/api/cognitiveservices/face/largepersongroup) and [Face - -// Identify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/identify). +// [PersonGroup](https://docs.microsoft.com/rest/api/faceapi/persongroup) / +// [LargePersonGroup](https://docs.microsoft.com/rest/api/faceapi/largepersongroup) and [Face - +// Identify](https://docs.microsoft.com/rest/api/faceapi/face/identify). //
'recognitionModel' should be specified to associate with this large face list. The default value for // 'recognitionModel' is 'recognition_01', if the latest model needed, please explicitly specify the model you need in // this parameter. New faces that are added to an existing large face list will use the recognition model that's // already associated with the collection. Existing face features in a large face list can't be updated to features -// extracted by another version of recognition model. -// * 'recognition_01': The default recognition model for [LargeFaceList- -// Create](https://docs.microsoft.com/rest/api/cognitiveservices/face/largefacelist/create). All those large face lists -// created before 2019 March are bonded with this recognition model. -// * 'recognition_02': Recognition model released in 2019 March. -// * 'recognition_03': Recognition model released in 2020 May. 'recognition_03' is recommended since its overall -// accuracy is improved compared with 'recognition_01' and 'recognition_02'. +// extracted by another version of recognition model. Please refer to [Specify a recognition +// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model). // // Large face list quota: // * Free-tier subscription quota: 64 large face lists. @@ -827,7 +806,7 @@ func (client LargeFaceListClient) GetTrainingStatusResponder(resp *http.Response // List list large face lists’ information of largeFaceListId, name, userData and recognitionModel.
// To get face information inside largeFaceList use [LargeFaceList Face - -// Get](https://docs.microsoft.com/rest/api/cognitiveservices/face/largefacelist/getface)
+// Get](https://docs.microsoft.com/rest/api/faceapi/largefacelist/getface)
// * Large face lists are stored in alphabetical order of largeFaceListId. // * "start" parameter (string, optional) is a user-provided largeFaceListId value that returned entries have larger // ids by string comparison. "start" set to empty to indicate return from the first item. @@ -841,7 +820,10 @@ func (client LargeFaceListClient) GetTrainingStatusResponder(resp *http.Response // Parameters: // returnRecognitionModel - a value indicating whether the operation should return 'recognitionModel' in // response. -func (client LargeFaceListClient) List(ctx context.Context, returnRecognitionModel *bool) (result ListLargeFaceList, err error) { +// start - starting large face list id to return (used to list a range of large face lists). +// top - number of large face lists to return starting with the large face list id indicated by the 'start' +// parameter. +func (client LargeFaceListClient) List(ctx context.Context, returnRecognitionModel *bool, start string, top *int32) (result ListLargeFaceList, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/LargeFaceListClient.List") defer func() { @@ -852,7 +834,16 @@ func (client LargeFaceListClient) List(ctx context.Context, returnRecognitionMod tracing.EndSpan(ctx, sc, err) }() } - req, err := client.ListPreparer(ctx, returnRecognitionModel) + if err := validation.Validate([]validation.Validation{ + {TargetValue: top, + Constraints: []validation.Constraint{{Target: "top", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "top", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil}, + {Target: "top", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("face.LargeFaceListClient", "List", err.Error()) + } + + req, err := client.ListPreparer(ctx, returnRecognitionModel, start, top) if err != nil { err = autorest.NewErrorWithError(err, "face.LargeFaceListClient", "List", nil, "Failure preparing 
request") return @@ -875,7 +866,7 @@ func (client LargeFaceListClient) List(ctx context.Context, returnRecognitionMod } // ListPreparer prepares the List request. -func (client LargeFaceListClient) ListPreparer(ctx context.Context, returnRecognitionModel *bool) (*http.Request, error) { +func (client LargeFaceListClient) ListPreparer(ctx context.Context, returnRecognitionModel *bool, start string, top *int32) (*http.Request, error) { urlParameters := map[string]interface{}{ "Endpoint": client.Endpoint, } @@ -886,6 +877,12 @@ func (client LargeFaceListClient) ListPreparer(ctx context.Context, returnRecogn } else { queryParameters["returnRecognitionModel"] = autorest.Encode("query", false) } + if len(start) > 0 { + queryParameters["start"] = autorest.Encode("query", start) + } + if top != nil { + queryParameters["top"] = autorest.Encode("query", *top) + } preparer := autorest.CreatePreparer( autorest.AsGet(), diff --git a/services/cognitiveservices/v1.0/face/largepersongroup.go b/services/cognitiveservices/v1.0/face/largepersongroup.go index 2093bb01dd3e..13cadfa26882 100644 --- a/services/cognitiveservices/v1.0/face/largepersongroup.go +++ b/services/cognitiveservices/v1.0/face/largepersongroup.go @@ -31,26 +31,18 @@ func NewLargePersonGroupClient(endpoint string) LargePersonGroupClient { // to 1,000,000 // people. //
After creation, use [LargePersonGroup Person - -// Create](https://docs.microsoft.com/rest/api/cognitiveservices/face/largepersongroupperson/create) to add person into -// the group, and call [LargePersonGroup - -// Train](https://docs.microsoft.com/rest/api/cognitiveservices/face/largepersongroup/train) to get this group ready -// for [Face - Identify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/identify). +// Create](https://docs.microsoft.com/rest/api/faceapi/largepersongroupperson/create) to add person into the group, and +// call [LargePersonGroup - Train](https://docs.microsoft.com/rest/api/faceapi/largepersongroup/train) to get this +// group ready for [Face - Identify](https://docs.microsoft.com/rest/api/faceapi/face/identify). //
No image will be stored. Only the person's extracted face features and userData will be stored on server -// until [LargePersonGroup Person - -// Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/largepersongroupperson/delete) or -// [LargePersonGroup - Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/largepersongroup/delete) is -// called. +// until [LargePersonGroup Person - Delete](https://docs.microsoft.com/rest/api/faceapi/largepersongroupperson/delete) +// or [LargePersonGroup - Delete](https://docs.microsoft.com/rest/api/faceapi/largepersongroup/delete) is called. //
'recognitionModel' should be specified to associate with this large person group. The default value for // 'recognitionModel' is 'recognition_01', if the latest model needed, please explicitly specify the model you need in // this parameter. New faces that are added to an existing large person group will use the recognition model that's // already associated with the collection. Existing face features in a large person group can't be updated to features -// extracted by another version of recognition model. -// * 'recognition_01': The default recognition model for [LargePersonGroup - -// Create](https://docs.microsoft.com/rest/api/cognitiveservices/face/largepersongroup/create). All those large person -// groups created before 2019 March are bonded with this recognition model. -// * 'recognition_02': Recognition model released in 2019 March. -// * 'recognition_03': Recognition model released in 2020 May. 'recognition_03' is recommended since its overall -// accuracy is improved compared with 'recognition_01' and 'recognition_02'. +// extracted by another version of recognition model. Please refer to [Specify a face recognition +// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model). // // Large person group quota: // * Free-tier subscription quota: 1,000 large person groups. @@ -214,8 +206,8 @@ func (client LargePersonGroupClient) DeleteResponder(resp *http.Response) (resul // Get retrieve the information of a large person group, including its name, userData and recognitionModel. This API // returns large person group information only, use [LargePersonGroup Person - -// List](https://docs.microsoft.com/rest/api/cognitiveservices/face/largepersongroupperson/list) instead to retrieve -// person information under the large person group. +// List](https://docs.microsoft.com/rest/api/faceapi/largepersongroupperson/list) instead to retrieve person +// information under the large person group. 
// Parameters: // largePersonGroupID - id referencing a particular large person group. // returnRecognitionModel - a value indicating whether the operation should return 'recognitionModel' in diff --git a/services/cognitiveservices/v1.0/face/largepersongroupperson.go b/services/cognitiveservices/v1.0/face/largepersongroupperson.go index c5c873f0e42d..6d07d16c80a5 100644 --- a/services/cognitiveservices/v1.0/face/largepersongroupperson.go +++ b/services/cognitiveservices/v1.0/face/largepersongroupperson.go @@ -31,21 +31,19 @@ func NewLargePersonGroupPersonClient(endpoint string) LargePersonGroupPersonClie // with an image contains multiple faces, input face can be specified as an image with a targetFace rectangle. It // returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature will // be stored on server until [LargePersonGroup PersonFace - -// Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/largepersongroupperson/deleteface), -// [LargePersonGroup Person - -// Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/largepersongroupperson/delete) or -// [LargePersonGroup - Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/largepersongroup/delete) is -// called. +// Delete](https://docs.microsoft.com/rest/api/faceapi/largepersongroupperson/deleteface), [LargePersonGroup Person - +// Delete](https://docs.microsoft.com/rest/api/faceapi/largepersongroupperson/delete) or [LargePersonGroup - +// Delete](https://docs.microsoft.com/rest/api/faceapi/largepersongroup/delete) is called. //
Note persistedFaceId is different from faceId generated by [Face - -// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl). +// Detect](https://docs.microsoft.com/rest/api/faceapi/face/detectwithurl). // * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, // and face size is 200x200 pixels (100 pixels between eyes) or bigger. // * Each person entry can hold up to 248 faces. // * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB. // * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the // provided "targetFace" rectangle is not returned from [Face - -// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl), there’s no guarantee to -// detect and add the face successfully. +// Detect](https://docs.microsoft.com/rest/api/faceapi/face/detectwithurl), there’s no guarantee to detect and add the +// face successfully. // * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures. // * Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting faces to/from // different persons are processed in parallel. @@ -53,15 +51,7 @@ func NewLargePersonGroupPersonClient(endpoint string) LargePersonGroupPersonClie // dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size. // * Different 'detectionModel' values can be provided. 
To use and compare different detection models, please refer to // [How to specify a detection -// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model) -// | Model | Recommended use-case(s) | -// | ---------- | -------- | -// | 'detection_01': | The default detection model for [LargePersonGroup Person - Add -// Face](https://docs.microsoft.com/rest/api/cognitiveservices/face/largepersongroupperson/addfacefromurl). Recommend -// for near frontal face detection. For scenarios with exceptionally large angle (head-pose) faces, occluded faces or -// wrong image orientation, the faces in such cases may not be detected. | -// | 'detection_02': | Detection model released in 2019 May with improved accuracy especially on small, side and blurry -// faces. | +// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model). // Parameters: // largePersonGroupID - id referencing a particular large person group. // personID - id referencing a particular person. @@ -174,21 +164,19 @@ func (client LargePersonGroupPersonClient) AddFaceFromStreamResponder(resp *http // with an image contains multiple faces, input face can be specified as an image with a targetFace rectangle. It // returns a persistedFaceId representing the added face. No image will be stored. Only the extracted face feature will // be stored on server until [LargePersonGroup PersonFace - -// Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/largepersongroupperson/deleteface), -// [LargePersonGroup Person - -// Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/largepersongroupperson/delete) or -// [LargePersonGroup - Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/largepersongroup/delete) is -// called. 
+// Delete](https://docs.microsoft.com/rest/api/faceapi/largepersongroupperson/deleteface), [LargePersonGroup Person - +// Delete](https://docs.microsoft.com/rest/api/faceapi/largepersongroupperson/delete) or [LargePersonGroup - +// Delete](https://docs.microsoft.com/rest/api/faceapi/largepersongroup/delete) is called. //
Note persistedFaceId is different from faceId generated by [Face - -// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl). +// Detect](https://docs.microsoft.com/rest/api/faceapi/face/detectwithurl). // * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, // and face size is 200x200 pixels (100 pixels between eyes) or bigger. // * Each person entry can hold up to 248 faces. // * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB. // * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the // provided "targetFace" rectangle is not returned from [Face - -// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl), there’s no guarantee to -// detect and add the face successfully. +// Detect](https://docs.microsoft.com/rest/api/faceapi/face/detectwithurl), there’s no guarantee to detect and add the +// face successfully. // * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures. // * Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting faces to/from // different persons are processed in parallel. @@ -197,14 +185,6 @@ func (client LargePersonGroupPersonClient) AddFaceFromStreamResponder(resp *http // * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to // [How to specify a detection // model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model) -// | Model | Recommended use-case(s) | -// | ---------- | -------- | -// | 'detection_01': | The default detection model for [LargePersonGroup Person - Add -// Face](https://docs.microsoft.com/rest/api/cognitiveservices/face/largepersongroupperson/addfacefromurl). 
Recommend -// for near frontal face detection. For scenarios with exceptionally large angle (head-pose) faces, occluded faces or -// wrong image orientation, the faces in such cases may not be detected. | -// | 'detection_02': | Detection model released in 2019 May with improved accuracy especially on small, side and blurry -// faces. | // Parameters: // largePersonGroupID - id referencing a particular large person group. // personID - id referencing a particular person. diff --git a/services/cognitiveservices/v1.0/face/list.go b/services/cognitiveservices/v1.0/face/list.go index 30d00fedbff9..39d3ada07b22 100644 --- a/services/cognitiveservices/v1.0/face/list.go +++ b/services/cognitiveservices/v1.0/face/list.go @@ -31,17 +31,17 @@ func NewListClient(endpoint string) ListClient { //
To deal with an image contains multiple faces, input face can be specified as an image with a targetFace // rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted // face feature will be stored on server until [FaceList - Delete -// Face](https://docs.microsoft.com/rest/api/cognitiveservices/face/facelist/deleteface) or [FaceList - -// Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/facelist/delete) is called. +// Face](https://docs.microsoft.com/rest/api/faceapi/facelist/deleteface) or [FaceList - +// Delete](https://docs.microsoft.com/rest/api/faceapi/facelist/delete) is called. //
Note persistedFaceId is different from faceId generated by [Face - -// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl). +// Detect](https://docs.microsoft.com/rest/api/faceapi/face/detectwithurl). // * Higher face image quality means better detection and recognition precision. Please consider high-quality faces: // frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. // * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB. // * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the // provided "targetFace" rectangle is not returned from [Face - -// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl), there’s no guarantee to -// detect and add the face successfully. +// Detect](https://docs.microsoft.com/rest/api/faceapi/face/detectwithurl), there’s no guarantee to detect and add the +// face successfully. // * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures. // * Adding/deleting faces to/from a same face list are processed sequentially and to/from different face lists are in // parallel. @@ -49,15 +49,7 @@ func NewListClient(endpoint string) ListClient { // dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size. // * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to // [How to specify a detection -// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model) -// | Model | Recommended use-case(s) | -// | ---------- | -------- | -// | 'detection_01': | The default detection model for [FaceList - Add -// Face](https://docs.microsoft.com/rest/api/cognitiveservices/face/facelist/addfacefromurl). 
Recommend for near -// frontal face detection. For scenarios with exceptionally large angle (head-pose) faces, occluded faces or wrong -// image orientation, the faces in such cases may not be detected. | -// | 'detection_02': | Detection model released in 2019 May with improved accuracy especially on small, side and blurry -// faces. | +// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model). // Parameters: // faceListID - id referencing a particular face list. // imageParameter - an image stream. @@ -168,17 +160,17 @@ func (client ListClient) AddFaceFromStreamResponder(resp *http.Response) (result //
To deal with an image contains multiple faces, input face can be specified as an image with a targetFace // rectangle. It returns a persistedFaceId representing the added face. No image will be stored. Only the extracted // face feature will be stored on server until [FaceList - Delete -// Face](https://docs.microsoft.com/rest/api/cognitiveservices/face/facelist/deleteface) or [FaceList - -// Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/facelist/delete) is called. +// Face](https://docs.microsoft.com/rest/api/faceapi/facelist/deleteface) or [FaceList - +// Delete](https://docs.microsoft.com/rest/api/faceapi/facelist/delete) is called. //
Note persistedFaceId is different from faceId generated by [Face - -// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl). +// Detect](https://docs.microsoft.com/rest/api/faceapi/face/detectwithurl). // * Higher face image quality means better detection and recognition precision. Please consider high-quality faces: // frontal, clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. // * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB. // * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the // provided "targetFace" rectangle is not returned from [Face - -// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl), there’s no guarantee to -// detect and add the face successfully. +// Detect](https://docs.microsoft.com/rest/api/faceapi/face/detectwithurl), there’s no guarantee to detect and add the +// face successfully. // * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures. // * Adding/deleting faces to/from a same face list are processed sequentially and to/from different face lists are in // parallel. @@ -186,15 +178,7 @@ func (client ListClient) AddFaceFromStreamResponder(resp *http.Response) (result // dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size. // * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to // [How to specify a detection -// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model) -// | Model | Recommended use-case(s) | -// | ---------- | -------- | -// | 'detection_01': | The default detection model for [FaceList - Add -// Face](https://docs.microsoft.com/rest/api/cognitiveservices/face/facelist/addfacefromurl). 
Recommend for near -// frontal face detection. For scenarios with exceptionally large angle (head-pose) faces, occluded faces or wrong -// image orientation, the faces in such cases may not be detected. | -// | 'detection_02': | Detection model released in 2019 May with improved accuracy especially on small, side and blurry -// faces. | +// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model). // Parameters: // faceListID - id referencing a particular face list. // imageURL - a JSON document with a URL pointing to the image that is to be analyzed. @@ -306,29 +290,25 @@ func (client ListClient) AddFaceFromURLResponder(resp *http.Response) (result Pe // Create create an empty face list with user-specified faceListId, name, an optional userData and recognitionModel. Up // to 64 face lists are allowed in one subscription. //
Face list is a list of faces, up to 1,000 faces, and used by [Face - Find -// Similar](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/findsimilar). +// Similar](https://docs.microsoft.com/rest/api/faceapi/face/findsimilar). //
After creation, user should use [FaceList - Add -// Face](https://docs.microsoft.com/rest/api/cognitiveservices/face/facelist/addfacefromurl) to import the faces. No -// image will be stored. Only the extracted face features are stored on server until [FaceList - -// Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/facelist/delete) is called. +// Face](https://docs.microsoft.com/rest/api/faceapi/facelist/addfacefromurl) to import the faces. No image will be +// stored. Only the extracted face features are stored on server until [FaceList - +// Delete](https://docs.microsoft.com/rest/api/faceapi/facelist/delete) is called. //
Find Similar is used for scenario like finding celebrity-like faces, similar face filtering, or as a light // way face identification. But if the actual use is to identify person, please use -// [PersonGroup](https://docs.microsoft.com/rest/api/cognitiveservices/face/persongroup) / -// [LargePersonGroup](https://docs.microsoft.com/rest/api/cognitiveservices/face/largepersongroup) and [Face - -// Identify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/identify). -//
Please consider [LargeFaceList](https://docs.microsoft.com/rest/api/cognitiveservices/face/largefacelist) -// when the face number is large. It can support up to 1,000,000 faces. +// [PersonGroup](https://docs.microsoft.com/rest/api/faceapi/persongroup) / +// [LargePersonGroup](https://docs.microsoft.com/rest/api/faceapi/largepersongroup) and [Face - +// Identify](https://docs.microsoft.com/rest/api/faceapi/face/identify). +//
Please consider [LargeFaceList](https://docs.microsoft.com/rest/api/faceapi/largefacelist) when the face +// number is large. It can support up to 1,000,000 faces. //
'recognitionModel' should be specified to associate with this face list. The default value for // 'recognitionModel' is 'recognition_01', if the latest model needed, please explicitly specify the model you need in // this parameter. New faces that are added to an existing face list will use the recognition model that's already // associated with the collection. Existing face features in a face list can't be updated to features extracted by // another version of recognition model. -// * 'recognition_01': The default recognition model for [FaceList- -// Create](https://docs.microsoft.com/rest/api/cognitiveservices/face/facelist/create). All those face lists created -// before 2019 March are bonded with this recognition model. -// * 'recognition_02': Recognition model released in 2019 March. -// * 'recognition_03': Recognition model released in 2020 May. 'recognition_03' is recommended since its overall -// accuracy is improved compared with 'recognition_01' and 'recognition_02'. +// Please refer to [Specify a face recognition +// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model). // Parameters: // faceListID - id referencing a particular face list. // body - request body for creating a face list. @@ -656,7 +636,7 @@ func (client ListClient) GetResponder(resp *http.Response) (result List, err err // List list face lists’ faceListId, name, userData and recognitionModel.
// To get face information inside faceList use [FaceList - -// Get](https://docs.microsoft.com/rest/api/cognitiveservices/face/facelist/get) +// Get](https://docs.microsoft.com/rest/api/faceapi/facelist/get) // Parameters: // returnRecognitionModel - a value indicating whether the operation should return 'recognitionModel' in // response. diff --git a/services/cognitiveservices/v1.0/face/models.go b/services/cognitiveservices/v1.0/face/models.go index 745ba8498a98..e2d75be46f91 100644 --- a/services/cognitiveservices/v1.0/face/models.go +++ b/services/cognitiveservices/v1.0/face/models.go @@ -17,7 +17,7 @@ const fqdn = "github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/ // Accessory accessory item and corresponding confidence level. type Accessory struct { - // Type - Type of an accessory. Possible values include: 'HeadWear', 'Glasses', 'Mask' + // Type - Type of an accessory. Possible values include: 'AccessoryTypeHeadWear', 'AccessoryTypeGlasses', 'AccessoryTypeMask' Type AccessoryType `json:"type,omitempty"` // Confidence - Confidence level of an accessory Confidence *float64 `json:"confidence,omitempty"` @@ -54,7 +54,7 @@ type Attributes struct { Emotion *Emotion `json:"emotion,omitempty"` // Hair - Properties describing hair attributes. Hair *Hair `json:"hair,omitempty"` - // Makeup - Properties describing present makeups on a given face. + // Makeup - Properties describing the presence of makeup on a given face. Makeup *Makeup `json:"makeup,omitempty"` // Occlusion - Properties describing occlusions on a given face. Occlusion *Occlusion `json:"occlusion,omitempty"` @@ -66,6 +66,8 @@ type Attributes struct { Exposure *Exposure `json:"exposure,omitempty"` // Noise - Properties describing noise level of the image. Noise *Noise `json:"noise,omitempty"` + // Mask - Properties describing the presence of a mask on a given face. + Mask *Mask `json:"mask,omitempty"` } // Blur properties describing any presence of blur within the image. 
@@ -87,7 +89,7 @@ type Coordinate struct { // DetectedFace detected Face object. type DetectedFace struct { FaceID *uuid.UUID `json:"faceId,omitempty"` - // RecognitionModel - Possible values include: 'Recognition01', 'Recognition02', 'Recognition03' + // RecognitionModel - Possible values include: 'Recognition01', 'Recognition02', 'Recognition03', 'Recognition04' RecognitionModel RecognitionModel `json:"recognitionModel,omitempty"` FaceRectangle *Rectangle `json:"faceRectangle,omitempty"` FaceLandmarks *Landmarks `json:"faceLandmarks,omitempty"` @@ -129,13 +131,13 @@ type FacialHair struct { // FindSimilarRequest request body for find similar operation. type FindSimilarRequest struct { - // FaceID - FaceId of the query face. User needs to call Face - Detect first to get a valid faceId. Note that this faceId is not persisted and will expire 24 hours after the detection call + // FaceID - FaceId of the query face. User needs to call Face - Detect first to get a valid faceId. Note that this faceId is not persisted and will expire at the time specified by faceIdTimeToLive after the detection call FaceID *uuid.UUID `json:"faceId,omitempty"` // FaceListID - An existing user-specified unique candidate face list, created in Face List - Create a Face List. Face list contains a set of persistedFaceIds which are persisted and will never expire. Parameter faceListId, largeFaceListId and faceIds should not be provided at the same time. FaceListID *string `json:"faceListId,omitempty"` // LargeFaceListID - An existing user-specified unique candidate large face list, created in LargeFaceList - Create. Large face list contains a set of persistedFaceIds which are persisted and will never expire. Parameter faceListId, largeFaceListId and faceIds should not be provided at the same time. LargeFaceListID *string `json:"largeFaceListId,omitempty"` - // FaceIds - An array of candidate faceIds. 
All of them are created by Face - Detect and the faceIds will expire 24 hours after the detection call. The number of faceIds is limited to 1000. Parameter faceListId, largeFaceListId and faceIds should not be provided at the same time. + // FaceIds - An array of candidate faceIds. All of them are created by Face - Detect and the faceIds will expire at the time specified by faceIdTimeToLive after the detection call. The number of faceIds is limited to 1000. Parameter faceListId, largeFaceListId and faceIds should not be provided at the same time. FaceIds *[]uuid.UUID `json:"faceIds,omitempty"` // MaxNumOfCandidatesReturned - The number of top similar faces returned. The valid range is [1, 1000]. MaxNumOfCandidatesReturned *int32 `json:"maxNumOfCandidatesReturned,omitempty"` @@ -199,7 +201,7 @@ type IdentifyRequest struct { PersonGroupID *string `json:"personGroupId,omitempty"` // LargePersonGroupID - LargePersonGroupId of the target large person group, created by LargePersonGroup - Create. Parameter personGroupId and largePersonGroupId should not be provided at the same time. LargePersonGroupID *string `json:"largePersonGroupId,omitempty"` - // MaxNumOfCandidatesReturned - The range of maxNumOfCandidatesReturned is between 1 and 5 (default is 1). + // MaxNumOfCandidatesReturned - The range of maxNumOfCandidatesReturned is between 1 and 100 (default is 1). MaxNumOfCandidatesReturned *int32 `json:"maxNumOfCandidatesReturned,omitempty"` // ConfidenceThreshold - Confidence threshold of identification, used to judge whether one face belong to one person. The range of confidenceThreshold is [0, 1] (default specified by algorithm). ConfidenceThreshold *float64 `json:"confidenceThreshold,omitempty"` @@ -256,7 +258,7 @@ type LargeFaceList struct { autorest.Response `json:"-"` // LargeFaceListID - LargeFaceListId of the target large face list. 
LargeFaceListID *string `json:"largeFaceListId,omitempty"` - // RecognitionModel - Possible values include: 'Recognition01', 'Recognition02', 'Recognition03' + // RecognitionModel - Possible values include: 'Recognition01', 'Recognition02', 'Recognition03', 'Recognition04' RecognitionModel RecognitionModel `json:"recognitionModel,omitempty"` // Name - User defined name, maximum length is 128. Name *string `json:"name,omitempty"` @@ -269,7 +271,7 @@ type LargePersonGroup struct { autorest.Response `json:"-"` // LargePersonGroupID - LargePersonGroupId of the target large person groups LargePersonGroupID *string `json:"largePersonGroupId,omitempty"` - // RecognitionModel - Possible values include: 'Recognition01', 'Recognition02', 'Recognition03' + // RecognitionModel - Possible values include: 'Recognition01', 'Recognition02', 'Recognition03', 'Recognition04' RecognitionModel RecognitionModel `json:"recognitionModel,omitempty"` // Name - User defined name, maximum length is 128. Name *string `json:"name,omitempty"` @@ -284,7 +286,7 @@ type List struct { FaceListID *string `json:"faceListId,omitempty"` // PersistedFaces - Persisted faces within the face list. PersistedFaces *[]PersistedFace `json:"persistedFaces,omitempty"` - // RecognitionModel - Possible values include: 'Recognition01', 'Recognition02', 'Recognition03' + // RecognitionModel - Possible values include: 'Recognition01', 'Recognition02', 'Recognition03', 'Recognition04' RecognitionModel RecognitionModel `json:"recognitionModel,omitempty"` // Name - User defined name, maximum length is 128. Name *string `json:"name,omitempty"` @@ -352,7 +354,7 @@ type ListSnapshot struct { Value *[]Snapshot `json:"value,omitempty"` } -// Makeup properties describing present makeups on a given face. +// Makeup properties describing the presence of makeup on a given face. type Makeup struct { // EyeMakeup - A boolean value describing whether eye makeup is present on a face. 
EyeMakeup *bool `json:"eyeMakeup,omitempty"` @@ -360,10 +362,18 @@ type Makeup struct { LipMakeup *bool `json:"lipMakeup,omitempty"` } +// Mask properties describing the presence of a mask on a given face. +type Mask struct { + // Type - Mask type if any of the face. Possible values include: 'NoMask', 'FaceMask', 'OtherMaskOrOcclusion', 'Uncertain' + Type MaskType `json:"type,omitempty"` + // NoseAndMouthCovered - A boolean value indicating whether nose and mouth are covered. + NoseAndMouthCovered *bool `json:"noseAndMouthCovered,omitempty"` +} + // MetaDataContract a combination of user defined name and user specified data and recognition model name // for largePersonGroup/personGroup, and largeFaceList/faceList. type MetaDataContract struct { - // RecognitionModel - Possible values include: 'Recognition01', 'Recognition02', 'Recognition03' + // RecognitionModel - Possible values include: 'Recognition01', 'Recognition02', 'Recognition03', 'Recognition04' RecognitionModel RecognitionModel `json:"recognitionModel,omitempty"` // Name - User defined name, maximum length is 128. Name *string `json:"name,omitempty"` @@ -388,6 +398,15 @@ type Noise struct { Value *float64 `json:"value,omitempty"` } +// NonNullableNameAndNullableUserDataContract a combination of user defined name and user specified data +// for the person, largePersonGroup/personGroup, and largeFaceList/faceList. +type NonNullableNameAndNullableUserDataContract struct { + // Name - User defined name, maximum length is 128. + Name *string `json:"name,omitempty"` + // UserData - User specified data. Length should not exceed 16KB. + UserData *string `json:"userData,omitempty"` +} + // Occlusion properties describing occlusions on a given face. type Occlusion struct { // ForeheadOccluded - A boolean value indicating whether forehead is occluded. @@ -417,7 +436,7 @@ type OperationStatus struct { // PersistedFace personFace object. 
type PersistedFace struct { autorest.Response `json:"-"` - // PersistedFaceID - The persistedFaceId of the target face, which is persisted and will not expire. Different from faceId created by Face - Detect and will expire in 24 hours after the detection call. + // PersistedFaceID - The persistedFaceId of the target face, which is persisted and will not expire. Different from faceId created by Face - Detect and will expire at the time specified by faceIdTimeToLive after the detection call. PersistedFaceID *uuid.UUID `json:"persistedFaceId,omitempty"` // UserData - User-provided data attached to the face. The size limit is 1KB. UserData *string `json:"userData,omitempty"` @@ -441,7 +460,7 @@ type PersonGroup struct { autorest.Response `json:"-"` // PersonGroupID - PersonGroupId of the target person group. PersonGroupID *string `json:"personGroupId,omitempty"` - // RecognitionModel - Possible values include: 'Recognition01', 'Recognition02', 'Recognition03' + // RecognitionModel - Possible values include: 'Recognition01', 'Recognition02', 'Recognition03', 'Recognition04' RecognitionModel RecognitionModel `json:"recognitionModel,omitempty"` // Name - User defined name, maximum length is 128. Name *string `json:"name,omitempty"` @@ -463,7 +482,7 @@ type Rectangle struct { // SimilarFace response body for find similar face operation. type SimilarFace struct { - // FaceID - FaceId of candidate face when find by faceIds. faceId is created by Face - Detect and will expire 24 hours after the detection call + // FaceID - FaceId of candidate face when find by faceIds. faceId is created by Face - Detect and will expire at the time specified by faceIdTimeToLive after the detection call FaceID *uuid.UUID `json:"faceId,omitempty"` // PersistedFaceID - PersistedFaceId of candidate face when find by faceListId. persistedFaceId in face list is persisted and will not expire. 
As showed in below response PersistedFaceID *uuid.UUID `json:"persistedFaceId,omitempty"` diff --git a/services/cognitiveservices/v1.0/face/persongroup.go b/services/cognitiveservices/v1.0/face/persongroup.go index 851611ee3ccd..ab3b1df8524d 100644 --- a/services/cognitiveservices/v1.0/face/persongroup.go +++ b/services/cognitiveservices/v1.0/face/persongroup.go @@ -28,31 +28,23 @@ func NewPersonGroupClient(endpoint string) PersonGroupClient { // Create create a new person group with specified personGroupId, name, user-provided userData and recognitionModel. //
A person group is the container of the uploaded person data, including face recognition features. //
After creation, use [PersonGroup Person - -// Create](https://docs.microsoft.com/rest/api/cognitiveservices/face/persongroupperson/create) to add persons into the -// group, and then call [PersonGroup - -// Train](https://docs.microsoft.com/rest/api/cognitiveservices/face/persongroup/train) to get this group ready for -// [Face - Identify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/identify). +// Create](https://docs.microsoft.com/rest/api/faceapi/persongroupperson/create) to add persons into the group, and +// then call [PersonGroup - Train](https://docs.microsoft.com/rest/api/faceapi/persongroup/train) to get this group +// ready for [Face - Identify](https://docs.microsoft.com/rest/api/faceapi/face/identify). //
No image will be stored. Only the person's extracted face features and userData will be stored on server -// until [PersonGroup Person - -// Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/persongroupperson/delete) or [PersonGroup - -// Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/persongroup/delete) is called. +// until [PersonGroup Person - Delete](https://docs.microsoft.com/rest/api/faceapi/persongroupperson/delete) or +// [PersonGroup - Delete](https://docs.microsoft.com/rest/api/faceapi/persongroup/delete) is called. //
'recognitionModel' should be specified to associate with this person group. The default value for // 'recognitionModel' is 'recognition_01', if the latest model needed, please explicitly specify the model you need in // this parameter. New faces that are added to an existing person group will use the recognition model that's already // associated with the collection. Existing face features in a person group can't be updated to features extracted by // another version of recognition model. -// * 'recognition_01': The default recognition model for [PersonGroup - -// Create](https://docs.microsoft.com/rest/api/cognitiveservices/face/persongroup/create). All those person groups -// created before 2019 March are bonded with this recognition model. -// * 'recognition_02': Recognition model released in 2019 March. -// * 'recognition_03': Recognition model released in 2020 May. 'recognition_03' is recommended since its overall -// accuracy is improved compared with 'recognition_01' and 'recognition_02'. // // Person group quota: // * Free-tier subscription quota: 1,000 person groups. Each holds up to 1,000 persons. // * S0-tier subscription quota: 1,000,000 person groups. Each holds up to 10,000 persons. // * to handle larger scale face identification problem, please consider using -// [LargePersonGroup](https://docs.microsoft.com/rest/api/cognitiveservices/face/largepersongroup). +// [LargePersonGroup](https://docs.microsoft.com/rest/api/faceapi/largepersongroup). // Parameters: // personGroupID - id referencing a particular person group. // body - request body for creating new person group. @@ -211,7 +203,7 @@ func (client PersonGroupClient) DeleteResponder(resp *http.Response) (result aut } // Get retrieve person group name, userData and recognitionModel. To get person information under this personGroup, use -// [PersonGroup Person - List](https://docs.microsoft.com/rest/api/cognitiveservices/face/persongroupperson/list). 
+// [PersonGroup Person - List](https://docs.microsoft.com/rest/api/faceapi/persongroupperson/list). // Parameters: // personGroupID - id referencing a particular person group. // returnRecognitionModel - a value indicating whether the operation should return 'recognitionModel' in diff --git a/services/cognitiveservices/v1.0/face/persongroupperson.go b/services/cognitiveservices/v1.0/face/persongroupperson.go index ccd43349a560..dc7baf999719 100644 --- a/services/cognitiveservices/v1.0/face/persongroupperson.go +++ b/services/cognitiveservices/v1.0/face/persongroupperson.go @@ -31,19 +31,19 @@ func NewPersonGroupPersonClient(endpoint string) PersonGroupPersonClient { // an image contains multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a // persistedFaceId representing the added face. No image will be stored. Only the extracted face feature will be stored // on server until [PersonGroup PersonFace - -// Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/persongroupperson/deleteface), [PersonGroup -// Person - Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/persongroupperson/delete) or -// [PersonGroup - Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/persongroup/delete) is called. +// Delete](https://docs.microsoft.com/rest/api/faceapi/persongroupperson/deleteface), [PersonGroup Person - +// Delete](https://docs.microsoft.com/rest/api/faceapi/persongroupperson/delete) or [PersonGroup - +// Delete](https://docs.microsoft.com/rest/api/faceapi/persongroup/delete) is called. //
Note persistedFaceId is different from faceId generated by [Face - -// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl). +// Detect](https://docs.microsoft.com/rest/api/faceapi/face/detectwithurl). // * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, // clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. // * Each person entry can hold up to 248 faces. // * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB. // * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the // provided "targetFace" rectangle is not returned from [Face - -// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl), there’s no guarantee to -// detect and add the face successfully. +// Detect](https://docs.microsoft.com/rest/api/faceapi/face/detectwithurl), there’s no guarantee to detect and add the +// face successfully. // * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause // failures. // * Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting faces to/from @@ -52,15 +52,7 @@ func NewPersonGroupPersonClient(endpoint string) PersonGroupPersonClient { // dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size. // * Different 'detectionModel' values can be provided. 
To use and compare different detection models, please refer to // [How to specify a detection -// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model) -// | Model | Recommended use-case(s) | -// | ---------- | -------- | -// | 'detection_01': | The default detection model for [PersonGroup Person - Add -// Face](https://docs.microsoft.com/rest/api/cognitiveservices/face/persongroupperson/addfacefromurl). Recommend for -// near frontal face detection. For scenarios with exceptionally large angle (head-pose) faces, occluded faces or wrong -// image orientation, the faces in such cases may not be detected. | -// | 'detection_02': | Detection model released in 2019 May with improved accuracy especially on small, side and blurry -// faces. | +// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model). // Parameters: // personGroupID - id referencing a particular person group. // personID - id referencing a particular person. @@ -173,19 +165,19 @@ func (client PersonGroupPersonClient) AddFaceFromStreamResponder(resp *http.Resp // image contains multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a // persistedFaceId representing the added face. No image will be stored. Only the extracted face feature will be stored // on server until [PersonGroup PersonFace - -// Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/persongroupperson/deleteface), [PersonGroup -// Person - Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/persongroupperson/delete) or -// [PersonGroup - Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/persongroup/delete) is called. 
+// Delete](https://docs.microsoft.com/rest/api/faceapi/persongroupperson/deleteface), [PersonGroup Person - +// Delete](https://docs.microsoft.com/rest/api/faceapi/persongroupperson/delete) or [PersonGroup - +// Delete](https://docs.microsoft.com/rest/api/faceapi/persongroup/delete) is called. //
Note persistedFaceId is different from faceId generated by [Face - -// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl). +// Detect](https://docs.microsoft.com/rest/api/faceapi/face/detectwithurl). // * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, // clear, and face size is 200x200 pixels (100 pixels between eyes) or bigger. // * Each person entry can hold up to 248 faces. // * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB. // * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the // provided "targetFace" rectangle is not returned from [Face - -// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl), there’s no guarantee to -// detect and add the face successfully. +// Detect](https://docs.microsoft.com/rest/api/faceapi/face/detectwithurl), there’s no guarantee to detect and add the +// face successfully. // * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause // failures. // * Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting faces to/from @@ -194,15 +186,7 @@ func (client PersonGroupPersonClient) AddFaceFromStreamResponder(resp *http.Resp // dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size. // * Different 'detectionModel' values can be provided. 
To use and compare different detection models, please refer to // [How to specify a detection -// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model) -// | Model | Recommended use-case(s) | -// | ---------- | -------- | -// | 'detection_01': | The default detection model for [PersonGroup Person - Add -// Face](https://docs.microsoft.com/rest/api/cognitiveservices/face/persongroupperson/addfacefromurl). Recommend for -// near frontal face detection. For scenarios with exceptionally large angle (head-pose) faces, occluded faces or wrong -// image orientation, the faces in such cases may not be detected. | -// | 'detection_02': | Detection model released in 2019 May with improved accuracy especially on small, side and blurry -// faces. | +// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model). // Parameters: // personGroupID - id referencing a particular person group. // personID - id referencing a particular person. @@ -906,19 +890,19 @@ func (client PersonGroupPersonClient) UpdateResponder(resp *http.Response) (resu // contains multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a // persistedFaceId representing the added face. No image will be stored. Only the extracted face feature will be stored // on server until [PersonGroup PersonFace - -// Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/persongroupperson/deleteface), [PersonGroup -// Person - Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/persongroupperson/delete) or -// [PersonGroup - Delete](https://docs.microsoft.com/rest/api/cognitiveservices/face/persongroup/delete) is called. 
+// Delete](https://docs.microsoft.com/rest/api/faceapi/persongroupperson/deleteface), [PersonGroup Person - +// Delete](https://docs.microsoft.com/rest/api/faceapi/persongroupperson/delete) or [PersonGroup - +// Delete](https://docs.microsoft.com/rest/api/faceapi/persongroup/delete) is called. //
Note persistedFaceId is different from faceId generated by [Face - -// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl). +// Detect](https://docs.microsoft.com/rest/api/faceapi/face/detectwithurl). // * Higher face image quality means better recognition precision. Please consider high-quality faces: frontal, clear, // and face size is 200x200 pixels (100 pixels between eyes) or bigger. // * Each person entry can hold up to 248 faces. // * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB. // * "targetFace" rectangle should contain one face. Zero or multiple faces will be regarded as an error. If the // provided "targetFace" rectangle is not returned from [Face - -// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl), there’s no guarantee to -// detect and add the face successfully. +// Detect](https://docs.microsoft.com/rest/api/faceapi/face/detectwithurl), there’s no guarantee to detect and add the +// face successfully. // * Out of detectable face size (36x36 - 4096x4096 pixels), large head-pose, or large occlusions will cause failures. // * Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting faces to/from // different persons are processed in parallel.