From 1f941ccdd42ae3bbc12675aca84d759117babc62 Mon Sep 17 00:00:00 2001 From: Azure SDK for Python bot Date: Thu, 6 Jun 2019 08:49:06 -0700 Subject: [PATCH 1/6] [AutoPR cognitiveservices/data-plane/Face] [FaceAPI] Add detection model argument on Detect and AddFace methods (#5667) * Generated from 1d8183de7f54b28cda9df52c1d35e6484d49acac [FaceAPI] Add detection model argument on Detect and AddFace methods * Generated from 4ef1c813ae58c6edbad72d07f42a57db72db5508 Update detect and detection model-related descriptions * Generated from 3445d5ce23ebb7d0321ba0a7991075d735fa3678 [FaceAPI] Amend DetectionModel parameter description * Generated from 7c460c476831442f5ada9fc57e8892f61bff939e [FaceAPI] Make Add Faces description consistent accross multiple calls * Generated from 11957f52387ddc031a08403fcbb35bf09195ca95 [FaceAPI] Correct description of PersonGroup Person - Add Face * Generated from d7666d6019f3a8bedf2596cea2995f4a50585f61 [FaceAPI] Remove all mentions of large groups in persongroup description --- .../vision/face/models/__init__.py | 2 + .../vision/face/models/face_client_enums.py | 6 + .../face/operations/face_list_operations.py | 84 +++++++++- .../vision/face/operations/face_operations.py | 150 +++++++++++++++--- .../operations/large_face_list_operations.py | 89 ++++++++++- .../large_person_group_person_operations.py | 89 ++++++++++- .../person_group_person_operations.py | 111 +++++++++++-- 7 files changed, 478 insertions(+), 53 deletions(-) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/__init__.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/__init__.py index 39c2758f7e00..eb488346f616 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/__init__.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/__init__.py @@ -114,6 
+114,7 @@ SnapshotObjectType, OperationStatusType, FaceAttributeType, + DetectionModel, ) __all__ = [ @@ -175,4 +176,5 @@ 'SnapshotObjectType', 'OperationStatusType', 'FaceAttributeType', + 'DetectionModel', ] diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_client_enums.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_client_enums.py index 6eb661a0b2c3..b3fed2fd6d7f 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_client_enums.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_client_enums.py @@ -123,3 +123,9 @@ class FaceAttributeType(str, Enum): blur = "blur" exposure = "exposure" noise = "noise" + + +class DetectionModel(str, Enum): + + detection_01 = "detection_01" + detection_02 = "detection_02" diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/face_list_operations.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/face_list_operations.py index 5e8c39a10751..9a7420dea8ff 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/face_list_operations.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/face_list_operations.py @@ -397,7 +397,7 @@ def delete_face( delete_face.metadata = {'url': '/facelists/{faceListId}/persistedfaces/{persistedFaceId}'} def add_face_from_url( - self, face_list_id, url, user_data=None, target_face=None, custom_headers=None, raw=False, **operation_config): + self, face_list_id, url, user_data=None, target_face=None, detection_model="detection_01", custom_headers=None, raw=False, **operation_config): """Add a face to a specified 
face list, up to 1,000 faces.
To deal with an image contains multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a @@ -425,6 +425,21 @@ def add_face_from_url( head-pose, or large occlusions will cause failures. * Adding/deleting faces to/from a same face list are processed sequentially and to/from different face lists are in parallel. + * The minimum detectable face size is 36x36 pixels in an image no + larger than 1920x1080 pixels. Images with dimensions higher than + 1920x1080 pixels will need a proportionally larger minimum face size. + * Different 'detectionModel' values can be provided. To use and compare + different detection models, please refer to [How to specify a detection + model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model) + | Model | Recommended use-case(s) | + | ---------- | -------- | + | 'detection_01': | The default detection model for [FaceList - Add + Face](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395250). + Recommend for near frontal face detection. For scenarios with + exceptionally large angle (head-pose) faces, occluded faces or wrong + image orientation, the faces in such cases may not be detected. | + | 'detection_02': | Detection model released in 2019 May with improved + accuracy especially on small, side and blurry faces. |. :param face_list_id: Id referencing a particular face list. :type face_list_id: str @@ -439,6 +454,14 @@ def add_face_from_url( image, targetFace is required to specify which face to add. No targetFace means there is only one face detected in the entire image. :type target_face: list[int] + :param detection_model: Name of detection model. Detection model is + used to detect faces in the submitted image. A detection model name + can be provided when performing Face - Detect or (Large)FaceList - Add + Face or (Large)PersonGroup - Add Face. 
The default value is + 'detection_01', if another model is needed, please explicitly specify + it. Possible values include: 'detection_01', 'detection_02' + :type detection_model: str or + ~azure.cognitiveservices.vision.face.models.DetectionModel :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -466,6 +489,8 @@ def add_face_from_url( query_parameters['userData'] = self._serialize.query("user_data", user_data, 'str', max_length=1024) if target_face is not None: query_parameters['targetFace'] = self._serialize.query("target_face", target_face, '[int]', div=',') + if detection_model is not None: + query_parameters['detectionModel'] = self._serialize.query("detection_model", detection_model, 'str') # Construct headers header_parameters = {} @@ -497,10 +522,49 @@ def add_face_from_url( add_face_from_url.metadata = {'url': '/facelists/{faceListId}/persistedfaces'} def add_face_from_stream( - self, face_list_id, image, user_data=None, target_face=None, custom_headers=None, raw=False, callback=None, **operation_config): - """Add a face to a face list. The input face is specified as an image with - a targetFace rectangle. It returns a persistedFaceId representing the - added face, and persistedFaceId will not expire. + self, face_list_id, image, user_data=None, target_face=None, detection_model="detection_01", custom_headers=None, raw=False, callback=None, **operation_config): + """Add a face to a specified face list, up to 1,000 faces. +
To deal with an image contains multiple faces, input face can be + specified as an image with a targetFace rectangle. It returns a + persistedFaceId representing the added face. No image will be stored. + Only the extracted face feature will be stored on server until + [FaceList - Delete + Face](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395251) + or [FaceList - + Delete](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039524f) + is called. +
Note persistedFaceId is different from faceId generated by [Face + - + Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236). + * Higher face image quality means better detection and recognition + precision. Please consider high-quality faces: frontal, clear, and face + size is 200x200 pixels (100 pixels between eyes) or bigger. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The + allowed image file size is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple + faces will be regarded as an error. If the provided "targetFace" + rectangle is not returned from [Face - + Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), + there’s no guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large + head-pose, or large occlusions will cause failures. + * Adding/deleting faces to/from a same face list are processed + sequentially and to/from different face lists are in parallel. + * The minimum detectable face size is 36x36 pixels in an image no + larger than 1920x1080 pixels. Images with dimensions higher than + 1920x1080 pixels will need a proportionally larger minimum face size. + * Different 'detectionModel' values can be provided. To use and compare + different detection models, please refer to [How to specify a detection + model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model) + | Model | Recommended use-case(s) | + | ---------- | -------- | + | 'detection_01': | The default detection model for [FaceList - Add + Face](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395250). + Recommend for near frontal face detection. For scenarios with + exceptionally large angle (head-pose) faces, occluded faces or wrong + image orientation, the faces in such cases may not be detected. 
| + | 'detection_02': | Detection model released in 2019 May with improved + accuracy especially on small, side and blurry faces. |. :param face_list_id: Id referencing a particular face list. :type face_list_id: str @@ -515,6 +579,14 @@ def add_face_from_stream( image, targetFace is required to specify which face to add. No targetFace means there is only one face detected in the entire image. :type target_face: list[int] + :param detection_model: Name of detection model. Detection model is + used to detect faces in the submitted image. A detection model name + can be provided when performing Face - Detect or (Large)FaceList - Add + Face or (Large)PersonGroup - Add Face. The default value is + 'detection_01', if another model is needed, please explicitly specify + it. Possible values include: 'detection_01', 'detection_02' + :type detection_model: str or + ~azure.cognitiveservices.vision.face.models.DetectionModel :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -545,6 +617,8 @@ def add_face_from_stream( query_parameters['userData'] = self._serialize.query("user_data", user_data, 'str', max_length=1024) if target_face is not None: query_parameters['targetFace'] = self._serialize.query("target_face", target_face, '[int]', div=',') + if detection_model is not None: + query_parameters['detectionModel'] = self._serialize.query("detection_model", detection_model, 'str') # Construct headers header_parameters = {} diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/face_operations.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/face_operations.py index 85f8696dbe58..6339928243a6 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/face_operations.py +++ 
b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/face_operations.py @@ -397,13 +397,9 @@ def verify_face_to_face( verify_face_to_face.metadata = {'url': '/verify'} def detect_with_url( - self, url, return_face_id=True, return_face_landmarks=False, return_face_attributes=None, recognition_model="recognition_01", return_recognition_model=False, custom_headers=None, raw=False, **operation_config): + self, url, return_face_id=True, return_face_landmarks=False, return_face_attributes=None, recognition_model="recognition_01", return_recognition_model=False, detection_model="detection_01", custom_headers=None, raw=False, **operation_config): """Detect human faces in an image, return face rectangles, and optionally with faceIds, landmarks, and attributes.
- * Optional parameters including faceId, landmarks, and attributes. - Attributes include age, gender, headPose, smile, facialHair, glasses, - emotion, hair, makeup, occlusion, accessories, blur, exposure and - noise. * No image will be stored. Only the extracted face feature will be stored on server. The faceId is an identifier of the face feature and will be used in [Face - @@ -412,23 +408,40 @@ def detect_with_url( Verify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a), and [Face - Find Similar](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237). - It will expire 24 hours after the detection call. - * Higher face image quality means better detection and recognition - precision. Please consider high-quality faces: frontal, clear, and face - size is 200x200 pixels (100 pixels between eyes) or bigger. + The stored face feature(s) will expire and be deleted 24 hours after + the original detection call. + * Optional parameters include faceId, landmarks, and attributes. + Attributes include age, gender, headPose, smile, facialHair, glasses, + emotion, hair, makeup, occlusion, accessories, blur, exposure and + noise. Some of the results returned for specific attributes may not be + highly accurate. * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB. - * Faces are detectable when its size is 36x36 to 4096x4096 pixels. If - need to detect very small but clear faces, please try to enlarge the - input image. - * Up to 64 faces can be returned for an image. Faces are ranked by face - rectangle size from large to small. - * Face detector prefer frontal and near-frontal faces. There are cases - that faces may not be detected, e.g. exceptionally large face angles - (head-pose) or being occluded, or wrong image orientation. 
- * Attributes (age, gender, headPose, smile, facialHair, glasses, - emotion, hair, makeup, occlusion, accessories, blur, exposure and - noise) may not be perfectly accurate. + * Up to 100 faces can be returned for an image. Faces are ranked by + face rectangle size from large to small. + * For optimal results when querying [Face - + Identify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239), + [Face - + Verify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a), + and [Face - Find + Similar](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237) + ('returnFaceId' is true), please use faces that are: frontal, clear, + and with a minimum size of 200x200 pixels (100 pixels between eyes). + * The minimum detectable face size is 36x36 pixels in an image no + larger than 1920x1080 pixels. Images with dimensions higher than + 1920x1080 pixels will need a proportionally larger minimum face size. + * Different 'detectionModel' values can be provided. To use and compare + different detection models, please refer to [How to specify a detection + model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model) + | Model | Recommended use-case(s) | + | ---------- | -------- | + | 'detection_01': | The default detection model for [Face - + Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236). + Recommend for near frontal face detection. For scenarios with + exceptionally large angle (head-pose) faces, occluded faces or wrong + image orientation, the faces in such cases may not be detected. | + | 'detection_02': | Detection model released in 2019 May with improved + accuracy especially on small, side and blurry faces. | * Different 'recognitionModel' values are provided. If follow-up operations like Verify, Identify, Find Similar are needed, please specify the recognition model with 'recognitionModel' parameter. 
The @@ -437,7 +450,16 @@ def detect_with_url( parameter. Once specified, the detected faceIds will be associated with the specified recognition model. More details, please refer to [How to specify a recognition - model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model). + model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model) + | Model | Recommended use-case(s) | + | ---------- | -------- | + | 'recognition_01': | The default recognition model for [Face - + Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236). + All those faceIds created before 2019 March are bonded with this + recognition model. | + | 'recognition_02': | Recognition model released in 2019 March. + 'recognition_02' is recommended since its overall accuracy is improved + compared with 'recognition_01'. |. :param url: Publicly reachable URL of an image :type url: str @@ -468,6 +490,14 @@ def detect_with_url( :param return_recognition_model: A value indicating whether the operation should return 'recognitionModel' in response. :type return_recognition_model: bool + :param detection_model: Name of detection model. Detection model is + used to detect faces in the submitted image. A detection model name + can be provided when performing Face - Detect or (Large)FaceList - Add + Face or (Large)PersonGroup - Add Face. The default value is + 'detection_01', if another model is needed, please explicitly specify + it. 
Possible values include: 'detection_01', 'detection_02' + :type detection_model: str or + ~azure.cognitiveservices.vision.face.models.DetectionModel :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -500,6 +530,8 @@ def detect_with_url( query_parameters['recognitionModel'] = self._serialize.query("recognition_model", recognition_model, 'str') if return_recognition_model is not None: query_parameters['returnRecognitionModel'] = self._serialize.query("return_recognition_model", return_recognition_model, 'bool') + if detection_model is not None: + query_parameters['detectionModel'] = self._serialize.query("detection_model", detection_model, 'str') # Construct headers header_parameters = {} @@ -604,9 +636,69 @@ def verify_face_to_person( verify_face_to_person.metadata = {'url': '/verify'} def detect_with_stream( - self, image, return_face_id=True, return_face_landmarks=False, return_face_attributes=None, recognition_model="recognition_01", return_recognition_model=False, custom_headers=None, raw=False, callback=None, **operation_config): - """Detect human faces in an image and returns face locations, and - optionally with faceIds, landmarks, and attributes. + self, image, return_face_id=True, return_face_landmarks=False, return_face_attributes=None, recognition_model="recognition_01", return_recognition_model=False, detection_model="detection_01", custom_headers=None, raw=False, callback=None, **operation_config): + """Detect human faces in an image, return face rectangles, and optionally + with faceIds, landmarks, and attributes.
+ * No image will be stored. Only the extracted face feature will be + stored on server. The faceId is an identifier of the face feature and + will be used in [Face - + Identify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239), + [Face - + Verify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a), + and [Face - Find + Similar](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237). + The stored face feature(s) will expire and be deleted 24 hours after + the original detection call. + * Optional parameters include faceId, landmarks, and attributes. + Attributes include age, gender, headPose, smile, facialHair, glasses, + emotion, hair, makeup, occlusion, accessories, blur, exposure and + noise. Some of the results returned for specific attributes may not be + highly accurate. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The + allowed image file size is from 1KB to 6MB. + * Up to 100 faces can be returned for an image. Faces are ranked by + face rectangle size from large to small. + * For optimal results when querying [Face - + Identify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395239), + [Face - + Verify](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523a), + and [Face - Find + Similar](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395237) + ('returnFaceId' is true), please use faces that are: frontal, clear, + and with a minimum size of 200x200 pixels (100 pixels between eyes). + * The minimum detectable face size is 36x36 pixels in an image no + larger than 1920x1080 pixels. Images with dimensions higher than + 1920x1080 pixels will need a proportionally larger minimum face size. + * Different 'detectionModel' values can be provided. 
To use and compare + different detection models, please refer to [How to specify a detection + model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model) + | Model | Recommended use-case(s) | + | ---------- | -------- | + | 'detection_01': | The default detection model for [Face - + Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236). + Recommend for near frontal face detection. For scenarios with + exceptionally large angle (head-pose) faces, occluded faces or wrong + image orientation, the faces in such cases may not be detected. | + | 'detection_02': | Detection model released in 2019 May with improved + accuracy especially on small, side and blurry faces. | + * Different 'recognitionModel' values are provided. If follow-up + operations like Verify, Identify, Find Similar are needed, please + specify the recognition model with 'recognitionModel' parameter. The + default value for 'recognitionModel' is 'recognition_01', if latest + model needed, please explicitly specify the model you need in this + parameter. Once specified, the detected faceIds will be associated with + the specified recognition model. More details, please refer to [How to + specify a recognition + model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model) + | Model | Recommended use-case(s) | + | ---------- | -------- | + | 'recognition_01': | The default recognition model for [Face - + Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236). + All those faceIds created before 2019 March are bonded with this + recognition model. | + | 'recognition_02': | Recognition model released in 2019 March. + 'recognition_02' is recommended since its overall accuracy is improved + compared with 'recognition_01'. |. :param image: An image stream. 
:type image: Generator @@ -637,6 +729,14 @@ def detect_with_stream( :param return_recognition_model: A value indicating whether the operation should return 'recognitionModel' in response. :type return_recognition_model: bool + :param detection_model: Name of detection model. Detection model is + used to detect faces in the submitted image. A detection model name + can be provided when performing Face - Detect or (Large)FaceList - Add + Face or (Large)PersonGroup - Add Face. The default value is + 'detection_01', if another model is needed, please explicitly specify + it. Possible values include: 'detection_01', 'detection_02' + :type detection_model: str or + ~azure.cognitiveservices.vision.face.models.DetectionModel :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -672,6 +772,8 @@ def detect_with_stream( query_parameters['recognitionModel'] = self._serialize.query("recognition_model", recognition_model, 'str') if return_recognition_model is not None: query_parameters['returnRecognitionModel'] = self._serialize.query("return_recognition_model", return_recognition_model, 'bool') + if detection_model is not None: + query_parameters['detectionModel'] = self._serialize.query("detection_model", detection_model, 'str') # Construct headers header_parameters = {} diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/large_face_list_operations.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/large_face_list_operations.py index 33a5970a3d9e..ba70e8c6b02f 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/large_face_list_operations.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/large_face_list_operations.py @@ -639,7 
+639,7 @@ def update_face( update_face.metadata = {'url': '/largefacelists/{largeFaceListId}/persistedfaces/{persistedFaceId}'} def add_face_from_url( - self, large_face_list_id, url, user_data=None, target_face=None, custom_headers=None, raw=False, **operation_config): + self, large_face_list_id, url, user_data=None, target_face=None, detection_model="detection_01", custom_headers=None, raw=False, **operation_config): """Add a face to a specified large face list, up to 1,000,000 faces.
To deal with an image contains multiple faces, input face can be specified as an image with a targetFace rectangle. It returns a @@ -667,6 +667,22 @@ def add_face_from_url( head-pose, or large occlusions will cause failures. * Adding/deleting faces to/from a same face list are processed sequentially and to/from different face lists are in parallel. + * The minimum detectable face size is 36x36 pixels in an image no + larger than 1920x1080 pixels. Images with dimensions higher than + 1920x1080 pixels will need a proportionally larger minimum face size. + * Different 'detectionModel' values can be provided. To use and compare + different detection models, please refer to [How to specify a detection + model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model) + | Model | Recommended use-case(s) | + | ---------- | -------- | + | 'detection_01': | The default detection model for [LargeFaceList - + Add + Face](/docs/services/563879b61984550e40cbbe8d/operations/5a158c10d2de3616c086f2d3). + Recommend for near frontal face detection. For scenarios with + exceptionally large angle (head-pose) faces, occluded faces or wrong + image orientation, the faces in such cases may not be detected. | + | 'detection_02': | Detection model released in 2019 May with improved + accuracy especially on small, side and blurry faces. | Quota: * Free-tier subscription quota: 1,000 faces per large face list. * S0-tier subscription quota: 1,000,000 faces per large face list. @@ -685,6 +701,14 @@ def add_face_from_url( image, targetFace is required to specify which face to add. No targetFace means there is only one face detected in the entire image. :type target_face: list[int] + :param detection_model: Name of detection model. Detection model is + used to detect faces in the submitted image. A detection model name + can be provided when performing Face - Detect or (Large)FaceList - Add + Face or (Large)PersonGroup - Add Face. 
The default value is + 'detection_01', if another model is needed, please explicitly specify + it. Possible values include: 'detection_01', 'detection_02' + :type detection_model: str or + ~azure.cognitiveservices.vision.face.models.DetectionModel :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -712,6 +736,8 @@ def add_face_from_url( query_parameters['userData'] = self._serialize.query("user_data", user_data, 'str', max_length=1024) if target_face is not None: query_parameters['targetFace'] = self._serialize.query("target_face", target_face, '[int]', div=',') + if detection_model is not None: + query_parameters['detectionModel'] = self._serialize.query("detection_model", detection_model, 'str') # Construct headers header_parameters = {} @@ -810,10 +836,53 @@ def list_faces( list_faces.metadata = {'url': '/largefacelists/{largeFaceListId}/persistedfaces'} def add_face_from_stream( - self, large_face_list_id, image, user_data=None, target_face=None, custom_headers=None, raw=False, callback=None, **operation_config): - """Add a face to a large face list. The input face is specified as an - image with a targetFace rectangle. It returns a persistedFaceId - representing the added face, and persistedFaceId will not expire. + self, large_face_list_id, image, user_data=None, target_face=None, detection_model="detection_01", custom_headers=None, raw=False, callback=None, **operation_config): + """Add a face to a specified large face list, up to 1,000,000 faces. +
To deal with an image contains multiple faces, input face can be + specified as an image with a targetFace rectangle. It returns a + persistedFaceId representing the added face. No image will be stored. + Only the extracted face feature will be stored on server until + [LargeFaceList Face - + Delete](/docs/services/563879b61984550e40cbbe8d/operations/5a158c8ad2de3616c086f2d4) + or [LargeFaceList - + Delete](/docs/services/563879b61984550e40cbbe8d/operations/5a1580d5d2de3616c086f2cd) + is called. +
Note persistedFaceId is different from faceId generated by [Face + - + Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236). + * Higher face image quality means better recognition precision. Please + consider high-quality faces: frontal, clear, and face size is 200x200 + pixels (100 pixels between eyes) or bigger. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The + allowed image file size is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple + faces will be regarded as an error. If the provided "targetFace" + rectangle is not returned from [Face - + Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), + there’s no guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large + head-pose, or large occlusions will cause failures. + * Adding/deleting faces to/from a same face list are processed + sequentially and to/from different face lists are in parallel. + * The minimum detectable face size is 36x36 pixels in an image no + larger than 1920x1080 pixels. Images with dimensions higher than + 1920x1080 pixels will need a proportionally larger minimum face size. + * Different 'detectionModel' values can be provided. To use and compare + different detection models, please refer to [How to specify a detection + model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model) + | Model | Recommended use-case(s) | + | ---------- | -------- | + | 'detection_01': | The default detection model for [LargeFaceList - + Add + Face](/docs/services/563879b61984550e40cbbe8d/operations/5a158c10d2de3616c086f2d3). + Recommend for near frontal face detection. For scenarios with + exceptionally large angle (head-pose) faces, occluded faces or wrong + image orientation, the faces in such cases may not be detected. 
| + | 'detection_02': | Detection model released in 2019 May with improved + accuracy especially on small, side and blurry faces. | + Quota: + * Free-tier subscription quota: 1,000 faces per large face list. + * S0-tier subscription quota: 1,000,000 faces per large face list. :param large_face_list_id: Id referencing a particular large face list. @@ -829,6 +898,14 @@ def add_face_from_stream( image, targetFace is required to specify which face to add. No targetFace means there is only one face detected in the entire image. :type target_face: list[int] + :param detection_model: Name of detection model. Detection model is + used to detect faces in the submitted image. A detection model name + can be provided when performing Face - Detect or (Large)FaceList - Add + Face or (Large)PersonGroup - Add Face. The default value is + 'detection_01', if another model is needed, please explicitly specify + it. Possible values include: 'detection_01', 'detection_02' + :type detection_model: str or + ~azure.cognitiveservices.vision.face.models.DetectionModel :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -859,6 +936,8 @@ def add_face_from_stream( query_parameters['userData'] = self._serialize.query("user_data", user_data, 'str', max_length=1024) if target_face is not None: query_parameters['targetFace'] = self._serialize.query("target_face", target_face, '[int]', div=',') + if detection_model is not None: + query_parameters['detectionModel'] = self._serialize.query("detection_model", detection_model, 'str') # Construct headers header_parameters = {} diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/large_person_group_person_operations.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/large_person_group_person_operations.py index 
28f5aacf3b41..a61caa0bdd7b 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/large_person_group_person_operations.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/large_person_group_person_operations.py @@ -509,7 +509,7 @@ def update_face( update_face.metadata = {'url': '/largepersongroups/{largePersonGroupId}/persons/{personId}/persistedfaces/{persistedFaceId}'} def add_face_from_url( - self, large_person_group_id, person_id, url, user_data=None, target_face=None, custom_headers=None, raw=False, **operation_config): + self, large_person_group_id, person_id, url, user_data=None, target_face=None, detection_model="detection_01", custom_headers=None, raw=False, **operation_config): """Add a face to a person into a large person group for face identification or verification. To deal with an image contains multiple faces, input face can be specified as an image with a targetFace @@ -541,6 +541,22 @@ def add_face_from_url( * Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting faces to/from different persons are processed in parallel. + * The minimum detectable face size is 36x36 pixels in an image no + larger than 1920x1080 pixels. Images with dimensions higher than + 1920x1080 pixels will need a proportionally larger minimum face size. + * Different 'detectionModel' values can be provided. To use and compare + different detection models, please refer to [How to specify a detection + model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model) + | Model | Recommended use-case(s) | + | ---------- | -------- | + | 'detection_01': | The default detection model for [LargePersonGroup + Person - Add + Face](/docs/services/563879b61984550e40cbbe8d/operations/599adf2a3a7b9412a4d53f42). + Recommend for near frontal face detection. 
For scenarios with + exceptionally large angle (head-pose) faces, occluded faces or wrong + image orientation, the faces in such cases may not be detected. | + | 'detection_02': | Detection model released in 2019 May with improved + accuracy especially on small, side and blurry faces. |. :param large_person_group_id: Id referencing a particular large person group. @@ -558,6 +574,14 @@ def add_face_from_url( image, targetFace is required to specify which face to add. No targetFace means there is only one face detected in the entire image. :type target_face: list[int] + :param detection_model: Name of detection model. Detection model is + used to detect faces in the submitted image. A detection model name + can be provided when performing Face - Detect or (Large)FaceList - Add + Face or (Large)PersonGroup - Add Face. The default value is + 'detection_01', if another model is needed, please explicitly specify + it. Possible values include: 'detection_01', 'detection_02' + :type detection_model: str or + ~azure.cognitiveservices.vision.face.models.DetectionModel :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -586,6 +610,8 @@ def add_face_from_url( query_parameters['userData'] = self._serialize.query("user_data", user_data, 'str', max_length=1024) if target_face is not None: query_parameters['targetFace'] = self._serialize.query("target_face", target_face, '[int]', div=',') + if detection_model is not None: + query_parameters['detectionModel'] = self._serialize.query("detection_model", detection_model, 'str') # Construct headers header_parameters = {} @@ -617,9 +643,54 @@ def add_face_from_url( add_face_from_url.metadata = {'url': '/largepersongroups/{largePersonGroupId}/persons/{personId}/persistedfaces'} def add_face_from_stream( - self, large_person_group_id, person_id, image, user_data=None, target_face=None, custom_headers=None, raw=False, callback=None, 
**operation_config): - """Add a representative face to a person for identification. The input - face is specified as an image with a targetFace rectangle. + self, large_person_group_id, person_id, image, user_data=None, target_face=None, detection_model="detection_01", custom_headers=None, raw=False, callback=None, **operation_config): + """Add a face to a person into a large person group for face + identification or verification. To deal with an image contains multiple + faces, input face can be specified as an image with a targetFace + rectangle. It returns a persistedFaceId representing the added face. No + image will be stored. Only the extracted face feature will be stored on + server until [LargePersonGroup PersonFace - + Delete](/docs/services/563879b61984550e40cbbe8d/operations/599ae2966ac60f11b48b5aa3), + [LargePersonGroup Person - + Delete](/docs/services/563879b61984550e40cbbe8d/operations/599ade5c6ac60f11b48b5aa2) + or [LargePersonGroup - + Delete](/docs/services/563879b61984550e40cbbe8d/operations/599adc216ac60f11b48b5a9f) + is called. +
Note persistedFaceId is different from faceId generated by [Face + - + Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236). + * Higher face image quality means better recognition precision. Please + consider high-quality faces: frontal, clear, and face size is 200x200 + pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The + allowed image file size is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple + faces will be regarded as an error. If the provided "targetFace" + rectangle is not returned from [Face - + Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), + there’s no guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large + head-pose, or large occlusions will cause failures. + * Adding/deleting faces to/from a same person will be processed + sequentially. Adding/deleting faces to/from different persons are + processed in parallel. + * The minimum detectable face size is 36x36 pixels in an image no + larger than 1920x1080 pixels. Images with dimensions higher than + 1920x1080 pixels will need a proportionally larger minimum face size. + * Different 'detectionModel' values can be provided. To use and compare + different detection models, please refer to [How to specify a detection + model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model) + | Model | Recommended use-case(s) | + | ---------- | -------- | + | 'detection_01': | The default detection model for [LargePersonGroup + Person - Add + Face](/docs/services/563879b61984550e40cbbe8d/operations/599adf2a3a7b9412a4d53f42). + Recommend for near frontal face detection. 
For scenarios with + exceptionally large angle (head-pose) faces, occluded faces or wrong + image orientation, the faces in such cases may not be detected. | + | 'detection_02': | Detection model released in 2019 May with improved + accuracy especially on small, side and blurry faces. |. :param large_person_group_id: Id referencing a particular large person group. @@ -637,6 +708,14 @@ def add_face_from_stream( image, targetFace is required to specify which face to add. No targetFace means there is only one face detected in the entire image. :type target_face: list[int] + :param detection_model: Name of detection model. Detection model is + used to detect faces in the submitted image. A detection model name + can be provided when performing Face - Detect or (Large)FaceList - Add + Face or (Large)PersonGroup - Add Face. The default value is + 'detection_01', if another model is needed, please explicitly specify + it. Possible values include: 'detection_01', 'detection_02' + :type detection_model: str or + ~azure.cognitiveservices.vision.face.models.DetectionModel :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -668,6 +747,8 @@ def add_face_from_stream( query_parameters['userData'] = self._serialize.query("user_data", user_data, 'str', max_length=1024) if target_face is not None: query_parameters['targetFace'] = self._serialize.query("target_face", target_face, '[int]', div=',') + if detection_model is not None: + query_parameters['detectionModel'] = self._serialize.query("detection_model", detection_model, 'str') # Construct headers header_parameters = {} diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/person_group_person_operations.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/person_group_person_operations.py index 
401dcdc8dc65..8dc83c2c5860 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/person_group_person_operations.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/person_group_person_operations.py @@ -531,18 +531,18 @@ def update_face( update_face.metadata = {'url': '/persongroups/{personGroupId}/persons/{personId}/persistedfaces/{persistedFaceId}'} def add_face_from_url( - self, person_group_id, person_id, url, user_data=None, target_face=None, custom_headers=None, raw=False, **operation_config): - """Add a face to a person into a large person group for face - identification or verification. To deal with an image contains multiple - faces, input face can be specified as an image with a targetFace - rectangle. It returns a persistedFaceId representing the added face. No - image will be stored. Only the extracted face feature will be stored on - server until [LargePersonGroup PersonFace - - Delete](/docs/services/563879b61984550e40cbbe8d/operations/599ae2966ac60f11b48b5aa3), - [LargePersonGroup Person - - Delete](/docs/services/563879b61984550e40cbbe8d/operations/599ade5c6ac60f11b48b5aa2) - or [LargePersonGroup - - Delete](/docs/services/563879b61984550e40cbbe8d/operations/599adc216ac60f11b48b5a9f) + self, person_group_id, person_id, url, user_data=None, target_face=None, detection_model="detection_01", custom_headers=None, raw=False, **operation_config): + """Add a face to a person into a person group for face identification or + verification. To deal with an image contains multiple faces, input face + can be specified as an image with a targetFace rectangle. It returns a + persistedFaceId representing the added face. No image will be stored. 
+ Only the extracted face feature will be stored on server until + [PersonGroup PersonFace - + Delete](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523e), + [PersonGroup Person - + Delete](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523d) + or [PersonGroup - + Delete](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395245) is called.
Note persistedFaceId is different from faceId generated by [Face - @@ -563,6 +563,22 @@ def add_face_from_url( * Adding/deleting faces to/from a same person will be processed sequentially. Adding/deleting faces to/from different persons are processed in parallel. + * The minimum detectable face size is 36x36 pixels in an image no + larger than 1920x1080 pixels. Images with dimensions higher than + 1920x1080 pixels will need a proportionally larger minimum face size. + * Different 'detectionModel' values can be provided. To use and compare + different detection models, please refer to [How to specify a detection + model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model) + | Model | Recommended use-case(s) | + | ---------- | -------- | + | 'detection_01': | The default detection model for [PersonGroup Person + - Add + Face](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523b). + Recommend for near frontal face detection. For scenarios with + exceptionally large angle (head-pose) faces, occluded faces or wrong + image orientation, the faces in such cases may not be detected. | + | 'detection_02': | Detection model released in 2019 May with improved + accuracy especially on small, side and blurry faces. |. :param person_group_id: Id referencing a particular person group. :type person_group_id: str @@ -579,6 +595,14 @@ def add_face_from_url( image, targetFace is required to specify which face to add. No targetFace means there is only one face detected in the entire image. :type target_face: list[int] + :param detection_model: Name of detection model. Detection model is + used to detect faces in the submitted image. A detection model name + can be provided when performing Face - Detect or (Large)FaceList - Add + Face or (Large)PersonGroup - Add Face. The default value is + 'detection_01', if another model is needed, please explicitly specify + it. 
Possible values include: 'detection_01', 'detection_02' + :type detection_model: str or + ~azure.cognitiveservices.vision.face.models.DetectionModel :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -607,6 +631,8 @@ def add_face_from_url( query_parameters['userData'] = self._serialize.query("user_data", user_data, 'str', max_length=1024) if target_face is not None: query_parameters['targetFace'] = self._serialize.query("target_face", target_face, '[int]', div=',') + if detection_model is not None: + query_parameters['detectionModel'] = self._serialize.query("detection_model", detection_model, 'str') # Construct headers header_parameters = {} @@ -638,9 +664,54 @@ def add_face_from_url( add_face_from_url.metadata = {'url': '/persongroups/{personGroupId}/persons/{personId}/persistedfaces'} def add_face_from_stream( - self, person_group_id, person_id, image, user_data=None, target_face=None, custom_headers=None, raw=False, callback=None, **operation_config): - """Add a representative face to a person for identification. The input - face is specified as an image with a targetFace rectangle. + self, person_group_id, person_id, image, user_data=None, target_face=None, detection_model="detection_01", custom_headers=None, raw=False, callback=None, **operation_config): + """Add a face to a person into a person group for face identification or + verification. To deal with an image contains multiple faces, input face + can be specified as an image with a targetFace rectangle. It returns a + persistedFaceId representing the added face. No image will be stored. 
+ Only the extracted face feature will be stored on server until + [PersonGroup PersonFace - + Delete](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523e), + [PersonGroup Person - + Delete](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523d) + or [PersonGroup - + Delete](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395245) + is called. +
Note persistedFaceId is different from faceId generated by [Face + - + Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236). + * Higher face image quality means better recognition precision. + Please consider high-quality faces: frontal, clear, and face size is + 200x200 pixels (100 pixels between eyes) or bigger. + * Each person entry can hold up to 248 faces. + * JPEG, PNG, GIF (the first frame), and BMP format are supported. The + allowed image file size is from 1KB to 6MB. + * "targetFace" rectangle should contain one face. Zero or multiple + faces will be regarded as an error. If the provided "targetFace" + rectangle is not returned from [Face - + Detect](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f30395236), + there’s no guarantee to detect and add the face successfully. + * Out of detectable face size (36x36 - 4096x4096 pixels), large + head-pose, or large occlusions will cause failures. + * Adding/deleting faces to/from a same person will be processed + sequentially. Adding/deleting faces to/from different persons are + processed in parallel. + * The minimum detectable face size is 36x36 pixels in an image no + larger than 1920x1080 pixels. Images with dimensions higher than + 1920x1080 pixels will need a proportionally larger minimum face size. + * Different 'detectionModel' values can be provided. To use and compare + different detection models, please refer to [How to specify a detection + model](https://docs.microsoft.com/en-us/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model) + | Model | Recommended use-case(s) | + | ---------- | -------- | + | 'detection_01': | The default detection model for [PersonGroup Person + - Add + Face](/docs/services/563879b61984550e40cbbe8d/operations/563879b61984550f3039523b). + Recommend for near frontal face detection. 
For scenarios with + exceptionally large angle (head-pose) faces, occluded faces or wrong + image orientation, the faces in such cases may not be detected. | + | 'detection_02': | Detection model released in 2019 May with improved + accuracy especially on small, side and blurry faces. |. :param person_group_id: Id referencing a particular person group. :type person_group_id: str @@ -657,6 +728,14 @@ def add_face_from_stream( image, targetFace is required to specify which face to add. No targetFace means there is only one face detected in the entire image. :type target_face: list[int] + :param detection_model: Name of detection model. Detection model is + used to detect faces in the submitted image. A detection model name + can be provided when performing Face - Detect or (Large)FaceList - Add + Face or (Large)PersonGroup - Add Face. The default value is + 'detection_01', if another model is needed, please explicitly specify + it. Possible values include: 'detection_01', 'detection_02' + :type detection_model: str or + ~azure.cognitiveservices.vision.face.models.DetectionModel :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -688,6 +767,8 @@ def add_face_from_stream( query_parameters['userData'] = self._serialize.query("user_data", user_data, 'str', max_length=1024) if target_face is not None: query_parameters['targetFace'] = self._serialize.query("target_face", target_face, '[int]', div=',') + if detection_model is not None: + query_parameters['detectionModel'] = self._serialize.query("detection_model", detection_model, 'str') # Construct headers header_parameters = {} From b6078418d04dbff3cdae42862099b8136fdc76fe Mon Sep 17 00:00:00 2001 From: Laurent Mazuel Date: Thu, 27 Jun 2019 14:04:00 -0700 Subject: [PATCH 2/6] Face with Autorest v4 --- .../cognitiveservices/vision/face/__init__.py | 7 +- .../vision/face/_configuration.py | 47 + .../face/{face_client.py => 
_face_client.py} | 51 +- .../vision/face/models/__init__.py | 252 +-- ..._client_enums.py => _face_client_enums.py} | 0 .../vision/face/models/_models.py | 1639 +++++++++++++++++ .../vision/face/models/_models_py3.py | 1639 +++++++++++++++++ .../vision/face/models/accessory.py | 34 - .../vision/face/models/accessory_py3.py | 34 - .../vision/face/models/api_error.py | 41 - .../vision/face/models/api_error_py3.py | 41 - .../face/models/apply_snapshot_request.py | 45 - .../face/models/apply_snapshot_request_py3.py | 45 - .../vision/face/models/blur.py | 34 - .../vision/face/models/blur_py3.py | 34 - .../vision/face/models/coordinate.py | 39 - .../vision/face/models/coordinate_py3.py | 39 - .../vision/face/models/detected_face.py | 55 - .../vision/face/models/detected_face_py3.py | 55 - .../vision/face/models/emotion.py | 57 - .../vision/face/models/emotion_py3.py | 57 - .../vision/face/models/error.py | 32 - .../vision/face/models/error_py3.py | 32 - .../vision/face/models/exposure.py | 36 - .../vision/face/models/exposure_py3.py | 36 - .../vision/face/models/face_attributes.py | 85 - .../vision/face/models/face_attributes_py3.py | 85 - .../vision/face/models/face_landmarks.py | 154 -- .../vision/face/models/face_landmarks_py3.py | 154 -- .../vision/face/models/face_list.py | 52 - .../vision/face/models/face_list_py3.py | 52 - .../vision/face/models/face_rectangle.py | 51 - .../vision/face/models/face_rectangle_py3.py | 51 - .../vision/face/models/facial_hair.py | 36 - .../vision/face/models/facial_hair_py3.py | 36 - .../face/models/find_similar_request.py | 75 - .../face/models/find_similar_request_py3.py | 75 - .../vision/face/models/group_request.py | 35 - .../vision/face/models/group_request_py3.py | 35 - .../vision/face/models/group_result.py | 40 - .../vision/face/models/group_result_py3.py | 40 - .../vision/face/models/hair.py | 40 - .../vision/face/models/hair_color.py | 34 - .../vision/face/models/hair_color_py3.py | 34 - .../vision/face/models/hair_py3.py | 
40 - .../vision/face/models/head_pose.py | 36 - .../vision/face/models/head_pose_py3.py | 36 - .../vision/face/models/identify_candidate.py | 41 - .../face/models/identify_candidate_py3.py | 41 - .../vision/face/models/identify_request.py | 64 - .../face/models/identify_request_py3.py | 64 - .../vision/face/models/identify_result.py | 43 - .../vision/face/models/identify_result_py3.py | 43 - .../vision/face/models/image_url.py | 34 - .../vision/face/models/image_url_py3.py | 34 - .../vision/face/models/large_face_list.py | 48 - .../vision/face/models/large_face_list_py3.py | 48 - .../vision/face/models/large_person_group.py | 48 - .../face/models/large_person_group_py3.py | 48 - .../vision/face/models/makeup.py | 34 - .../vision/face/models/makeup_py3.py | 34 - .../vision/face/models/meta_data_contract.py | 42 - .../face/models/meta_data_contract_py3.py | 42 - .../models/name_and_user_data_contract.py | 38 - .../models/name_and_user_data_contract_py3.py | 38 - .../vision/face/models/noise.py | 37 - .../vision/face/models/noise_py3.py | 37 - .../vision/face/models/occlusion.py | 38 - .../vision/face/models/occlusion_py3.py | 38 - .../vision/face/models/operation_status.py | 73 - .../face/models/operation_status_py3.py | 73 - .../vision/face/models/persisted_face.py | 43 - .../vision/face/models/persisted_face_py3.py | 43 - .../vision/face/models/person.py | 48 - .../vision/face/models/person_group.py | 48 - .../vision/face/models/person_group_py3.py | 48 - .../vision/face/models/person_py3.py | 48 - .../vision/face/models/similar_face.py | 46 - .../vision/face/models/similar_face_py3.py | 46 - .../vision/face/models/snapshot.py | 78 - .../vision/face/models/snapshot_py3.py | 78 - .../face/models/take_snapshot_request.py | 57 - .../face/models/take_snapshot_request_py3.py | 57 - .../vision/face/models/training_status.py | 67 - .../vision/face/models/training_status_py3.py | 67 - .../vision/face/models/update_face_request.py | 33 - 
.../face/models/update_face_request_py3.py | 33 - .../face/models/update_snapshot_request.py | 41 - .../models/update_snapshot_request_py3.py | 41 - .../models/verify_face_to_face_request.py | 41 - .../models/verify_face_to_face_request_py3.py | 41 - .../models/verify_face_to_person_request.py | 57 - .../verify_face_to_person_request_py3.py | 57 - .../vision/face/models/verify_result.py | 45 - .../vision/face/models/verify_result_py3.py | 45 - .../vision/face/operations/__init__.py | 16 +- ...operations.py => _face_list_operations.py} | 6 +- ...face_operations.py => _face_operations.py} | 9 +- ...ions.py => _large_face_list_operations.py} | 9 +- ...s.py => _large_person_group_operations.py} | 5 +- ... _large_person_group_person_operations.py} | 8 +- ...rations.py => _person_group_operations.py} | 5 +- ....py => _person_group_person_operations.py} | 8 +- ..._operations.py => _snapshot_operations.py} | 5 +- 104 files changed, 3490 insertions(+), 4526 deletions(-) create mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/_configuration.py rename sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/{face_client.py => _face_client.py} (68%) rename sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/{face_client_enums.py => _face_client_enums.py} (100%) create mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/_models.py create mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/_models_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/accessory.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/accessory_py3.py delete mode 100644 
sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/api_error.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/api_error_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/apply_snapshot_request.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/apply_snapshot_request_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/blur.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/blur_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/coordinate.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/coordinate_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/detected_face.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/detected_face_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/emotion.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/emotion_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/error.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/error_py3.py delete mode 100644 
sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/exposure.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/exposure_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_attributes.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_attributes_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_landmarks.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_landmarks_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_list.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_list_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_rectangle.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_rectangle_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/facial_hair.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/facial_hair_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/find_similar_request.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/find_similar_request_py3.py delete mode 100644 
sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/group_request.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/group_request_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/group_result.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/group_result_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/hair.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/hair_color.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/hair_color_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/hair_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/head_pose.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/head_pose_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_candidate.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_candidate_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_request.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_request_py3.py delete mode 100644 
sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_result.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_result_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/image_url.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/image_url_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/large_face_list.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/large_face_list_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/large_person_group.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/large_person_group_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/makeup.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/makeup_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/meta_data_contract.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/meta_data_contract_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/name_and_user_data_contract.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/name_and_user_data_contract_py3.py delete mode 100644 
sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/noise.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/noise_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/occlusion.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/occlusion_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/operation_status.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/operation_status_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/persisted_face.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/persisted_face_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/person.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/person_group.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/person_group_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/person_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/similar_face.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/similar_face_py3.py delete mode 100644 
sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/snapshot.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/snapshot_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/take_snapshot_request.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/take_snapshot_request_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/training_status.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/training_status_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/update_face_request.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/update_face_request_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/update_snapshot_request.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/update_snapshot_request_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_face_to_face_request.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_face_to_face_request_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_face_to_person_request.py delete mode 100644 
sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_face_to_person_request_py3.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_result.py delete mode 100644 sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_result_py3.py rename sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/{face_list_operations.py => _face_list_operations.py} (99%) rename sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/{face_operations.py => _face_operations.py} (99%) rename sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/{large_face_list_operations.py => _large_face_list_operations.py} (99%) rename sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/{large_person_group_operations.py => _large_person_group_operations.py} (99%) rename sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/{large_person_group_person_operations.py => _large_person_group_person_operations.py} (99%) rename sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/{person_group_operations.py => _person_group_operations.py} (99%) rename sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/{person_group_person_operations.py => _person_group_person_operations.py} (99%) rename sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/{snapshot_operations.py => _snapshot_operations.py} (99%) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/__init__.py 
b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/__init__.py index 15376d8ae672..f7f99b1d745e 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/__init__.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/__init__.py @@ -9,10 +9,11 @@ # regenerated. # -------------------------------------------------------------------------- -from .face_client import FaceClient -from .version import VERSION +from ._configuration import FaceClientConfiguration +from ._face_client import FaceClient +__all__ = ['FaceClient', 'FaceClientConfiguration'] -__all__ = ['FaceClient'] +from .version import VERSION __version__ = VERSION diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/_configuration.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/_configuration.py new file mode 100644 index 000000000000..5caa9b772876 --- /dev/null +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/_configuration.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest import Configuration + +from .version import VERSION + + +class FaceClientConfiguration(Configuration): + """Configuration for FaceClient + Note that all parameters used to create this instance are saved as instance + attributes. 
+ + :param endpoint: Supported Cognitive Services endpoints (protocol and + hostname, for example: https://westus.api.cognitive.microsoft.com). + :type endpoint: str + :param credentials: Subscription credentials which uniquely identify + client subscription. + :type credentials: None + """ + + def __init__( + self, endpoint, credentials): + + if endpoint is None: + raise ValueError("Parameter 'endpoint' must not be None.") + if credentials is None: + raise ValueError("Parameter 'credentials' must not be None.") + base_url = '{Endpoint}/face/v1.0' + + super(FaceClientConfiguration, self).__init__(base_url) + + # Starting Autorest.Python 4.0.64, make connection pool activated by default + self.keep_alive = True + + self.add_user_agent('azure-cognitiveservices-vision-face/{}'.format(VERSION)) + + self.endpoint = endpoint + self.credentials = credentials diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/face_client.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/_face_client.py similarity index 68% rename from sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/face_client.py rename to sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/_face_client.py index 6252437cf62d..3325cd82e506 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/face_client.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/_face_client.py @@ -10,49 +10,20 @@ # -------------------------------------------------------------------------- from msrest.service_client import SDKClient -from msrest import Configuration, Serializer, Deserializer -from .version import VERSION -from .operations.face_operations import FaceOperations -from .operations.person_group_person_operations import 
PersonGroupPersonOperations -from .operations.person_group_operations import PersonGroupOperations -from .operations.face_list_operations import FaceListOperations -from .operations.large_person_group_person_operations import LargePersonGroupPersonOperations -from .operations.large_person_group_operations import LargePersonGroupOperations -from .operations.large_face_list_operations import LargeFaceListOperations -from .operations.snapshot_operations import SnapshotOperations +from msrest import Serializer, Deserializer + +from ._configuration import FaceClientConfiguration +from .operations import FaceOperations +from .operations import PersonGroupPersonOperations +from .operations import PersonGroupOperations +from .operations import FaceListOperations +from .operations import LargePersonGroupPersonOperations +from .operations import LargePersonGroupOperations +from .operations import LargeFaceListOperations +from .operations import SnapshotOperations from . import models -class FaceClientConfiguration(Configuration): - """Configuration for FaceClient - Note that all parameters used to create this instance are saved as instance - attributes. - - :param endpoint: Supported Cognitive Services endpoints (protocol and - hostname, for example: https://westus.api.cognitive.microsoft.com). - :type endpoint: str - :param credentials: Subscription credentials which uniquely identify - client subscription. 
- :type credentials: None - """ - - def __init__( - self, endpoint, credentials): - - if endpoint is None: - raise ValueError("Parameter 'endpoint' must not be None.") - if credentials is None: - raise ValueError("Parameter 'credentials' must not be None.") - base_url = '{Endpoint}/face/v1.0' - - super(FaceClientConfiguration, self).__init__(base_url) - - self.add_user_agent('azure-cognitiveservices-vision-face/{}'.format(VERSION)) - - self.endpoint = endpoint - self.credentials = credentials - - class FaceClient(SDKClient): """An API for face detection, verification, and identification. diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/__init__.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/__init__.py index eb488346f616..79f68c61ae3e 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/__init__.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/__init__.py @@ -10,158 +10,158 @@ # -------------------------------------------------------------------------- try: - from .error_py3 import Error - from .api_error_py3 import APIError, APIErrorException - from .face_rectangle_py3 import FaceRectangle - from .coordinate_py3 import Coordinate - from .face_landmarks_py3 import FaceLandmarks - from .facial_hair_py3 import FacialHair - from .head_pose_py3 import HeadPose - from .emotion_py3 import Emotion - from .hair_color_py3 import HairColor - from .hair_py3 import Hair - from .makeup_py3 import Makeup - from .occlusion_py3 import Occlusion - from .accessory_py3 import Accessory - from .blur_py3 import Blur - from .exposure_py3 import Exposure - from .noise_py3 import Noise - from .face_attributes_py3 import FaceAttributes - from .detected_face_py3 import DetectedFace - from .find_similar_request_py3 import FindSimilarRequest - from 
.similar_face_py3 import SimilarFace - from .group_request_py3 import GroupRequest - from .group_result_py3 import GroupResult - from .identify_request_py3 import IdentifyRequest - from .identify_candidate_py3 import IdentifyCandidate - from .identify_result_py3 import IdentifyResult - from .verify_face_to_person_request_py3 import VerifyFaceToPersonRequest - from .verify_face_to_face_request_py3 import VerifyFaceToFaceRequest - from .verify_result_py3 import VerifyResult - from .persisted_face_py3 import PersistedFace - from .face_list_py3 import FaceList - from .person_group_py3 import PersonGroup - from .person_py3 import Person - from .large_face_list_py3 import LargeFaceList - from .large_person_group_py3 import LargePersonGroup - from .update_face_request_py3 import UpdateFaceRequest - from .training_status_py3 import TrainingStatus - from .name_and_user_data_contract_py3 import NameAndUserDataContract - from .meta_data_contract_py3 import MetaDataContract - from .apply_snapshot_request_py3 import ApplySnapshotRequest - from .snapshot_py3 import Snapshot - from .take_snapshot_request_py3 import TakeSnapshotRequest - from .update_snapshot_request_py3 import UpdateSnapshotRequest - from .operation_status_py3 import OperationStatus - from .image_url_py3 import ImageUrl + from ._models_py3 import Accessory + from ._models_py3 import APIError, APIErrorException + from ._models_py3 import ApplySnapshotRequest + from ._models_py3 import Blur + from ._models_py3 import Coordinate + from ._models_py3 import DetectedFace + from ._models_py3 import Emotion + from ._models_py3 import Error + from ._models_py3 import Exposure + from ._models_py3 import FaceAttributes + from ._models_py3 import FaceLandmarks + from ._models_py3 import FaceList + from ._models_py3 import FaceRectangle + from ._models_py3 import FacialHair + from ._models_py3 import FindSimilarRequest + from ._models_py3 import GroupRequest + from ._models_py3 import GroupResult + from ._models_py3 import 
Hair + from ._models_py3 import HairColor + from ._models_py3 import HeadPose + from ._models_py3 import IdentifyCandidate + from ._models_py3 import IdentifyRequest + from ._models_py3 import IdentifyResult + from ._models_py3 import ImageUrl + from ._models_py3 import LargeFaceList + from ._models_py3 import LargePersonGroup + from ._models_py3 import Makeup + from ._models_py3 import MetaDataContract + from ._models_py3 import NameAndUserDataContract + from ._models_py3 import Noise + from ._models_py3 import Occlusion + from ._models_py3 import OperationStatus + from ._models_py3 import PersistedFace + from ._models_py3 import Person + from ._models_py3 import PersonGroup + from ._models_py3 import SimilarFace + from ._models_py3 import Snapshot + from ._models_py3 import TakeSnapshotRequest + from ._models_py3 import TrainingStatus + from ._models_py3 import UpdateFaceRequest + from ._models_py3 import UpdateSnapshotRequest + from ._models_py3 import VerifyFaceToFaceRequest + from ._models_py3 import VerifyFaceToPersonRequest + from ._models_py3 import VerifyResult except (SyntaxError, ImportError): - from .error import Error - from .api_error import APIError, APIErrorException - from .face_rectangle import FaceRectangle - from .coordinate import Coordinate - from .face_landmarks import FaceLandmarks - from .facial_hair import FacialHair - from .head_pose import HeadPose - from .emotion import Emotion - from .hair_color import HairColor - from .hair import Hair - from .makeup import Makeup - from .occlusion import Occlusion - from .accessory import Accessory - from .blur import Blur - from .exposure import Exposure - from .noise import Noise - from .face_attributes import FaceAttributes - from .detected_face import DetectedFace - from .find_similar_request import FindSimilarRequest - from .similar_face import SimilarFace - from .group_request import GroupRequest - from .group_result import GroupResult - from .identify_request import IdentifyRequest - from 
.identify_candidate import IdentifyCandidate - from .identify_result import IdentifyResult - from .verify_face_to_person_request import VerifyFaceToPersonRequest - from .verify_face_to_face_request import VerifyFaceToFaceRequest - from .verify_result import VerifyResult - from .persisted_face import PersistedFace - from .face_list import FaceList - from .person_group import PersonGroup - from .person import Person - from .large_face_list import LargeFaceList - from .large_person_group import LargePersonGroup - from .update_face_request import UpdateFaceRequest - from .training_status import TrainingStatus - from .name_and_user_data_contract import NameAndUserDataContract - from .meta_data_contract import MetaDataContract - from .apply_snapshot_request import ApplySnapshotRequest - from .snapshot import Snapshot - from .take_snapshot_request import TakeSnapshotRequest - from .update_snapshot_request import UpdateSnapshotRequest - from .operation_status import OperationStatus - from .image_url import ImageUrl -from .face_client_enums import ( - RecognitionModel, - Gender, - GlassesType, - HairColorType, + from ._models import Accessory + from ._models import APIError, APIErrorException + from ._models import ApplySnapshotRequest + from ._models import Blur + from ._models import Coordinate + from ._models import DetectedFace + from ._models import Emotion + from ._models import Error + from ._models import Exposure + from ._models import FaceAttributes + from ._models import FaceLandmarks + from ._models import FaceList + from ._models import FaceRectangle + from ._models import FacialHair + from ._models import FindSimilarRequest + from ._models import GroupRequest + from ._models import GroupResult + from ._models import Hair + from ._models import HairColor + from ._models import HeadPose + from ._models import IdentifyCandidate + from ._models import IdentifyRequest + from ._models import IdentifyResult + from ._models import ImageUrl + from ._models import 
LargeFaceList + from ._models import LargePersonGroup + from ._models import Makeup + from ._models import MetaDataContract + from ._models import NameAndUserDataContract + from ._models import Noise + from ._models import Occlusion + from ._models import OperationStatus + from ._models import PersistedFace + from ._models import Person + from ._models import PersonGroup + from ._models import SimilarFace + from ._models import Snapshot + from ._models import TakeSnapshotRequest + from ._models import TrainingStatus + from ._models import UpdateFaceRequest + from ._models import UpdateSnapshotRequest + from ._models import VerifyFaceToFaceRequest + from ._models import VerifyFaceToPersonRequest + from ._models import VerifyResult +from ._face_client_enums import ( AccessoryType, BlurLevel, + DetectionModel, ExposureLevel, - NoiseLevel, + FaceAttributeType, FindSimilarMatchMode, - TrainingStatusType, + Gender, + GlassesType, + HairColorType, + NoiseLevel, + OperationStatusType, + RecognitionModel, SnapshotApplyMode, SnapshotObjectType, - OperationStatusType, - FaceAttributeType, - DetectionModel, + TrainingStatusType, ) __all__ = [ - 'Error', + 'Accessory', 'APIError', 'APIErrorException', - 'FaceRectangle', + 'ApplySnapshotRequest', + 'Blur', 'Coordinate', - 'FaceLandmarks', - 'FacialHair', - 'HeadPose', + 'DetectedFace', 'Emotion', - 'HairColor', - 'Hair', - 'Makeup', - 'Occlusion', - 'Accessory', - 'Blur', + 'Error', 'Exposure', - 'Noise', 'FaceAttributes', - 'DetectedFace', + 'FaceLandmarks', + 'FaceList', + 'FaceRectangle', + 'FacialHair', 'FindSimilarRequest', - 'SimilarFace', 'GroupRequest', 'GroupResult', - 'IdentifyRequest', + 'Hair', + 'HairColor', + 'HeadPose', 'IdentifyCandidate', + 'IdentifyRequest', 'IdentifyResult', - 'VerifyFaceToPersonRequest', - 'VerifyFaceToFaceRequest', - 'VerifyResult', - 'PersistedFace', - 'FaceList', - 'PersonGroup', - 'Person', + 'ImageUrl', 'LargeFaceList', 'LargePersonGroup', - 'UpdateFaceRequest', - 'TrainingStatus', - 
'NameAndUserDataContract', + 'Makeup', 'MetaDataContract', - 'ApplySnapshotRequest', + 'NameAndUserDataContract', + 'Noise', + 'Occlusion', + 'OperationStatus', + 'PersistedFace', + 'Person', + 'PersonGroup', + 'SimilarFace', 'Snapshot', 'TakeSnapshotRequest', + 'TrainingStatus', + 'UpdateFaceRequest', 'UpdateSnapshotRequest', - 'OperationStatus', - 'ImageUrl', + 'VerifyFaceToFaceRequest', + 'VerifyFaceToPersonRequest', + 'VerifyResult', 'RecognitionModel', 'Gender', 'GlassesType', diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_client_enums.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/_face_client_enums.py similarity index 100% rename from sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_client_enums.py rename to sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/_face_client_enums.py diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/_models.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/_models.py new file mode 100644 index 000000000000..bc0b3049df8e --- /dev/null +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/_models.py @@ -0,0 +1,1639 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from msrest.exceptions import HttpOperationError + + +class Accessory(Model): + """Accessory item and corresponding confidence level. + + :param type: Type of an accessory. Possible values include: 'headWear', + 'glasses', 'mask' + :type type: str or + ~azure.cognitiveservices.vision.face.models.AccessoryType + :param confidence: Confidence level of an accessory + :type confidence: float + """ + + _attribute_map = { + 'type': {'key': 'type', 'type': 'AccessoryType'}, + 'confidence': {'key': 'confidence', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(Accessory, self).__init__(**kwargs) + self.type = kwargs.get('type', None) + self.confidence = kwargs.get('confidence', None) + + +class APIError(Model): + """Error information returned by the API. + + :param error: + :type error: ~azure.cognitiveservices.vision.face.models.Error + """ + + _attribute_map = { + 'error': {'key': 'error', 'type': 'Error'}, + } + + def __init__(self, **kwargs): + super(APIError, self).__init__(**kwargs) + self.error = kwargs.get('error', None) + + +class APIErrorException(HttpOperationError): + """Server responsed with exception of type: 'APIError'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. + """ + + def __init__(self, deserialize, response, *args): + + super(APIErrorException, self).__init__(deserialize, response, 'APIError', *args) + + +class ApplySnapshotRequest(Model): + """Request body for applying snapshot operation. + + All required parameters must be populated in order to send to Azure. + + :param object_id: Required. User specified target object id to be created + from the snapshot. + :type object_id: str + :param mode: Snapshot applying mode. 
Currently only CreateNew is + supported, which means the apply operation will fail if target + subscription already contains an object of same type and using the same + objectId. Users can specify the "objectId" in request body to avoid such + conflicts. Possible values include: 'CreateNew'. Default value: + "CreateNew" . + :type mode: str or + ~azure.cognitiveservices.vision.face.models.SnapshotApplyMode + """ + + _validation = { + 'object_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, + } + + _attribute_map = { + 'object_id': {'key': 'objectId', 'type': 'str'}, + 'mode': {'key': 'mode', 'type': 'SnapshotApplyMode'}, + } + + def __init__(self, **kwargs): + super(ApplySnapshotRequest, self).__init__(**kwargs) + self.object_id = kwargs.get('object_id', None) + self.mode = kwargs.get('mode', "CreateNew") + + +class Blur(Model): + """Properties describing any presence of blur within the image. + + :param blur_level: An enum value indicating level of blurriness. Possible + values include: 'Low', 'Medium', 'High' + :type blur_level: str or + ~azure.cognitiveservices.vision.face.models.BlurLevel + :param value: A number indicating level of blurriness ranging from 0 to 1. + :type value: float + """ + + _attribute_map = { + 'blur_level': {'key': 'blurLevel', 'type': 'BlurLevel'}, + 'value': {'key': 'value', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(Blur, self).__init__(**kwargs) + self.blur_level = kwargs.get('blur_level', None) + self.value = kwargs.get('value', None) + + +class Coordinate(Model): + """Coordinates within an image. + + All required parameters must be populated in order to send to Azure. + + :param x: Required. The horizontal component, in pixels. + :type x: float + :param y: Required. The vertical component, in pixels. 
+ :type y: float + """ + + _validation = { + 'x': {'required': True}, + 'y': {'required': True}, + } + + _attribute_map = { + 'x': {'key': 'x', 'type': 'float'}, + 'y': {'key': 'y', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(Coordinate, self).__init__(**kwargs) + self.x = kwargs.get('x', None) + self.y = kwargs.get('y', None) + + +class DetectedFace(Model): + """Detected Face object. + + All required parameters must be populated in order to send to Azure. + + :param face_id: + :type face_id: str + :param recognition_model: Possible values include: 'recognition_01', + 'recognition_02'. Default value: "recognition_01" . + :type recognition_model: str or + ~azure.cognitiveservices.vision.face.models.RecognitionModel + :param face_rectangle: Required. + :type face_rectangle: + ~azure.cognitiveservices.vision.face.models.FaceRectangle + :param face_landmarks: + :type face_landmarks: + ~azure.cognitiveservices.vision.face.models.FaceLandmarks + :param face_attributes: + :type face_attributes: + ~azure.cognitiveservices.vision.face.models.FaceAttributes + """ + + _validation = { + 'face_rectangle': {'required': True}, + } + + _attribute_map = { + 'face_id': {'key': 'faceId', 'type': 'str'}, + 'recognition_model': {'key': 'recognitionModel', 'type': 'str'}, + 'face_rectangle': {'key': 'faceRectangle', 'type': 'FaceRectangle'}, + 'face_landmarks': {'key': 'faceLandmarks', 'type': 'FaceLandmarks'}, + 'face_attributes': {'key': 'faceAttributes', 'type': 'FaceAttributes'}, + } + + def __init__(self, **kwargs): + super(DetectedFace, self).__init__(**kwargs) + self.face_id = kwargs.get('face_id', None) + self.recognition_model = kwargs.get('recognition_model', "recognition_01") + self.face_rectangle = kwargs.get('face_rectangle', None) + self.face_landmarks = kwargs.get('face_landmarks', None) + self.face_attributes = kwargs.get('face_attributes', None) + + +class Emotion(Model): + """Properties describing facial emotion in form of confidence ranging from 0 
+ to 1. + + :param anger: + :type anger: float + :param contempt: + :type contempt: float + :param disgust: + :type disgust: float + :param fear: + :type fear: float + :param happiness: + :type happiness: float + :param neutral: + :type neutral: float + :param sadness: + :type sadness: float + :param surprise: + :type surprise: float + """ + + _attribute_map = { + 'anger': {'key': 'anger', 'type': 'float'}, + 'contempt': {'key': 'contempt', 'type': 'float'}, + 'disgust': {'key': 'disgust', 'type': 'float'}, + 'fear': {'key': 'fear', 'type': 'float'}, + 'happiness': {'key': 'happiness', 'type': 'float'}, + 'neutral': {'key': 'neutral', 'type': 'float'}, + 'sadness': {'key': 'sadness', 'type': 'float'}, + 'surprise': {'key': 'surprise', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(Emotion, self).__init__(**kwargs) + self.anger = kwargs.get('anger', None) + self.contempt = kwargs.get('contempt', None) + self.disgust = kwargs.get('disgust', None) + self.fear = kwargs.get('fear', None) + self.happiness = kwargs.get('happiness', None) + self.neutral = kwargs.get('neutral', None) + self.sadness = kwargs.get('sadness', None) + self.surprise = kwargs.get('surprise', None) + + +class Error(Model): + """Error body. + + :param code: + :type code: str + :param message: + :type message: str + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(Error, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + + +class Exposure(Model): + """Properties describing exposure level of the image. + + :param exposure_level: An enum value indicating level of exposure. 
+ Possible values include: 'UnderExposure', 'GoodExposure', 'OverExposure'
+ :type exposure_level: str or
+ ~azure.cognitiveservices.vision.face.models.ExposureLevel
+ :param value: A number indicating level of exposure ranging from 0
+ to 1. [0, 0.25) is under exposure. [0.25, 0.75) is good exposure. [0.75,
+ 1] is over exposure.
+ :type value: float
+ """
+
+ _attribute_map = {
+ 'exposure_level': {'key': 'exposureLevel', 'type': 'ExposureLevel'},
+ 'value': {'key': 'value', 'type': 'float'},
+ }
+
+ def __init__(self, **kwargs):
+ super(Exposure, self).__init__(**kwargs)
+ self.exposure_level = kwargs.get('exposure_level', None)
+ self.value = kwargs.get('value', None)
+
+
+class FaceAttributes(Model):
+ """Face Attributes.
+
+ :param age: Age in years
+ :type age: float
+ :param gender: Possible gender of the face. Possible values include:
+ 'male', 'female'
+ :type gender: str or ~azure.cognitiveservices.vision.face.models.Gender
+ :param smile: Smile intensity, a number between [0,1]
+ :type smile: float
+ :param facial_hair: Properties describing facial hair attributes.
+ :type facial_hair: ~azure.cognitiveservices.vision.face.models.FacialHair
+ :param glasses: Glasses type if any of the face. Possible values include:
+ 'noGlasses', 'readingGlasses', 'sunglasses', 'swimmingGoggles'
+ :type glasses: str or
+ ~azure.cognitiveservices.vision.face.models.GlassesType
+ :param head_pose: Properties indicating head pose of the face.
+ :type head_pose: ~azure.cognitiveservices.vision.face.models.HeadPose
+ :param emotion: Properties describing facial emotion in form of confidence
+ ranging from 0 to 1.
+ :type emotion: ~azure.cognitiveservices.vision.face.models.Emotion
+ :param hair: Properties describing hair attributes.
+ :type hair: ~azure.cognitiveservices.vision.face.models.Hair
+ :param makeup: Properties describing present makeups on a given face.
+ :type makeup: ~azure.cognitiveservices.vision.face.models.Makeup + :param occlusion: Properties describing occlusions on a given face. + :type occlusion: ~azure.cognitiveservices.vision.face.models.Occlusion + :param accessories: Properties describing any accessories on a given face. + :type accessories: + list[~azure.cognitiveservices.vision.face.models.Accessory] + :param blur: Properties describing any presence of blur within the image. + :type blur: ~azure.cognitiveservices.vision.face.models.Blur + :param exposure: Properties describing exposure level of the image. + :type exposure: ~azure.cognitiveservices.vision.face.models.Exposure + :param noise: Properties describing noise level of the image. + :type noise: ~azure.cognitiveservices.vision.face.models.Noise + """ + + _attribute_map = { + 'age': {'key': 'age', 'type': 'float'}, + 'gender': {'key': 'gender', 'type': 'Gender'}, + 'smile': {'key': 'smile', 'type': 'float'}, + 'facial_hair': {'key': 'facialHair', 'type': 'FacialHair'}, + 'glasses': {'key': 'glasses', 'type': 'GlassesType'}, + 'head_pose': {'key': 'headPose', 'type': 'HeadPose'}, + 'emotion': {'key': 'emotion', 'type': 'Emotion'}, + 'hair': {'key': 'hair', 'type': 'Hair'}, + 'makeup': {'key': 'makeup', 'type': 'Makeup'}, + 'occlusion': {'key': 'occlusion', 'type': 'Occlusion'}, + 'accessories': {'key': 'accessories', 'type': '[Accessory]'}, + 'blur': {'key': 'blur', 'type': 'Blur'}, + 'exposure': {'key': 'exposure', 'type': 'Exposure'}, + 'noise': {'key': 'noise', 'type': 'Noise'}, + } + + def __init__(self, **kwargs): + super(FaceAttributes, self).__init__(**kwargs) + self.age = kwargs.get('age', None) + self.gender = kwargs.get('gender', None) + self.smile = kwargs.get('smile', None) + self.facial_hair = kwargs.get('facial_hair', None) + self.glasses = kwargs.get('glasses', None) + self.head_pose = kwargs.get('head_pose', None) + self.emotion = kwargs.get('emotion', None) + self.hair = kwargs.get('hair', None) + self.makeup = 
kwargs.get('makeup', None) + self.occlusion = kwargs.get('occlusion', None) + self.accessories = kwargs.get('accessories', None) + self.blur = kwargs.get('blur', None) + self.exposure = kwargs.get('exposure', None) + self.noise = kwargs.get('noise', None) + + +class FaceLandmarks(Model): + """A collection of 27-point face landmarks pointing to the important positions + of face components. + + :param pupil_left: + :type pupil_left: ~azure.cognitiveservices.vision.face.models.Coordinate + :param pupil_right: + :type pupil_right: ~azure.cognitiveservices.vision.face.models.Coordinate + :param nose_tip: + :type nose_tip: ~azure.cognitiveservices.vision.face.models.Coordinate + :param mouth_left: + :type mouth_left: ~azure.cognitiveservices.vision.face.models.Coordinate + :param mouth_right: + :type mouth_right: ~azure.cognitiveservices.vision.face.models.Coordinate + :param eyebrow_left_outer: + :type eyebrow_left_outer: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param eyebrow_left_inner: + :type eyebrow_left_inner: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param eye_left_outer: + :type eye_left_outer: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param eye_left_top: + :type eye_left_top: ~azure.cognitiveservices.vision.face.models.Coordinate + :param eye_left_bottom: + :type eye_left_bottom: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param eye_left_inner: + :type eye_left_inner: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param eyebrow_right_inner: + :type eyebrow_right_inner: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param eyebrow_right_outer: + :type eyebrow_right_outer: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param eye_right_inner: + :type eye_right_inner: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param eye_right_top: + :type eye_right_top: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param eye_right_bottom: + 
:type eye_right_bottom: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param eye_right_outer: + :type eye_right_outer: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param nose_root_left: + :type nose_root_left: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param nose_root_right: + :type nose_root_right: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param nose_left_alar_top: + :type nose_left_alar_top: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param nose_right_alar_top: + :type nose_right_alar_top: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param nose_left_alar_out_tip: + :type nose_left_alar_out_tip: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param nose_right_alar_out_tip: + :type nose_right_alar_out_tip: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param upper_lip_top: + :type upper_lip_top: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param upper_lip_bottom: + :type upper_lip_bottom: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param under_lip_top: + :type under_lip_top: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param under_lip_bottom: + :type under_lip_bottom: + ~azure.cognitiveservices.vision.face.models.Coordinate + """ + + _attribute_map = { + 'pupil_left': {'key': 'pupilLeft', 'type': 'Coordinate'}, + 'pupil_right': {'key': 'pupilRight', 'type': 'Coordinate'}, + 'nose_tip': {'key': 'noseTip', 'type': 'Coordinate'}, + 'mouth_left': {'key': 'mouthLeft', 'type': 'Coordinate'}, + 'mouth_right': {'key': 'mouthRight', 'type': 'Coordinate'}, + 'eyebrow_left_outer': {'key': 'eyebrowLeftOuter', 'type': 'Coordinate'}, + 'eyebrow_left_inner': {'key': 'eyebrowLeftInner', 'type': 'Coordinate'}, + 'eye_left_outer': {'key': 'eyeLeftOuter', 'type': 'Coordinate'}, + 'eye_left_top': {'key': 'eyeLeftTop', 'type': 'Coordinate'}, + 'eye_left_bottom': {'key': 'eyeLeftBottom', 'type': 'Coordinate'}, + 
'eye_left_inner': {'key': 'eyeLeftInner', 'type': 'Coordinate'}, + 'eyebrow_right_inner': {'key': 'eyebrowRightInner', 'type': 'Coordinate'}, + 'eyebrow_right_outer': {'key': 'eyebrowRightOuter', 'type': 'Coordinate'}, + 'eye_right_inner': {'key': 'eyeRightInner', 'type': 'Coordinate'}, + 'eye_right_top': {'key': 'eyeRightTop', 'type': 'Coordinate'}, + 'eye_right_bottom': {'key': 'eyeRightBottom', 'type': 'Coordinate'}, + 'eye_right_outer': {'key': 'eyeRightOuter', 'type': 'Coordinate'}, + 'nose_root_left': {'key': 'noseRootLeft', 'type': 'Coordinate'}, + 'nose_root_right': {'key': 'noseRootRight', 'type': 'Coordinate'}, + 'nose_left_alar_top': {'key': 'noseLeftAlarTop', 'type': 'Coordinate'}, + 'nose_right_alar_top': {'key': 'noseRightAlarTop', 'type': 'Coordinate'}, + 'nose_left_alar_out_tip': {'key': 'noseLeftAlarOutTip', 'type': 'Coordinate'}, + 'nose_right_alar_out_tip': {'key': 'noseRightAlarOutTip', 'type': 'Coordinate'}, + 'upper_lip_top': {'key': 'upperLipTop', 'type': 'Coordinate'}, + 'upper_lip_bottom': {'key': 'upperLipBottom', 'type': 'Coordinate'}, + 'under_lip_top': {'key': 'underLipTop', 'type': 'Coordinate'}, + 'under_lip_bottom': {'key': 'underLipBottom', 'type': 'Coordinate'}, + } + + def __init__(self, **kwargs): + super(FaceLandmarks, self).__init__(**kwargs) + self.pupil_left = kwargs.get('pupil_left', None) + self.pupil_right = kwargs.get('pupil_right', None) + self.nose_tip = kwargs.get('nose_tip', None) + self.mouth_left = kwargs.get('mouth_left', None) + self.mouth_right = kwargs.get('mouth_right', None) + self.eyebrow_left_outer = kwargs.get('eyebrow_left_outer', None) + self.eyebrow_left_inner = kwargs.get('eyebrow_left_inner', None) + self.eye_left_outer = kwargs.get('eye_left_outer', None) + self.eye_left_top = kwargs.get('eye_left_top', None) + self.eye_left_bottom = kwargs.get('eye_left_bottom', None) + self.eye_left_inner = kwargs.get('eye_left_inner', None) + self.eyebrow_right_inner = kwargs.get('eyebrow_right_inner', None) + 
self.eyebrow_right_outer = kwargs.get('eyebrow_right_outer', None) + self.eye_right_inner = kwargs.get('eye_right_inner', None) + self.eye_right_top = kwargs.get('eye_right_top', None) + self.eye_right_bottom = kwargs.get('eye_right_bottom', None) + self.eye_right_outer = kwargs.get('eye_right_outer', None) + self.nose_root_left = kwargs.get('nose_root_left', None) + self.nose_root_right = kwargs.get('nose_root_right', None) + self.nose_left_alar_top = kwargs.get('nose_left_alar_top', None) + self.nose_right_alar_top = kwargs.get('nose_right_alar_top', None) + self.nose_left_alar_out_tip = kwargs.get('nose_left_alar_out_tip', None) + self.nose_right_alar_out_tip = kwargs.get('nose_right_alar_out_tip', None) + self.upper_lip_top = kwargs.get('upper_lip_top', None) + self.upper_lip_bottom = kwargs.get('upper_lip_bottom', None) + self.under_lip_top = kwargs.get('under_lip_top', None) + self.under_lip_bottom = kwargs.get('under_lip_bottom', None) + + +class NameAndUserDataContract(Model): + """A combination of user defined name and user specified data for the person, + largePersonGroup/personGroup, and largeFaceList/faceList. + + :param name: User defined name, maximum length is 128. + :type name: str + :param user_data: User specified data. Length should not exceed 16KB. + :type user_data: str + """ + + _validation = { + 'name': {'max_length': 128}, + 'user_data': {'max_length': 16384}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'user_data': {'key': 'userData', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(NameAndUserDataContract, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.user_data = kwargs.get('user_data', None) + + +class MetaDataContract(NameAndUserDataContract): + """A combination of user defined name and user specified data and recognition + model name for largePersonGroup/personGroup, and largeFaceList/faceList. + + :param name: User defined name, maximum length is 128. 
+ :type name: str + :param user_data: User specified data. Length should not exceed 16KB. + :type user_data: str + :param recognition_model: Possible values include: 'recognition_01', + 'recognition_02'. Default value: "recognition_01" . + :type recognition_model: str or + ~azure.cognitiveservices.vision.face.models.RecognitionModel + """ + + _validation = { + 'name': {'max_length': 128}, + 'user_data': {'max_length': 16384}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'user_data': {'key': 'userData', 'type': 'str'}, + 'recognition_model': {'key': 'recognitionModel', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(MetaDataContract, self).__init__(**kwargs) + self.recognition_model = kwargs.get('recognition_model', "recognition_01") + + +class FaceList(MetaDataContract): + """Face list object. + + All required parameters must be populated in order to send to Azure. + + :param name: User defined name, maximum length is 128. + :type name: str + :param user_data: User specified data. Length should not exceed 16KB. + :type user_data: str + :param recognition_model: Possible values include: 'recognition_01', + 'recognition_02'. Default value: "recognition_01" . + :type recognition_model: str or + ~azure.cognitiveservices.vision.face.models.RecognitionModel + :param face_list_id: Required. FaceListId of the target face list. + :type face_list_id: str + :param persisted_faces: Persisted faces within the face list. 
+ :type persisted_faces:
+ list[~azure.cognitiveservices.vision.face.models.PersistedFace]
+ """
+
+ _validation = {
+ 'name': {'max_length': 128},
+ 'user_data': {'max_length': 16384},
+ 'face_list_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'name', 'type': 'str'},
+ 'user_data': {'key': 'userData', 'type': 'str'},
+ 'recognition_model': {'key': 'recognitionModel', 'type': 'str'},
+ 'face_list_id': {'key': 'faceListId', 'type': 'str'},
+ 'persisted_faces': {'key': 'persistedFaces', 'type': '[PersistedFace]'},
+ }
+
+ def __init__(self, **kwargs):
+ super(FaceList, self).__init__(**kwargs)
+ self.face_list_id = kwargs.get('face_list_id', None)
+ self.persisted_faces = kwargs.get('persisted_faces', None)
+
+
+class FaceRectangle(Model):
+ """A rectangle within which a face can be found.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param width: Required. The width of the rectangle, in pixels.
+ :type width: int
+ :param height: Required. The height of the rectangle, in pixels.
+ :type height: int
+ :param left: Required. The distance from the left edge of the image to the
+ left edge of the rectangle, in pixels.
+ :type left: int
+ :param top: Required. The distance from the top edge of the image to the
+ top edge of the rectangle, in pixels.
+ :type top: int + """ + + _validation = { + 'width': {'required': True}, + 'height': {'required': True}, + 'left': {'required': True}, + 'top': {'required': True}, + } + + _attribute_map = { + 'width': {'key': 'width', 'type': 'int'}, + 'height': {'key': 'height', 'type': 'int'}, + 'left': {'key': 'left', 'type': 'int'}, + 'top': {'key': 'top', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(FaceRectangle, self).__init__(**kwargs) + self.width = kwargs.get('width', None) + self.height = kwargs.get('height', None) + self.left = kwargs.get('left', None) + self.top = kwargs.get('top', None) + + +class FacialHair(Model): + """Properties describing facial hair attributes. + + :param moustache: + :type moustache: float + :param beard: + :type beard: float + :param sideburns: + :type sideburns: float + """ + + _attribute_map = { + 'moustache': {'key': 'moustache', 'type': 'float'}, + 'beard': {'key': 'beard', 'type': 'float'}, + 'sideburns': {'key': 'sideburns', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(FacialHair, self).__init__(**kwargs) + self.moustache = kwargs.get('moustache', None) + self.beard = kwargs.get('beard', None) + self.sideburns = kwargs.get('sideburns', None) + + +class FindSimilarRequest(Model): + """Request body for find similar operation. + + All required parameters must be populated in order to send to Azure. + + :param face_id: Required. FaceId of the query face. User needs to call + Face - Detect first to get a valid faceId. Note that this faceId is not + persisted and will expire 24 hours after the detection call + :type face_id: str + :param face_list_id: An existing user-specified unique candidate face + list, created in Face List - Create a Face List. Face list contains a set + of persistedFaceIds which are persisted and will never expire. Parameter + faceListId, largeFaceListId and faceIds should not be provided at the same + time. 
+ :type face_list_id: str + :param large_face_list_id: An existing user-specified unique candidate + large face list, created in LargeFaceList - Create. Large face list + contains a set of persistedFaceIds which are persisted and will never + expire. Parameter faceListId, largeFaceListId and faceIds should not be + provided at the same time. + :type large_face_list_id: str + :param face_ids: An array of candidate faceIds. All of them are created by + Face - Detect and the faceIds will expire 24 hours after the detection + call. The number of faceIds is limited to 1000. Parameter faceListId, + largeFaceListId and faceIds should not be provided at the same time. + :type face_ids: list[str] + :param max_num_of_candidates_returned: The number of top similar faces + returned. The valid range is [1, 1000]. Default value: 20 . + :type max_num_of_candidates_returned: int + :param mode: Similar face searching mode. It can be "matchPerson" or + "matchFace". Possible values include: 'matchPerson', 'matchFace'. Default + value: "matchPerson" . 
+ :type mode: str or + ~azure.cognitiveservices.vision.face.models.FindSimilarMatchMode + """ + + _validation = { + 'face_id': {'required': True}, + 'face_list_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, + 'large_face_list_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, + 'face_ids': {'max_items': 1000}, + 'max_num_of_candidates_returned': {'maximum': 1000, 'minimum': 1}, + } + + _attribute_map = { + 'face_id': {'key': 'faceId', 'type': 'str'}, + 'face_list_id': {'key': 'faceListId', 'type': 'str'}, + 'large_face_list_id': {'key': 'largeFaceListId', 'type': 'str'}, + 'face_ids': {'key': 'faceIds', 'type': '[str]'}, + 'max_num_of_candidates_returned': {'key': 'maxNumOfCandidatesReturned', 'type': 'int'}, + 'mode': {'key': 'mode', 'type': 'FindSimilarMatchMode'}, + } + + def __init__(self, **kwargs): + super(FindSimilarRequest, self).__init__(**kwargs) + self.face_id = kwargs.get('face_id', None) + self.face_list_id = kwargs.get('face_list_id', None) + self.large_face_list_id = kwargs.get('large_face_list_id', None) + self.face_ids = kwargs.get('face_ids', None) + self.max_num_of_candidates_returned = kwargs.get('max_num_of_candidates_returned', 20) + self.mode = kwargs.get('mode', "matchPerson") + + +class GroupRequest(Model): + """Request body for group request. + + All required parameters must be populated in order to send to Azure. + + :param face_ids: Required. Array of candidate faceId created by Face - + Detect. The maximum is 1000 faces + :type face_ids: list[str] + """ + + _validation = { + 'face_ids': {'required': True, 'max_items': 1000}, + } + + _attribute_map = { + 'face_ids': {'key': 'faceIds', 'type': '[str]'}, + } + + def __init__(self, **kwargs): + super(GroupRequest, self).__init__(**kwargs) + self.face_ids = kwargs.get('face_ids', None) + + +class GroupResult(Model): + """An array of face groups based on face similarity. + + All required parameters must be populated in order to send to Azure. + + :param groups: Required. 
A partition of the original faces based on face + similarity. Groups are ranked by number of faces + :type groups: list[list[str]] + :param messy_group: Face ids array of faces that cannot find any similar + faces from original faces. + :type messy_group: list[str] + """ + + _validation = { + 'groups': {'required': True}, + } + + _attribute_map = { + 'groups': {'key': 'groups', 'type': '[[str]]'}, + 'messy_group': {'key': 'messyGroup', 'type': '[str]'}, + } + + def __init__(self, **kwargs): + super(GroupResult, self).__init__(**kwargs) + self.groups = kwargs.get('groups', None) + self.messy_group = kwargs.get('messy_group', None) + + +class Hair(Model): + """Properties describing hair attributes. + + :param bald: A number describing confidence level of whether the person is + bald. + :type bald: float + :param invisible: A boolean value describing whether the hair is visible + in the image. + :type invisible: bool + :param hair_color: An array of candidate colors and confidence level in + the presence of each. + :type hair_color: + list[~azure.cognitiveservices.vision.face.models.HairColor] + """ + + _attribute_map = { + 'bald': {'key': 'bald', 'type': 'float'}, + 'invisible': {'key': 'invisible', 'type': 'bool'}, + 'hair_color': {'key': 'hairColor', 'type': '[HairColor]'}, + } + + def __init__(self, **kwargs): + super(Hair, self).__init__(**kwargs) + self.bald = kwargs.get('bald', None) + self.invisible = kwargs.get('invisible', None) + self.hair_color = kwargs.get('hair_color', None) + + +class HairColor(Model): + """Hair color and associated confidence. + + :param color: Name of the hair color. 
Possible values include: 'unknown', + 'white', 'gray', 'blond', 'brown', 'red', 'black', 'other' + :type color: str or + ~azure.cognitiveservices.vision.face.models.HairColorType + :param confidence: Confidence level of the color + :type confidence: float + """ + + _attribute_map = { + 'color': {'key': 'color', 'type': 'HairColorType'}, + 'confidence': {'key': 'confidence', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(HairColor, self).__init__(**kwargs) + self.color = kwargs.get('color', None) + self.confidence = kwargs.get('confidence', None) + + +class HeadPose(Model): + """Properties indicating head pose of the face. + + :param roll: + :type roll: float + :param yaw: + :type yaw: float + :param pitch: + :type pitch: float + """ + + _attribute_map = { + 'roll': {'key': 'roll', 'type': 'float'}, + 'yaw': {'key': 'yaw', 'type': 'float'}, + 'pitch': {'key': 'pitch', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(HeadPose, self).__init__(**kwargs) + self.roll = kwargs.get('roll', None) + self.yaw = kwargs.get('yaw', None) + self.pitch = kwargs.get('pitch', None) + + +class IdentifyCandidate(Model): + """All possible faces that may qualify. + + All required parameters must be populated in order to send to Azure. + + :param person_id: Required. Id of candidate + :type person_id: str + :param confidence: Required. Confidence threshold of identification, used + to judge whether one face belong to one person. The range of + confidenceThreshold is [0, 1] (default specified by algorithm). 
+ :type confidence: float + """ + + _validation = { + 'person_id': {'required': True}, + 'confidence': {'required': True}, + } + + _attribute_map = { + 'person_id': {'key': 'personId', 'type': 'str'}, + 'confidence': {'key': 'confidence', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(IdentifyCandidate, self).__init__(**kwargs) + self.person_id = kwargs.get('person_id', None) + self.confidence = kwargs.get('confidence', None) + + +class IdentifyRequest(Model): + """Request body for identify face operation. + + All required parameters must be populated in order to send to Azure. + + :param face_ids: Required. Array of query faces faceIds, created by the + Face - Detect. Each of the faces are identified independently. The valid + number of faceIds is between [1, 10]. + :type face_ids: list[str] + :param person_group_id: PersonGroupId of the target person group, created + by PersonGroup - Create. Parameter personGroupId and largePersonGroupId + should not be provided at the same time. + :type person_group_id: str + :param large_person_group_id: LargePersonGroupId of the target large + person group, created by LargePersonGroup - Create. Parameter + personGroupId and largePersonGroupId should not be provided at the same + time. + :type large_person_group_id: str + :param max_num_of_candidates_returned: The range of + maxNumOfCandidatesReturned is between 1 and 5 (default is 1). Default + value: 1 . + :type max_num_of_candidates_returned: int + :param confidence_threshold: Confidence threshold of identification, used + to judge whether one face belong to one person. The range of + confidenceThreshold is [0, 1] (default specified by algorithm). 
+ :type confidence_threshold: float + """ + + _validation = { + 'face_ids': {'required': True, 'max_items': 10}, + 'person_group_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, + 'large_person_group_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, + 'max_num_of_candidates_returned': {'maximum': 5, 'minimum': 1}, + } + + _attribute_map = { + 'face_ids': {'key': 'faceIds', 'type': '[str]'}, + 'person_group_id': {'key': 'personGroupId', 'type': 'str'}, + 'large_person_group_id': {'key': 'largePersonGroupId', 'type': 'str'}, + 'max_num_of_candidates_returned': {'key': 'maxNumOfCandidatesReturned', 'type': 'int'}, + 'confidence_threshold': {'key': 'confidenceThreshold', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(IdentifyRequest, self).__init__(**kwargs) + self.face_ids = kwargs.get('face_ids', None) + self.person_group_id = kwargs.get('person_group_id', None) + self.large_person_group_id = kwargs.get('large_person_group_id', None) + self.max_num_of_candidates_returned = kwargs.get('max_num_of_candidates_returned', 1) + self.confidence_threshold = kwargs.get('confidence_threshold', None) + + +class IdentifyResult(Model): + """Response body for identify face operation. + + All required parameters must be populated in order to send to Azure. + + :param face_id: Required. FaceId of the query face + :type face_id: str + :param candidates: Required. Identified person candidates for that face + (ranked by confidence). Array size should be no larger than input + maxNumOfCandidatesReturned. If no person is identified, will return an + empty array. 
+ :type candidates: + list[~azure.cognitiveservices.vision.face.models.IdentifyCandidate] + """ + + _validation = { + 'face_id': {'required': True}, + 'candidates': {'required': True}, + } + + _attribute_map = { + 'face_id': {'key': 'faceId', 'type': 'str'}, + 'candidates': {'key': 'candidates', 'type': '[IdentifyCandidate]'}, + } + + def __init__(self, **kwargs): + super(IdentifyResult, self).__init__(**kwargs) + self.face_id = kwargs.get('face_id', None) + self.candidates = kwargs.get('candidates', None) + + +class ImageUrl(Model): + """ImageUrl. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. Publicly reachable URL of an image + :type url: str + """ + + _validation = { + 'url': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ImageUrl, self).__init__(**kwargs) + self.url = kwargs.get('url', None) + + +class LargeFaceList(MetaDataContract): + """Large face list object. + + All required parameters must be populated in order to send to Azure. + + :param name: User defined name, maximum length is 128. + :type name: str + :param user_data: User specified data. Length should not exceed 16KB. + :type user_data: str + :param recognition_model: Possible values include: 'recognition_01', + 'recognition_02'. Default value: "recognition_01" . + :type recognition_model: str or + ~azure.cognitiveservices.vision.face.models.RecognitionModel + :param large_face_list_id: Required. LargeFaceListId of the target large + face list. 
+ :type large_face_list_id: str + """ + + _validation = { + 'name': {'max_length': 128}, + 'user_data': {'max_length': 16384}, + 'large_face_list_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'user_data': {'key': 'userData', 'type': 'str'}, + 'recognition_model': {'key': 'recognitionModel', 'type': 'str'}, + 'large_face_list_id': {'key': 'largeFaceListId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(LargeFaceList, self).__init__(**kwargs) + self.large_face_list_id = kwargs.get('large_face_list_id', None) + + +class LargePersonGroup(MetaDataContract): + """Large person group object. + + All required parameters must be populated in order to send to Azure. + + :param name: User defined name, maximum length is 128. + :type name: str + :param user_data: User specified data. Length should not exceed 16KB. + :type user_data: str + :param recognition_model: Possible values include: 'recognition_01', + 'recognition_02'. Default value: "recognition_01" . + :type recognition_model: str or + ~azure.cognitiveservices.vision.face.models.RecognitionModel + :param large_person_group_id: Required. LargePersonGroupId of the target + large person groups + :type large_person_group_id: str + """ + + _validation = { + 'name': {'max_length': 128}, + 'user_data': {'max_length': 16384}, + 'large_person_group_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'user_data': {'key': 'userData', 'type': 'str'}, + 'recognition_model': {'key': 'recognitionModel', 'type': 'str'}, + 'large_person_group_id': {'key': 'largePersonGroupId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(LargePersonGroup, self).__init__(**kwargs) + self.large_person_group_id = kwargs.get('large_person_group_id', None) + + +class Makeup(Model): + """Properties describing present makeups on a given face. 
+
+ :param eye_makeup: A boolean value describing whether eye makeup is
+ present on a face.
+ :type eye_makeup: bool
+ :param lip_makeup: A boolean value describing whether lip makeup is
+ present on a face.
+ :type lip_makeup: bool
+ """
+
+ _attribute_map = {
+ 'eye_makeup': {'key': 'eyeMakeup', 'type': 'bool'},
+ 'lip_makeup': {'key': 'lipMakeup', 'type': 'bool'},
+ }
+
+ def __init__(self, **kwargs):
+ super(Makeup, self).__init__(**kwargs)
+ self.eye_makeup = kwargs.get('eye_makeup', None)
+ self.lip_makeup = kwargs.get('lip_makeup', None)
+
+
+class Noise(Model):
+ """Properties describing noise level of the image.
+
+ :param noise_level: An enum value indicating level of noise. Possible
+ values include: 'Low', 'Medium', 'High'
+ :type noise_level: str or
+ ~azure.cognitiveservices.vision.face.models.NoiseLevel
+ :param value: A number indicating level of noise ranging from 0 to
+ 1. [0, 0.3) is low noise level. [0.3, 0.7) is medium noise
+ level. [0.7, 1] is high noise level.
+ :type value: float
+ """
+
+ _attribute_map = {
+ 'noise_level': {'key': 'noiseLevel', 'type': 'NoiseLevel'},
+ 'value': {'key': 'value', 'type': 'float'},
+ }
+
+ def __init__(self, **kwargs):
+ super(Noise, self).__init__(**kwargs)
+ self.noise_level = kwargs.get('noise_level', None)
+ self.value = kwargs.get('value', None)
+
+
+class Occlusion(Model):
+ """Properties describing occlusions on a given face.
+
+ :param forehead_occluded: A boolean value indicating whether forehead is
+ occluded.
+ :type forehead_occluded: bool
+ :param eye_occluded: A boolean value indicating whether eyes are occluded.
+ :type eye_occluded: bool
+ :param mouth_occluded: A boolean value indicating whether the mouth is
+ occluded.
+ :type mouth_occluded: bool + """ + + _attribute_map = { + 'forehead_occluded': {'key': 'foreheadOccluded', 'type': 'bool'}, + 'eye_occluded': {'key': 'eyeOccluded', 'type': 'bool'}, + 'mouth_occluded': {'key': 'mouthOccluded', 'type': 'bool'}, + } + + def __init__(self, **kwargs): + super(Occlusion, self).__init__(**kwargs) + self.forehead_occluded = kwargs.get('forehead_occluded', None) + self.eye_occluded = kwargs.get('eye_occluded', None) + self.mouth_occluded = kwargs.get('mouth_occluded', None) + + +class OperationStatus(Model): + """Operation status object. Operation refers to the asynchronous backend task + including taking a snapshot and applying a snapshot. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. Operation status: notstarted, running, succeeded, + failed. If the operation is requested and waiting to perform, the status + is notstarted. If the operation is ongoing in backend, the status is + running. Status succeeded means the operation is completed successfully, + specifically for snapshot taking operation, it illustrates the snapshot is + well taken and ready to apply, and for snapshot applying operation, it + presents the target object has finished creating by the snapshot and ready + to be used. Status failed is often caused by editing the source object + while taking the snapshot or editing the target object while applying the + snapshot before completion, see the field "message" to check the failure + reason. Possible values include: 'notstarted', 'running', 'succeeded', + 'failed' + :type status: str or + ~azure.cognitiveservices.vision.face.models.OperationStatusType + :param created_time: Required. A combined UTC date and time string that + describes the time when the operation (take or apply a snapshot) is + requested. E.g. 2018-12-25T11:41:02.2331413Z. 
+ :type created_time: datetime + :param last_action_time: A combined UTC date and time string that + describes the last time the operation (take or apply a snapshot) is + actively migrating data. The lastActionTime will keep increasing until the + operation finishes. E.g. 2018-12-25T11:51:27.8705696Z. + :type last_action_time: datetime + :param resource_location: When the operation succeeds successfully, for + snapshot taking operation the snapshot id will be included in this field, + and for snapshot applying operation, the path to get the target object + will be returned in this field. + :type resource_location: str + :param message: Show failure message when operation fails (omitted when + operation succeeds). + :type message: str + """ + + _validation = { + 'status': {'required': True}, + 'created_time': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'status', 'type': 'OperationStatusType'}, + 'created_time': {'key': 'createdTime', 'type': 'iso-8601'}, + 'last_action_time': {'key': 'lastActionTime', 'type': 'iso-8601'}, + 'resource_location': {'key': 'resourceLocation', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(OperationStatus, self).__init__(**kwargs) + self.status = kwargs.get('status', None) + self.created_time = kwargs.get('created_time', None) + self.last_action_time = kwargs.get('last_action_time', None) + self.resource_location = kwargs.get('resource_location', None) + self.message = kwargs.get('message', None) + + +class PersistedFace(Model): + """PersonFace object. + + All required parameters must be populated in order to send to Azure. + + :param persisted_face_id: Required. The persistedFaceId of the target + face, which is persisted and will not expire. Different from faceId + created by Face - Detect and will expire in 24 hours after the detection + call. + :type persisted_face_id: str + :param user_data: User-provided data attached to the face. 
The size limit
+ is 1KB.
+ :type user_data: str
+ """
+
+ _validation = {
+ 'persisted_face_id': {'required': True},
+ 'user_data': {'max_length': 1024},
+ }
+
+ _attribute_map = {
+ 'persisted_face_id': {'key': 'persistedFaceId', 'type': 'str'},
+ 'user_data': {'key': 'userData', 'type': 'str'},
+ }
+
+ def __init__(self, **kwargs):
+ super(PersistedFace, self).__init__(**kwargs)
+ self.persisted_face_id = kwargs.get('persisted_face_id', None)
+ self.user_data = kwargs.get('user_data', None)
+
+
+class Person(NameAndUserDataContract):
+ """Person object.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: User defined name, maximum length is 128.
+ :type name: str
+ :param user_data: User specified data. Length should not exceed 16KB.
+ :type user_data: str
+ :param person_id: Required. PersonId of the target person.
+ :type person_id: str
+ :param persisted_face_ids: PersistedFaceIds of registered faces in the
+ person. These persistedFaceIds are returned from Person - Add a Person
+ Face, and will not expire.
+ :type persisted_face_ids: list[str]
+ """
+
+ _validation = {
+ 'name': {'max_length': 128},
+ 'user_data': {'max_length': 16384},
+ 'person_id': {'required': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'name', 'type': 'str'},
+ 'user_data': {'key': 'userData', 'type': 'str'},
+ 'person_id': {'key': 'personId', 'type': 'str'},
+ 'persisted_face_ids': {'key': 'persistedFaceIds', 'type': '[str]'},
+ }
+
+ def __init__(self, **kwargs):
+ super(Person, self).__init__(**kwargs)
+ self.person_id = kwargs.get('person_id', None)
+ self.persisted_face_ids = kwargs.get('persisted_face_ids', None)
+
+
+class PersonGroup(MetaDataContract):
+ """Person group object.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: User defined name, maximum length is 128.
+ :type name: str
+ :param user_data: User specified data. Length should not exceed 16KB.
+ :type user_data: str
+ :param recognition_model: Possible values include: 'recognition_01',
+ 'recognition_02'. Default value: "recognition_01" .
+ :type recognition_model: str or
+ ~azure.cognitiveservices.vision.face.models.RecognitionModel
+ :param person_group_id: Required. PersonGroupId of the target person
+ group.
+ :type person_group_id: str
+ """
+
+ _validation = {
+ 'name': {'max_length': 128},
+ 'user_data': {'max_length': 16384},
+ 'person_group_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'name', 'type': 'str'},
+ 'user_data': {'key': 'userData', 'type': 'str'},
+ 'recognition_model': {'key': 'recognitionModel', 'type': 'str'},
+ 'person_group_id': {'key': 'personGroupId', 'type': 'str'},
+ }
+
+ def __init__(self, **kwargs):
+ super(PersonGroup, self).__init__(**kwargs)
+ self.person_group_id = kwargs.get('person_group_id', None)
+
+
+class SimilarFace(Model):
+ """Response body for find similar face operation.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param face_id: FaceId of candidate face when find by faceIds. faceId is
+ created by Face - Detect and will expire 24 hours after the detection call
+ :type face_id: str
+ :param persisted_face_id: PersistedFaceId of candidate face when find by
+ faceListId. persistedFaceId in face list is persisted and will not expire.
+ As shown in the response below.
+ :type persisted_face_id: str
+ :param confidence: Required. Similarity confidence of the candidate face.
+ The higher confidence, the more similar. Range between [0,1].
+ :type confidence: float + """ + + _validation = { + 'confidence': {'required': True}, + } + + _attribute_map = { + 'face_id': {'key': 'faceId', 'type': 'str'}, + 'persisted_face_id': {'key': 'persistedFaceId', 'type': 'str'}, + 'confidence': {'key': 'confidence', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(SimilarFace, self).__init__(**kwargs) + self.face_id = kwargs.get('face_id', None) + self.persisted_face_id = kwargs.get('persisted_face_id', None) + self.confidence = kwargs.get('confidence', None) + + +class Snapshot(Model): + """Snapshot object. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. Snapshot id. + :type id: str + :param account: Required. Azure Cognitive Service Face account id of the + subscriber who created the snapshot by Snapshot - Take. + :type account: str + :param type: Required. Type of the source object in the snapshot, + specified by the subscriber who created the snapshot when calling Snapshot + - Take. Currently FaceList, PersonGroup, LargeFaceList and + LargePersonGroup are supported. Possible values include: 'FaceList', + 'LargeFaceList', 'LargePersonGroup', 'PersonGroup' + :type type: str or + ~azure.cognitiveservices.vision.face.models.SnapshotObjectType + :param apply_scope: Required. Array of the target Face subscription ids + for the snapshot, specified by the user who created the snapshot when + calling Snapshot - Take. For each snapshot, only subscriptions included in + the applyScope of Snapshot - Take can apply it. + :type apply_scope: list[str] + :param user_data: User specified data about the snapshot for any purpose. + Length should not exceed 16KB. + :type user_data: str + :param created_time: Required. A combined UTC date and time string that + describes the created time of the snapshot. E.g. + 2018-12-25T11:41:02.2331413Z. + :type created_time: datetime + :param last_update_time: Required. 
A combined UTC date and time string + that describes the last time when the snapshot was created or updated by + Snapshot - Update. E.g. 2018-12-25T11:51:27.8705696Z. + :type last_update_time: datetime + """ + + _validation = { + 'id': {'required': True}, + 'account': {'required': True}, + 'type': {'required': True}, + 'apply_scope': {'required': True}, + 'user_data': {'max_length': 16384}, + 'created_time': {'required': True}, + 'last_update_time': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'account': {'key': 'account', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'SnapshotObjectType'}, + 'apply_scope': {'key': 'applyScope', 'type': '[str]'}, + 'user_data': {'key': 'userData', 'type': 'str'}, + 'created_time': {'key': 'createdTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(Snapshot, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.account = kwargs.get('account', None) + self.type = kwargs.get('type', None) + self.apply_scope = kwargs.get('apply_scope', None) + self.user_data = kwargs.get('user_data', None) + self.created_time = kwargs.get('created_time', None) + self.last_update_time = kwargs.get('last_update_time', None) + + +class TakeSnapshotRequest(Model): + """Request body for taking snapshot operation. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. User specified type for the source object to take + snapshot from. Currently FaceList, PersonGroup, LargeFaceList and + LargePersonGroup are supported. Possible values include: 'FaceList', + 'LargeFaceList', 'LargePersonGroup', 'PersonGroup' + :type type: str or + ~azure.cognitiveservices.vision.face.models.SnapshotObjectType + :param object_id: Required. User specified source object id to take + snapshot from. + :type object_id: str + :param apply_scope: Required. 
User specified array of target Face + subscription ids for the snapshot. For each snapshot, only subscriptions + included in the applyScope of Snapshot - Take can apply it. + :type apply_scope: list[str] + :param user_data: User specified data about the snapshot for any purpose. + Length should not exceed 16KB. + :type user_data: str + """ + + _validation = { + 'type': {'required': True}, + 'object_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, + 'apply_scope': {'required': True}, + 'user_data': {'max_length': 16384}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'SnapshotObjectType'}, + 'object_id': {'key': 'objectId', 'type': 'str'}, + 'apply_scope': {'key': 'applyScope', 'type': '[str]'}, + 'user_data': {'key': 'userData', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(TakeSnapshotRequest, self).__init__(**kwargs) + self.type = kwargs.get('type', None) + self.object_id = kwargs.get('object_id', None) + self.apply_scope = kwargs.get('apply_scope', None) + self.user_data = kwargs.get('user_data', None) + + +class TrainingStatus(Model): + """Training status object. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. Training status: notstarted, running, succeeded, + failed. If the training process is waiting to perform, the status is + notstarted. If the training is ongoing, the status is running. Status + succeed means this person group or large person group is ready for Face - + Identify, or this large face list is ready for Face - Find Similar. Status + failed is often caused by no person or no persisted face exist in the + person group or large person group, or no persisted face exist in the + large face list. Possible values include: 'nonstarted', 'running', + 'succeeded', 'failed' + :type status: str or + ~azure.cognitiveservices.vision.face.models.TrainingStatusType + :param created: Required. 
A combined UTC date and time string that + describes the created time of the person group, large person group or + large face list. + :type created: datetime + :param last_action: A combined UTC date and time string that describes the + last modify time of the person group, large person group or large face + list, could be null value when the group is not successfully trained. + :type last_action: datetime + :param last_successful_training: A combined UTC date and time string that + describes the last successful training time of the person group, large + person group or large face list. + :type last_successful_training: datetime + :param message: Show failure message when training failed (omitted when + training succeed). + :type message: str + """ + + _validation = { + 'status': {'required': True}, + 'created': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'status', 'type': 'TrainingStatusType'}, + 'created': {'key': 'createdDateTime', 'type': 'iso-8601'}, + 'last_action': {'key': 'lastActionDateTime', 'type': 'iso-8601'}, + 'last_successful_training': {'key': 'lastSuccessfulTrainingDateTime', 'type': 'iso-8601'}, + 'message': {'key': 'message', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(TrainingStatus, self).__init__(**kwargs) + self.status = kwargs.get('status', None) + self.created = kwargs.get('created', None) + self.last_action = kwargs.get('last_action', None) + self.last_successful_training = kwargs.get('last_successful_training', None) + self.message = kwargs.get('message', None) + + +class UpdateFaceRequest(Model): + """Request to update face data. + + :param user_data: User-provided data attached to the face. The size limit + is 1KB. 
+ :type user_data: str + """ + + _validation = { + 'user_data': {'max_length': 1024}, + } + + _attribute_map = { + 'user_data': {'key': 'userData', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(UpdateFaceRequest, self).__init__(**kwargs) + self.user_data = kwargs.get('user_data', None) + + +class UpdateSnapshotRequest(Model): + """Request body for updating a snapshot, with a combination of user defined + apply scope and user specified data. + + :param apply_scope: Array of the target Face subscription ids for the + snapshot, specified by the user who created the snapshot when calling + Snapshot - Take. For each snapshot, only subscriptions included in the + applyScope of Snapshot - Take can apply it. + :type apply_scope: list[str] + :param user_data: User specified data about the snapshot for any purpose. + Length should not exceed 16KB. + :type user_data: str + """ + + _validation = { + 'user_data': {'max_length': 16384}, + } + + _attribute_map = { + 'apply_scope': {'key': 'applyScope', 'type': '[str]'}, + 'user_data': {'key': 'userData', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(UpdateSnapshotRequest, self).__init__(**kwargs) + self.apply_scope = kwargs.get('apply_scope', None) + self.user_data = kwargs.get('user_data', None) + + +class VerifyFaceToFaceRequest(Model): + """Request body for face to face verification. + + All required parameters must be populated in order to send to Azure. + + :param face_id1: Required. FaceId of the first face, comes from Face - + Detect + :type face_id1: str + :param face_id2: Required. 
FaceId of the second face, comes from Face - + Detect + :type face_id2: str + """ + + _validation = { + 'face_id1': {'required': True}, + 'face_id2': {'required': True}, + } + + _attribute_map = { + 'face_id1': {'key': 'faceId1', 'type': 'str'}, + 'face_id2': {'key': 'faceId2', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(VerifyFaceToFaceRequest, self).__init__(**kwargs) + self.face_id1 = kwargs.get('face_id1', None) + self.face_id2 = kwargs.get('face_id2', None) + + +class VerifyFaceToPersonRequest(Model): + """Request body for face to person verification. + + All required parameters must be populated in order to send to Azure. + + :param face_id: Required. FaceId of the face, comes from Face - Detect + :type face_id: str + :param person_group_id: Using existing personGroupId and personId for fast + loading a specified person. personGroupId is created in PersonGroup - + Create. Parameter personGroupId and largePersonGroupId should not be + provided at the same time. + :type person_group_id: str + :param large_person_group_id: Using existing largePersonGroupId and + personId for fast loading a specified person. largePersonGroupId is + created in LargePersonGroup - Create. Parameter personGroupId and + largePersonGroupId should not be provided at the same time. + :type large_person_group_id: str + :param person_id: Required. Specify a certain person in a person group or + a large person group. personId is created in PersonGroup Person - Create + or LargePersonGroup Person - Create. 
+ :type person_id: str + """ + + _validation = { + 'face_id': {'required': True}, + 'person_group_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, + 'large_person_group_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, + 'person_id': {'required': True}, + } + + _attribute_map = { + 'face_id': {'key': 'faceId', 'type': 'str'}, + 'person_group_id': {'key': 'personGroupId', 'type': 'str'}, + 'large_person_group_id': {'key': 'largePersonGroupId', 'type': 'str'}, + 'person_id': {'key': 'personId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(VerifyFaceToPersonRequest, self).__init__(**kwargs) + self.face_id = kwargs.get('face_id', None) + self.person_group_id = kwargs.get('person_group_id', None) + self.large_person_group_id = kwargs.get('large_person_group_id', None) + self.person_id = kwargs.get('person_id', None) + + +class VerifyResult(Model): + """Result of the verify operation. + + All required parameters must be populated in order to send to Azure. + + :param is_identical: Required. True if the two faces belong to the same + person or the face belongs to the person, otherwise false. + :type is_identical: bool + :param confidence: Required. A number indicates the similarity confidence + of whether two faces belong to the same person, or whether the face + belongs to the person. By default, isIdentical is set to True if + similarity confidence is greater than or equal to 0.5. This is useful for + advanced users to override "isIdentical" and fine-tune the result on their + own data. 
+ :type confidence: float + """ + + _validation = { + 'is_identical': {'required': True}, + 'confidence': {'required': True}, + } + + _attribute_map = { + 'is_identical': {'key': 'isIdentical', 'type': 'bool'}, + 'confidence': {'key': 'confidence', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(VerifyResult, self).__init__(**kwargs) + self.is_identical = kwargs.get('is_identical', None) + self.confidence = kwargs.get('confidence', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/_models_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/_models_py3.py new file mode 100644 index 000000000000..64ac24c59ed7 --- /dev/null +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/_models_py3.py @@ -0,0 +1,1639 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from msrest.exceptions import HttpOperationError + + +class Accessory(Model): + """Accessory item and corresponding confidence level. + + :param type: Type of an accessory. 
Possible values include: 'headWear', + 'glasses', 'mask' + :type type: str or + ~azure.cognitiveservices.vision.face.models.AccessoryType + :param confidence: Confidence level of an accessory + :type confidence: float + """ + + _attribute_map = { + 'type': {'key': 'type', 'type': 'AccessoryType'}, + 'confidence': {'key': 'confidence', 'type': 'float'}, + } + + def __init__(self, *, type=None, confidence: float=None, **kwargs) -> None: + super(Accessory, self).__init__(**kwargs) + self.type = type + self.confidence = confidence + + +class APIError(Model): + """Error information returned by the API. + + :param error: + :type error: ~azure.cognitiveservices.vision.face.models.Error + """ + + _attribute_map = { + 'error': {'key': 'error', 'type': 'Error'}, + } + + def __init__(self, *, error=None, **kwargs) -> None: + super(APIError, self).__init__(**kwargs) + self.error = error + + +class APIErrorException(HttpOperationError): + """Server responsed with exception of type: 'APIError'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. + """ + + def __init__(self, deserialize, response, *args): + + super(APIErrorException, self).__init__(deserialize, response, 'APIError', *args) + + +class ApplySnapshotRequest(Model): + """Request body for applying snapshot operation. + + All required parameters must be populated in order to send to Azure. + + :param object_id: Required. User specified target object id to be created + from the snapshot. + :type object_id: str + :param mode: Snapshot applying mode. Currently only CreateNew is + supported, which means the apply operation will fail if target + subscription already contains an object of same type and using the same + objectId. Users can specify the "objectId" in request body to avoid such + conflicts. Possible values include: 'CreateNew'. Default value: + "CreateNew" . 
+ :type mode: str or + ~azure.cognitiveservices.vision.face.models.SnapshotApplyMode + """ + + _validation = { + 'object_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, + } + + _attribute_map = { + 'object_id': {'key': 'objectId', 'type': 'str'}, + 'mode': {'key': 'mode', 'type': 'SnapshotApplyMode'}, + } + + def __init__(self, *, object_id: str, mode="CreateNew", **kwargs) -> None: + super(ApplySnapshotRequest, self).__init__(**kwargs) + self.object_id = object_id + self.mode = mode + + +class Blur(Model): + """Properties describing any presence of blur within the image. + + :param blur_level: An enum value indicating level of blurriness. Possible + values include: 'Low', 'Medium', 'High' + :type blur_level: str or + ~azure.cognitiveservices.vision.face.models.BlurLevel + :param value: A number indicating level of blurriness ranging from 0 to 1. + :type value: float + """ + + _attribute_map = { + 'blur_level': {'key': 'blurLevel', 'type': 'BlurLevel'}, + 'value': {'key': 'value', 'type': 'float'}, + } + + def __init__(self, *, blur_level=None, value: float=None, **kwargs) -> None: + super(Blur, self).__init__(**kwargs) + self.blur_level = blur_level + self.value = value + + +class Coordinate(Model): + """Coordinates within an image. + + All required parameters must be populated in order to send to Azure. + + :param x: Required. The horizontal component, in pixels. + :type x: float + :param y: Required. The vertical component, in pixels. + :type y: float + """ + + _validation = { + 'x': {'required': True}, + 'y': {'required': True}, + } + + _attribute_map = { + 'x': {'key': 'x', 'type': 'float'}, + 'y': {'key': 'y', 'type': 'float'}, + } + + def __init__(self, *, x: float, y: float, **kwargs) -> None: + super(Coordinate, self).__init__(**kwargs) + self.x = x + self.y = y + + +class DetectedFace(Model): + """Detected Face object. + + All required parameters must be populated in order to send to Azure. 
+ + :param face_id: + :type face_id: str + :param recognition_model: Possible values include: 'recognition_01', + 'recognition_02'. Default value: "recognition_01" . + :type recognition_model: str or + ~azure.cognitiveservices.vision.face.models.RecognitionModel + :param face_rectangle: Required. + :type face_rectangle: + ~azure.cognitiveservices.vision.face.models.FaceRectangle + :param face_landmarks: + :type face_landmarks: + ~azure.cognitiveservices.vision.face.models.FaceLandmarks + :param face_attributes: + :type face_attributes: + ~azure.cognitiveservices.vision.face.models.FaceAttributes + """ + + _validation = { + 'face_rectangle': {'required': True}, + } + + _attribute_map = { + 'face_id': {'key': 'faceId', 'type': 'str'}, + 'recognition_model': {'key': 'recognitionModel', 'type': 'str'}, + 'face_rectangle': {'key': 'faceRectangle', 'type': 'FaceRectangle'}, + 'face_landmarks': {'key': 'faceLandmarks', 'type': 'FaceLandmarks'}, + 'face_attributes': {'key': 'faceAttributes', 'type': 'FaceAttributes'}, + } + + def __init__(self, *, face_rectangle, face_id: str=None, recognition_model="recognition_01", face_landmarks=None, face_attributes=None, **kwargs) -> None: + super(DetectedFace, self).__init__(**kwargs) + self.face_id = face_id + self.recognition_model = recognition_model + self.face_rectangle = face_rectangle + self.face_landmarks = face_landmarks + self.face_attributes = face_attributes + + +class Emotion(Model): + """Properties describing facial emotion in form of confidence ranging from 0 + to 1. 
+ + :param anger: + :type anger: float + :param contempt: + :type contempt: float + :param disgust: + :type disgust: float + :param fear: + :type fear: float + :param happiness: + :type happiness: float + :param neutral: + :type neutral: float + :param sadness: + :type sadness: float + :param surprise: + :type surprise: float + """ + + _attribute_map = { + 'anger': {'key': 'anger', 'type': 'float'}, + 'contempt': {'key': 'contempt', 'type': 'float'}, + 'disgust': {'key': 'disgust', 'type': 'float'}, + 'fear': {'key': 'fear', 'type': 'float'}, + 'happiness': {'key': 'happiness', 'type': 'float'}, + 'neutral': {'key': 'neutral', 'type': 'float'}, + 'sadness': {'key': 'sadness', 'type': 'float'}, + 'surprise': {'key': 'surprise', 'type': 'float'}, + } + + def __init__(self, *, anger: float=None, contempt: float=None, disgust: float=None, fear: float=None, happiness: float=None, neutral: float=None, sadness: float=None, surprise: float=None, **kwargs) -> None: + super(Emotion, self).__init__(**kwargs) + self.anger = anger + self.contempt = contempt + self.disgust = disgust + self.fear = fear + self.happiness = happiness + self.neutral = neutral + self.sadness = sadness + self.surprise = surprise + + +class Error(Model): + """Error body. + + :param code: + :type code: str + :param message: + :type message: str + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + } + + def __init__(self, *, code: str=None, message: str=None, **kwargs) -> None: + super(Error, self).__init__(**kwargs) + self.code = code + self.message = message + + +class Exposure(Model): + """Properties describing exposure level of the image. + + :param exposure_level: An enum value indicating level of exposure. 
+ Possible values include: 'UnderExposure', 'GoodExposure', 'OverExposure' + :type exposure_level: str or + ~azure.cognitiveservices.vision.face.models.ExposureLevel + :param value: A number indicating level of exposure level ranging from 0 + to 1. [0, 0.25) is under exposure. [0.25, 0.75) is good exposure. [0.75, + 1] is over exposure. + :type value: float + """ + + _attribute_map = { + 'exposure_level': {'key': 'exposureLevel', 'type': 'ExposureLevel'}, + 'value': {'key': 'value', 'type': 'float'}, + } + + def __init__(self, *, exposure_level=None, value: float=None, **kwargs) -> None: + super(Exposure, self).__init__(**kwargs) + self.exposure_level = exposure_level + self.value = value + + +class FaceAttributes(Model): + """Face Attributes. + + :param age: Age in years + :type age: float + :param gender: Possible gender of the face. Possible values include: + 'male', 'female' + :type gender: str or ~azure.cognitiveservices.vision.face.models.Gender + :param smile: Smile intensity, a number between [0,1] + :type smile: float + :param facial_hair: Properties describing facial hair attributes. + :type facial_hair: ~azure.cognitiveservices.vision.face.models.FacialHair + :param glasses: Glasses type if any of the face. Possible values include: + 'noGlasses', 'readingGlasses', 'sunglasses', 'swimmingGoggles' + :type glasses: str or + ~azure.cognitiveservices.vision.face.models.GlassesType + :param head_pose: Properties indicating head pose of the face. + :type head_pose: ~azure.cognitiveservices.vision.face.models.HeadPose + :param emotion: Properties describing facial emotion in form of confidence + ranging from 0 to 1. + :type emotion: ~azure.cognitiveservices.vision.face.models.Emotion + :param hair: Properties describing hair attributes. + :type hair: ~azure.cognitiveservices.vision.face.models.Hair + :param makeup: Properties describing present makeups on a given face. 
+ :type makeup: ~azure.cognitiveservices.vision.face.models.Makeup + :param occlusion: Properties describing occlusions on a given face. + :type occlusion: ~azure.cognitiveservices.vision.face.models.Occlusion + :param accessories: Properties describing any accessories on a given face. + :type accessories: + list[~azure.cognitiveservices.vision.face.models.Accessory] + :param blur: Properties describing any presence of blur within the image. + :type blur: ~azure.cognitiveservices.vision.face.models.Blur + :param exposure: Properties describing exposure level of the image. + :type exposure: ~azure.cognitiveservices.vision.face.models.Exposure + :param noise: Properties describing noise level of the image. + :type noise: ~azure.cognitiveservices.vision.face.models.Noise + """ + + _attribute_map = { + 'age': {'key': 'age', 'type': 'float'}, + 'gender': {'key': 'gender', 'type': 'Gender'}, + 'smile': {'key': 'smile', 'type': 'float'}, + 'facial_hair': {'key': 'facialHair', 'type': 'FacialHair'}, + 'glasses': {'key': 'glasses', 'type': 'GlassesType'}, + 'head_pose': {'key': 'headPose', 'type': 'HeadPose'}, + 'emotion': {'key': 'emotion', 'type': 'Emotion'}, + 'hair': {'key': 'hair', 'type': 'Hair'}, + 'makeup': {'key': 'makeup', 'type': 'Makeup'}, + 'occlusion': {'key': 'occlusion', 'type': 'Occlusion'}, + 'accessories': {'key': 'accessories', 'type': '[Accessory]'}, + 'blur': {'key': 'blur', 'type': 'Blur'}, + 'exposure': {'key': 'exposure', 'type': 'Exposure'}, + 'noise': {'key': 'noise', 'type': 'Noise'}, + } + + def __init__(self, *, age: float=None, gender=None, smile: float=None, facial_hair=None, glasses=None, head_pose=None, emotion=None, hair=None, makeup=None, occlusion=None, accessories=None, blur=None, exposure=None, noise=None, **kwargs) -> None: + super(FaceAttributes, self).__init__(**kwargs) + self.age = age + self.gender = gender + self.smile = smile + self.facial_hair = facial_hair + self.glasses = glasses + self.head_pose = head_pose + self.emotion = 
emotion + self.hair = hair + self.makeup = makeup + self.occlusion = occlusion + self.accessories = accessories + self.blur = blur + self.exposure = exposure + self.noise = noise + + +class FaceLandmarks(Model): + """A collection of 27-point face landmarks pointing to the important positions + of face components. + + :param pupil_left: + :type pupil_left: ~azure.cognitiveservices.vision.face.models.Coordinate + :param pupil_right: + :type pupil_right: ~azure.cognitiveservices.vision.face.models.Coordinate + :param nose_tip: + :type nose_tip: ~azure.cognitiveservices.vision.face.models.Coordinate + :param mouth_left: + :type mouth_left: ~azure.cognitiveservices.vision.face.models.Coordinate + :param mouth_right: + :type mouth_right: ~azure.cognitiveservices.vision.face.models.Coordinate + :param eyebrow_left_outer: + :type eyebrow_left_outer: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param eyebrow_left_inner: + :type eyebrow_left_inner: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param eye_left_outer: + :type eye_left_outer: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param eye_left_top: + :type eye_left_top: ~azure.cognitiveservices.vision.face.models.Coordinate + :param eye_left_bottom: + :type eye_left_bottom: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param eye_left_inner: + :type eye_left_inner: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param eyebrow_right_inner: + :type eyebrow_right_inner: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param eyebrow_right_outer: + :type eyebrow_right_outer: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param eye_right_inner: + :type eye_right_inner: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param eye_right_top: + :type eye_right_top: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param eye_right_bottom: + :type eye_right_bottom: + ~azure.cognitiveservices.vision.face.models.Coordinate 
+ :param eye_right_outer: + :type eye_right_outer: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param nose_root_left: + :type nose_root_left: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param nose_root_right: + :type nose_root_right: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param nose_left_alar_top: + :type nose_left_alar_top: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param nose_right_alar_top: + :type nose_right_alar_top: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param nose_left_alar_out_tip: + :type nose_left_alar_out_tip: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param nose_right_alar_out_tip: + :type nose_right_alar_out_tip: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param upper_lip_top: + :type upper_lip_top: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param upper_lip_bottom: + :type upper_lip_bottom: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param under_lip_top: + :type under_lip_top: + ~azure.cognitiveservices.vision.face.models.Coordinate + :param under_lip_bottom: + :type under_lip_bottom: + ~azure.cognitiveservices.vision.face.models.Coordinate + """ + + _attribute_map = { + 'pupil_left': {'key': 'pupilLeft', 'type': 'Coordinate'}, + 'pupil_right': {'key': 'pupilRight', 'type': 'Coordinate'}, + 'nose_tip': {'key': 'noseTip', 'type': 'Coordinate'}, + 'mouth_left': {'key': 'mouthLeft', 'type': 'Coordinate'}, + 'mouth_right': {'key': 'mouthRight', 'type': 'Coordinate'}, + 'eyebrow_left_outer': {'key': 'eyebrowLeftOuter', 'type': 'Coordinate'}, + 'eyebrow_left_inner': {'key': 'eyebrowLeftInner', 'type': 'Coordinate'}, + 'eye_left_outer': {'key': 'eyeLeftOuter', 'type': 'Coordinate'}, + 'eye_left_top': {'key': 'eyeLeftTop', 'type': 'Coordinate'}, + 'eye_left_bottom': {'key': 'eyeLeftBottom', 'type': 'Coordinate'}, + 'eye_left_inner': {'key': 'eyeLeftInner', 'type': 'Coordinate'}, + 'eyebrow_right_inner': 
{'key': 'eyebrowRightInner', 'type': 'Coordinate'}, + 'eyebrow_right_outer': {'key': 'eyebrowRightOuter', 'type': 'Coordinate'}, + 'eye_right_inner': {'key': 'eyeRightInner', 'type': 'Coordinate'}, + 'eye_right_top': {'key': 'eyeRightTop', 'type': 'Coordinate'}, + 'eye_right_bottom': {'key': 'eyeRightBottom', 'type': 'Coordinate'}, + 'eye_right_outer': {'key': 'eyeRightOuter', 'type': 'Coordinate'}, + 'nose_root_left': {'key': 'noseRootLeft', 'type': 'Coordinate'}, + 'nose_root_right': {'key': 'noseRootRight', 'type': 'Coordinate'}, + 'nose_left_alar_top': {'key': 'noseLeftAlarTop', 'type': 'Coordinate'}, + 'nose_right_alar_top': {'key': 'noseRightAlarTop', 'type': 'Coordinate'}, + 'nose_left_alar_out_tip': {'key': 'noseLeftAlarOutTip', 'type': 'Coordinate'}, + 'nose_right_alar_out_tip': {'key': 'noseRightAlarOutTip', 'type': 'Coordinate'}, + 'upper_lip_top': {'key': 'upperLipTop', 'type': 'Coordinate'}, + 'upper_lip_bottom': {'key': 'upperLipBottom', 'type': 'Coordinate'}, + 'under_lip_top': {'key': 'underLipTop', 'type': 'Coordinate'}, + 'under_lip_bottom': {'key': 'underLipBottom', 'type': 'Coordinate'}, + } + + def __init__(self, *, pupil_left=None, pupil_right=None, nose_tip=None, mouth_left=None, mouth_right=None, eyebrow_left_outer=None, eyebrow_left_inner=None, eye_left_outer=None, eye_left_top=None, eye_left_bottom=None, eye_left_inner=None, eyebrow_right_inner=None, eyebrow_right_outer=None, eye_right_inner=None, eye_right_top=None, eye_right_bottom=None, eye_right_outer=None, nose_root_left=None, nose_root_right=None, nose_left_alar_top=None, nose_right_alar_top=None, nose_left_alar_out_tip=None, nose_right_alar_out_tip=None, upper_lip_top=None, upper_lip_bottom=None, under_lip_top=None, under_lip_bottom=None, **kwargs) -> None: + super(FaceLandmarks, self).__init__(**kwargs) + self.pupil_left = pupil_left + self.pupil_right = pupil_right + self.nose_tip = nose_tip + self.mouth_left = mouth_left + self.mouth_right = mouth_right + self.eyebrow_left_outer 
= eyebrow_left_outer + self.eyebrow_left_inner = eyebrow_left_inner + self.eye_left_outer = eye_left_outer + self.eye_left_top = eye_left_top + self.eye_left_bottom = eye_left_bottom + self.eye_left_inner = eye_left_inner + self.eyebrow_right_inner = eyebrow_right_inner + self.eyebrow_right_outer = eyebrow_right_outer + self.eye_right_inner = eye_right_inner + self.eye_right_top = eye_right_top + self.eye_right_bottom = eye_right_bottom + self.eye_right_outer = eye_right_outer + self.nose_root_left = nose_root_left + self.nose_root_right = nose_root_right + self.nose_left_alar_top = nose_left_alar_top + self.nose_right_alar_top = nose_right_alar_top + self.nose_left_alar_out_tip = nose_left_alar_out_tip + self.nose_right_alar_out_tip = nose_right_alar_out_tip + self.upper_lip_top = upper_lip_top + self.upper_lip_bottom = upper_lip_bottom + self.under_lip_top = under_lip_top + self.under_lip_bottom = under_lip_bottom + + +class NameAndUserDataContract(Model): + """A combination of user defined name and user specified data for the person, + largePersonGroup/personGroup, and largeFaceList/faceList. + + :param name: User defined name, maximum length is 128. + :type name: str + :param user_data: User specified data. Length should not exceed 16KB. + :type user_data: str + """ + + _validation = { + 'name': {'max_length': 128}, + 'user_data': {'max_length': 16384}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'user_data': {'key': 'userData', 'type': 'str'}, + } + + def __init__(self, *, name: str=None, user_data: str=None, **kwargs) -> None: + super(NameAndUserDataContract, self).__init__(**kwargs) + self.name = name + self.user_data = user_data + + +class MetaDataContract(NameAndUserDataContract): + """A combination of user defined name and user specified data and recognition + model name for largePersonGroup/personGroup, and largeFaceList/faceList. + + :param name: User defined name, maximum length is 128. 
+ :type name: str + :param user_data: User specified data. Length should not exceed 16KB. + :type user_data: str + :param recognition_model: Possible values include: 'recognition_01', + 'recognition_02'. Default value: "recognition_01" . + :type recognition_model: str or + ~azure.cognitiveservices.vision.face.models.RecognitionModel + """ + + _validation = { + 'name': {'max_length': 128}, + 'user_data': {'max_length': 16384}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'user_data': {'key': 'userData', 'type': 'str'}, + 'recognition_model': {'key': 'recognitionModel', 'type': 'str'}, + } + + def __init__(self, *, name: str=None, user_data: str=None, recognition_model="recognition_01", **kwargs) -> None: + super(MetaDataContract, self).__init__(name=name, user_data=user_data, **kwargs) + self.recognition_model = recognition_model + + +class FaceList(MetaDataContract): + """Face list object. + + All required parameters must be populated in order to send to Azure. + + :param name: User defined name, maximum length is 128. + :type name: str + :param user_data: User specified data. Length should not exceed 16KB. + :type user_data: str + :param recognition_model: Possible values include: 'recognition_01', + 'recognition_02'. Default value: "recognition_01" . + :type recognition_model: str or + ~azure.cognitiveservices.vision.face.models.RecognitionModel + :param face_list_id: Required. FaceListId of the target face list. + :type face_list_id: str + :param persisted_faces: Persisted faces within the face list. 
+ :type persisted_faces: + list[~azure.cognitiveservices.vision.face.models.PersistedFace] + """ + + _validation = { + 'name': {'max_length': 128}, + 'user_data': {'max_length': 16384}, + 'face_list_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'user_data': {'key': 'userData', 'type': 'str'}, + 'recognition_model': {'key': 'recognitionModel', 'type': 'str'}, + 'face_list_id': {'key': 'faceListId', 'type': 'str'}, + 'persisted_faces': {'key': 'persistedFaces', 'type': '[PersistedFace]'}, + } + + def __init__(self, *, face_list_id: str, name: str=None, user_data: str=None, recognition_model="recognition_01", persisted_faces=None, **kwargs) -> None: + super(FaceList, self).__init__(name=name, user_data=user_data, recognition_model=recognition_model, **kwargs) + self.face_list_id = face_list_id + self.persisted_faces = persisted_faces + + +class FaceRectangle(Model): + """A rectangle within which a face can be found. + + All required parameters must be populated in order to send to Azure. + + :param width: Required. The width of the rectangle, in pixels. + :type width: int + :param height: Required. The height of the rectangle, in pixels. + :type height: int + :param left: Required. The distance from the left edge of the image to the + left edge of the rectangle, in pixels. + :type left: int + :param top: Required. The distance from the top edge of the image to the + top edge of the rectangle, in pixels. 
+ :type top: int + """ + + _validation = { + 'width': {'required': True}, + 'height': {'required': True}, + 'left': {'required': True}, + 'top': {'required': True}, + } + + _attribute_map = { + 'width': {'key': 'width', 'type': 'int'}, + 'height': {'key': 'height', 'type': 'int'}, + 'left': {'key': 'left', 'type': 'int'}, + 'top': {'key': 'top', 'type': 'int'}, + } + + def __init__(self, *, width: int, height: int, left: int, top: int, **kwargs) -> None: + super(FaceRectangle, self).__init__(**kwargs) + self.width = width + self.height = height + self.left = left + self.top = top + + +class FacialHair(Model): + """Properties describing facial hair attributes. + + :param moustache: + :type moustache: float + :param beard: + :type beard: float + :param sideburns: + :type sideburns: float + """ + + _attribute_map = { + 'moustache': {'key': 'moustache', 'type': 'float'}, + 'beard': {'key': 'beard', 'type': 'float'}, + 'sideburns': {'key': 'sideburns', 'type': 'float'}, + } + + def __init__(self, *, moustache: float=None, beard: float=None, sideburns: float=None, **kwargs) -> None: + super(FacialHair, self).__init__(**kwargs) + self.moustache = moustache + self.beard = beard + self.sideburns = sideburns + + +class FindSimilarRequest(Model): + """Request body for find similar operation. + + All required parameters must be populated in order to send to Azure. + + :param face_id: Required. FaceId of the query face. User needs to call + Face - Detect first to get a valid faceId. Note that this faceId is not + persisted and will expire 24 hours after the detection call + :type face_id: str + :param face_list_id: An existing user-specified unique candidate face + list, created in Face List - Create a Face List. Face list contains a set + of persistedFaceIds which are persisted and will never expire. Parameter + faceListId, largeFaceListId and faceIds should not be provided at the same + time. 
+ :type face_list_id: str + :param large_face_list_id: An existing user-specified unique candidate + large face list, created in LargeFaceList - Create. Large face list + contains a set of persistedFaceIds which are persisted and will never + expire. Parameter faceListId, largeFaceListId and faceIds should not be + provided at the same time. + :type large_face_list_id: str + :param face_ids: An array of candidate faceIds. All of them are created by + Face - Detect and the faceIds will expire 24 hours after the detection + call. The number of faceIds is limited to 1000. Parameter faceListId, + largeFaceListId and faceIds should not be provided at the same time. + :type face_ids: list[str] + :param max_num_of_candidates_returned: The number of top similar faces + returned. The valid range is [1, 1000]. Default value: 20 . + :type max_num_of_candidates_returned: int + :param mode: Similar face searching mode. It can be "matchPerson" or + "matchFace". Possible values include: 'matchPerson', 'matchFace'. Default + value: "matchPerson" . 
+ :type mode: str or + ~azure.cognitiveservices.vision.face.models.FindSimilarMatchMode + """ + + _validation = { + 'face_id': {'required': True}, + 'face_list_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, + 'large_face_list_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, + 'face_ids': {'max_items': 1000}, + 'max_num_of_candidates_returned': {'maximum': 1000, 'minimum': 1}, + } + + _attribute_map = { + 'face_id': {'key': 'faceId', 'type': 'str'}, + 'face_list_id': {'key': 'faceListId', 'type': 'str'}, + 'large_face_list_id': {'key': 'largeFaceListId', 'type': 'str'}, + 'face_ids': {'key': 'faceIds', 'type': '[str]'}, + 'max_num_of_candidates_returned': {'key': 'maxNumOfCandidatesReturned', 'type': 'int'}, + 'mode': {'key': 'mode', 'type': 'FindSimilarMatchMode'}, + } + + def __init__(self, *, face_id: str, face_list_id: str=None, large_face_list_id: str=None, face_ids=None, max_num_of_candidates_returned: int=20, mode="matchPerson", **kwargs) -> None: + super(FindSimilarRequest, self).__init__(**kwargs) + self.face_id = face_id + self.face_list_id = face_list_id + self.large_face_list_id = large_face_list_id + self.face_ids = face_ids + self.max_num_of_candidates_returned = max_num_of_candidates_returned + self.mode = mode + + +class GroupRequest(Model): + """Request body for group request. + + All required parameters must be populated in order to send to Azure. + + :param face_ids: Required. Array of candidate faceId created by Face - + Detect. The maximum is 1000 faces + :type face_ids: list[str] + """ + + _validation = { + 'face_ids': {'required': True, 'max_items': 1000}, + } + + _attribute_map = { + 'face_ids': {'key': 'faceIds', 'type': '[str]'}, + } + + def __init__(self, *, face_ids, **kwargs) -> None: + super(GroupRequest, self).__init__(**kwargs) + self.face_ids = face_ids + + +class GroupResult(Model): + """An array of face groups based on face similarity. + + All required parameters must be populated in order to send to Azure. 
+ + :param groups: Required. A partition of the original faces based on face + similarity. Groups are ranked by number of faces + :type groups: list[list[str]] + :param messy_group: Face ids array of faces that cannot find any similar + faces from original faces. + :type messy_group: list[str] + """ + + _validation = { + 'groups': {'required': True}, + } + + _attribute_map = { + 'groups': {'key': 'groups', 'type': '[[str]]'}, + 'messy_group': {'key': 'messyGroup', 'type': '[str]'}, + } + + def __init__(self, *, groups, messy_group=None, **kwargs) -> None: + super(GroupResult, self).__init__(**kwargs) + self.groups = groups + self.messy_group = messy_group + + +class Hair(Model): + """Properties describing hair attributes. + + :param bald: A number describing confidence level of whether the person is + bald. + :type bald: float + :param invisible: A boolean value describing whether the hair is visible + in the image. + :type invisible: bool + :param hair_color: An array of candidate colors and confidence level in + the presence of each. + :type hair_color: + list[~azure.cognitiveservices.vision.face.models.HairColor] + """ + + _attribute_map = { + 'bald': {'key': 'bald', 'type': 'float'}, + 'invisible': {'key': 'invisible', 'type': 'bool'}, + 'hair_color': {'key': 'hairColor', 'type': '[HairColor]'}, + } + + def __init__(self, *, bald: float=None, invisible: bool=None, hair_color=None, **kwargs) -> None: + super(Hair, self).__init__(**kwargs) + self.bald = bald + self.invisible = invisible + self.hair_color = hair_color + + +class HairColor(Model): + """Hair color and associated confidence. + + :param color: Name of the hair color. 
Possible values include: 'unknown', + 'white', 'gray', 'blond', 'brown', 'red', 'black', 'other' + :type color: str or + ~azure.cognitiveservices.vision.face.models.HairColorType + :param confidence: Confidence level of the color + :type confidence: float + """ + + _attribute_map = { + 'color': {'key': 'color', 'type': 'HairColorType'}, + 'confidence': {'key': 'confidence', 'type': 'float'}, + } + + def __init__(self, *, color=None, confidence: float=None, **kwargs) -> None: + super(HairColor, self).__init__(**kwargs) + self.color = color + self.confidence = confidence + + +class HeadPose(Model): + """Properties indicating head pose of the face. + + :param roll: + :type roll: float + :param yaw: + :type yaw: float + :param pitch: + :type pitch: float + """ + + _attribute_map = { + 'roll': {'key': 'roll', 'type': 'float'}, + 'yaw': {'key': 'yaw', 'type': 'float'}, + 'pitch': {'key': 'pitch', 'type': 'float'}, + } + + def __init__(self, *, roll: float=None, yaw: float=None, pitch: float=None, **kwargs) -> None: + super(HeadPose, self).__init__(**kwargs) + self.roll = roll + self.yaw = yaw + self.pitch = pitch + + +class IdentifyCandidate(Model): + """All possible faces that may qualify. + + All required parameters must be populated in order to send to Azure. + + :param person_id: Required. Id of candidate + :type person_id: str + :param confidence: Required. Confidence threshold of identification, used + to judge whether one face belong to one person. The range of + confidenceThreshold is [0, 1] (default specified by algorithm). 
+ :type confidence: float + """ + + _validation = { + 'person_id': {'required': True}, + 'confidence': {'required': True}, + } + + _attribute_map = { + 'person_id': {'key': 'personId', 'type': 'str'}, + 'confidence': {'key': 'confidence', 'type': 'float'}, + } + + def __init__(self, *, person_id: str, confidence: float, **kwargs) -> None: + super(IdentifyCandidate, self).__init__(**kwargs) + self.person_id = person_id + self.confidence = confidence + + +class IdentifyRequest(Model): + """Request body for identify face operation. + + All required parameters must be populated in order to send to Azure. + + :param face_ids: Required. Array of query faces faceIds, created by the + Face - Detect. Each of the faces are identified independently. The valid + number of faceIds is between [1, 10]. + :type face_ids: list[str] + :param person_group_id: PersonGroupId of the target person group, created + by PersonGroup - Create. Parameter personGroupId and largePersonGroupId + should not be provided at the same time. + :type person_group_id: str + :param large_person_group_id: LargePersonGroupId of the target large + person group, created by LargePersonGroup - Create. Parameter + personGroupId and largePersonGroupId should not be provided at the same + time. + :type large_person_group_id: str + :param max_num_of_candidates_returned: The range of + maxNumOfCandidatesReturned is between 1 and 5 (default is 1). Default + value: 1 . + :type max_num_of_candidates_returned: int + :param confidence_threshold: Confidence threshold of identification, used + to judge whether one face belong to one person. The range of + confidenceThreshold is [0, 1] (default specified by algorithm). 
+ :type confidence_threshold: float + """ + + _validation = { + 'face_ids': {'required': True, 'max_items': 10}, + 'person_group_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, + 'large_person_group_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, + 'max_num_of_candidates_returned': {'maximum': 5, 'minimum': 1}, + } + + _attribute_map = { + 'face_ids': {'key': 'faceIds', 'type': '[str]'}, + 'person_group_id': {'key': 'personGroupId', 'type': 'str'}, + 'large_person_group_id': {'key': 'largePersonGroupId', 'type': 'str'}, + 'max_num_of_candidates_returned': {'key': 'maxNumOfCandidatesReturned', 'type': 'int'}, + 'confidence_threshold': {'key': 'confidenceThreshold', 'type': 'float'}, + } + + def __init__(self, *, face_ids, person_group_id: str=None, large_person_group_id: str=None, max_num_of_candidates_returned: int=1, confidence_threshold: float=None, **kwargs) -> None: + super(IdentifyRequest, self).__init__(**kwargs) + self.face_ids = face_ids + self.person_group_id = person_group_id + self.large_person_group_id = large_person_group_id + self.max_num_of_candidates_returned = max_num_of_candidates_returned + self.confidence_threshold = confidence_threshold + + +class IdentifyResult(Model): + """Response body for identify face operation. + + All required parameters must be populated in order to send to Azure. + + :param face_id: Required. FaceId of the query face + :type face_id: str + :param candidates: Required. Identified person candidates for that face + (ranked by confidence). Array size should be no larger than input + maxNumOfCandidatesReturned. If no person is identified, will return an + empty array. 
+ :type candidates: + list[~azure.cognitiveservices.vision.face.models.IdentifyCandidate] + """ + + _validation = { + 'face_id': {'required': True}, + 'candidates': {'required': True}, + } + + _attribute_map = { + 'face_id': {'key': 'faceId', 'type': 'str'}, + 'candidates': {'key': 'candidates', 'type': '[IdentifyCandidate]'}, + } + + def __init__(self, *, face_id: str, candidates, **kwargs) -> None: + super(IdentifyResult, self).__init__(**kwargs) + self.face_id = face_id + self.candidates = candidates + + +class ImageUrl(Model): + """ImageUrl. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. Publicly reachable URL of an image + :type url: str + """ + + _validation = { + 'url': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + } + + def __init__(self, *, url: str, **kwargs) -> None: + super(ImageUrl, self).__init__(**kwargs) + self.url = url + + +class LargeFaceList(MetaDataContract): + """Large face list object. + + All required parameters must be populated in order to send to Azure. + + :param name: User defined name, maximum length is 128. + :type name: str + :param user_data: User specified data. Length should not exceed 16KB. + :type user_data: str + :param recognition_model: Possible values include: 'recognition_01', + 'recognition_02'. Default value: "recognition_01" . + :type recognition_model: str or + ~azure.cognitiveservices.vision.face.models.RecognitionModel + :param large_face_list_id: Required. LargeFaceListId of the target large + face list. 
+ :type large_face_list_id: str + """ + + _validation = { + 'name': {'max_length': 128}, + 'user_data': {'max_length': 16384}, + 'large_face_list_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'user_data': {'key': 'userData', 'type': 'str'}, + 'recognition_model': {'key': 'recognitionModel', 'type': 'str'}, + 'large_face_list_id': {'key': 'largeFaceListId', 'type': 'str'}, + } + + def __init__(self, *, large_face_list_id: str, name: str=None, user_data: str=None, recognition_model="recognition_01", **kwargs) -> None: + super(LargeFaceList, self).__init__(name=name, user_data=user_data, recognition_model=recognition_model, **kwargs) + self.large_face_list_id = large_face_list_id + + +class LargePersonGroup(MetaDataContract): + """Large person group object. + + All required parameters must be populated in order to send to Azure. + + :param name: User defined name, maximum length is 128. + :type name: str + :param user_data: User specified data. Length should not exceed 16KB. + :type user_data: str + :param recognition_model: Possible values include: 'recognition_01', + 'recognition_02'. Default value: "recognition_01" . + :type recognition_model: str or + ~azure.cognitiveservices.vision.face.models.RecognitionModel + :param large_person_group_id: Required. 
LargePersonGroupId of the target + large person groups + :type large_person_group_id: str + """ + + _validation = { + 'name': {'max_length': 128}, + 'user_data': {'max_length': 16384}, + 'large_person_group_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'user_data': {'key': 'userData', 'type': 'str'}, + 'recognition_model': {'key': 'recognitionModel', 'type': 'str'}, + 'large_person_group_id': {'key': 'largePersonGroupId', 'type': 'str'}, + } + + def __init__(self, *, large_person_group_id: str, name: str=None, user_data: str=None, recognition_model="recognition_01", **kwargs) -> None: + super(LargePersonGroup, self).__init__(name=name, user_data=user_data, recognition_model=recognition_model, **kwargs) + self.large_person_group_id = large_person_group_id + + +class Makeup(Model): + """Properties describing present makeups on a given face. + + :param eye_makeup: A boolean value describing whether eye makeup is + present on a face. + :type eye_makeup: bool + :param lip_makeup: A boolean value describing whether lip makeup is + present on a face. + :type lip_makeup: bool + """ + + _attribute_map = { + 'eye_makeup': {'key': 'eyeMakeup', 'type': 'bool'}, + 'lip_makeup': {'key': 'lipMakeup', 'type': 'bool'}, + } + + def __init__(self, *, eye_makeup: bool=None, lip_makeup: bool=None, **kwargs) -> None: + super(Makeup, self).__init__(**kwargs) + self.eye_makeup = eye_makeup + self.lip_makeup = lip_makeup + + +class Noise(Model): + """Properties describing noise level of the image. + + :param noise_level: An enum value indicating level of noise. Possible + values include: 'Low', 'Medium', 'High' + :type noise_level: str or + ~azure.cognitiveservices.vision.face.models.NoiseLevel + :param value: A number indicating level of noise ranging from 0 to + 1. [0, 0.3) is low noise level. 
[0.3, 0.7) is medium noise + level. [0.7, 1] is high noise level. + :type value: float + """ + + _attribute_map = { + 'noise_level': {'key': 'noiseLevel', 'type': 'NoiseLevel'}, + 'value': {'key': 'value', 'type': 'float'}, + } + + def __init__(self, *, noise_level=None, value: float=None, **kwargs) -> None: + super(Noise, self).__init__(**kwargs) + self.noise_level = noise_level + self.value = value + + +class Occlusion(Model): + """Properties describing occlusions on a given face. + + :param forehead_occluded: A boolean value indicating whether forehead is + occluded. + :type forehead_occluded: bool + :param eye_occluded: A boolean value indicating whether eyes are occluded. + :type eye_occluded: bool + :param mouth_occluded: A boolean value indicating whether the mouth is + occluded. + :type mouth_occluded: bool + """ + + _attribute_map = { + 'forehead_occluded': {'key': 'foreheadOccluded', 'type': 'bool'}, + 'eye_occluded': {'key': 'eyeOccluded', 'type': 'bool'}, + 'mouth_occluded': {'key': 'mouthOccluded', 'type': 'bool'}, + } + + def __init__(self, *, forehead_occluded: bool=None, eye_occluded: bool=None, mouth_occluded: bool=None, **kwargs) -> None: + super(Occlusion, self).__init__(**kwargs) + self.forehead_occluded = forehead_occluded + self.eye_occluded = eye_occluded + self.mouth_occluded = mouth_occluded + + +class OperationStatus(Model): + """Operation status object. Operation refers to the asynchronous backend task + including taking a snapshot and applying a snapshot. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. Operation status: notstarted, running, succeeded, + failed. If the operation is requested and waiting to perform, the status + is notstarted. If the operation is ongoing in backend, the status is + running. 
Status succeeded means the operation is completed successfully, + specifically for snapshot taking operation, it illustrates the snapshot is + well taken and ready to apply, and for snapshot applying operation, it + presents the target object has finished creating by the snapshot and ready + to be used. Status failed is often caused by editing the source object + while taking the snapshot or editing the target object while applying the + snapshot before completion, see the field "message" to check the failure + reason. Possible values include: 'notstarted', 'running', 'succeeded', + 'failed' + :type status: str or + ~azure.cognitiveservices.vision.face.models.OperationStatusType + :param created_time: Required. A combined UTC date and time string that + describes the time when the operation (take or apply a snapshot) is + requested. E.g. 2018-12-25T11:41:02.2331413Z. + :type created_time: datetime + :param last_action_time: A combined UTC date and time string that + describes the last time the operation (take or apply a snapshot) is + actively migrating data. The lastActionTime will keep increasing until the + operation finishes. E.g. 2018-12-25T11:51:27.8705696Z. + :type last_action_time: datetime + :param resource_location: When the operation succeeds successfully, for + snapshot taking operation the snapshot id will be included in this field, + and for snapshot applying operation, the path to get the target object + will be returned in this field. + :type resource_location: str + :param message: Show failure message when operation fails (omitted when + operation succeeds). 
+ :type message: str + """ + + _validation = { + 'status': {'required': True}, + 'created_time': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'status', 'type': 'OperationStatusType'}, + 'created_time': {'key': 'createdTime', 'type': 'iso-8601'}, + 'last_action_time': {'key': 'lastActionTime', 'type': 'iso-8601'}, + 'resource_location': {'key': 'resourceLocation', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + } + + def __init__(self, *, status, created_time, last_action_time=None, resource_location: str=None, message: str=None, **kwargs) -> None: + super(OperationStatus, self).__init__(**kwargs) + self.status = status + self.created_time = created_time + self.last_action_time = last_action_time + self.resource_location = resource_location + self.message = message + + +class PersistedFace(Model): + """PersonFace object. + + All required parameters must be populated in order to send to Azure. + + :param persisted_face_id: Required. The persistedFaceId of the target + face, which is persisted and will not expire. Different from faceId + created by Face - Detect and will expire in 24 hours after the detection + call. + :type persisted_face_id: str + :param user_data: User-provided data attached to the face. The size limit + is 1KB. + :type user_data: str + """ + + _validation = { + 'persisted_face_id': {'required': True}, + 'user_data': {'max_length': 1024}, + } + + _attribute_map = { + 'persisted_face_id': {'key': 'persistedFaceId', 'type': 'str'}, + 'user_data': {'key': 'userData', 'type': 'str'}, + } + + def __init__(self, *, persisted_face_id: str, user_data: str=None, **kwargs) -> None: + super(PersistedFace, self).__init__(**kwargs) + self.persisted_face_id = persisted_face_id + self.user_data = user_data + + +class Person(NameAndUserDataContract): + """Person object. + + All required parameters must be populated in order to send to Azure. + + :param name: User defined name, maximum length is 128. 
+ :type name: str + :param user_data: User specified data. Length should not exceed 16KB. + :type user_data: str + :param person_id: Required. PersonId of the target face list. + :type person_id: str + :param persisted_face_ids: PersistedFaceIds of registered faces in the + person. These persistedFaceIds are returned from Person - Add a Person + Face, and will not expire. + :type persisted_face_ids: list[str] + """ + + _validation = { + 'name': {'max_length': 128}, + 'user_data': {'max_length': 16384}, + 'person_id': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'user_data': {'key': 'userData', 'type': 'str'}, + 'person_id': {'key': 'personId', 'type': 'str'}, + 'persisted_face_ids': {'key': 'persistedFaceIds', 'type': '[str]'}, + } + + def __init__(self, *, person_id: str, name: str=None, user_data: str=None, persisted_face_ids=None, **kwargs) -> None: + super(Person, self).__init__(name=name, user_data=user_data, **kwargs) + self.person_id = person_id + self.persisted_face_ids = persisted_face_ids + + +class PersonGroup(MetaDataContract): + """Person group object. + + All required parameters must be populated in order to send to Azure. + + :param name: User defined name, maximum length is 128. + :type name: str + :param user_data: User specified data. Length should not exceed 16KB. + :type user_data: str + :param recognition_model: Possible values include: 'recognition_01', + 'recognition_02'. Default value: "recognition_01" . + :type recognition_model: str or + ~azure.cognitiveservices.vision.face.models.RecognitionModel + :param person_group_id: Required. PersonGroupId of the target person + group. 
+ :type person_group_id: str + """ + + _validation = { + 'name': {'max_length': 128}, + 'user_data': {'max_length': 16384}, + 'person_group_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'user_data': {'key': 'userData', 'type': 'str'}, + 'recognition_model': {'key': 'recognitionModel', 'type': 'str'}, + 'person_group_id': {'key': 'personGroupId', 'type': 'str'}, + } + + def __init__(self, *, person_group_id: str, name: str=None, user_data: str=None, recognition_model="recognition_01", **kwargs) -> None: + super(PersonGroup, self).__init__(name=name, user_data=user_data, recognition_model=recognition_model, **kwargs) + self.person_group_id = person_group_id + + +class SimilarFace(Model): + """Response body for find similar face operation. + + All required parameters must be populated in order to send to Azure. + + :param face_id: FaceId of candidate face when find by faceIds. faceId is + created by Face - Detect and will expire 24 hours after the detection call + :type face_id: str + :param persisted_face_id: PersistedFaceId of candidate face when find by + faceListId. persistedFaceId in face list is persisted and will not expire. + As showed in below response + :type persisted_face_id: str + :param confidence: Required. Similarity confidence of the candidate face. + The higher confidence, the more similar. Range between [0,1]. 
+ :type confidence: float + """ + + _validation = { + 'confidence': {'required': True}, + } + + _attribute_map = { + 'face_id': {'key': 'faceId', 'type': 'str'}, + 'persisted_face_id': {'key': 'persistedFaceId', 'type': 'str'}, + 'confidence': {'key': 'confidence', 'type': 'float'}, + } + + def __init__(self, *, confidence: float, face_id: str=None, persisted_face_id: str=None, **kwargs) -> None: + super(SimilarFace, self).__init__(**kwargs) + self.face_id = face_id + self.persisted_face_id = persisted_face_id + self.confidence = confidence + + +class Snapshot(Model): + """Snapshot object. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. Snapshot id. + :type id: str + :param account: Required. Azure Cognitive Service Face account id of the + subscriber who created the snapshot by Snapshot - Take. + :type account: str + :param type: Required. Type of the source object in the snapshot, + specified by the subscriber who created the snapshot when calling Snapshot + - Take. Currently FaceList, PersonGroup, LargeFaceList and + LargePersonGroup are supported. Possible values include: 'FaceList', + 'LargeFaceList', 'LargePersonGroup', 'PersonGroup' + :type type: str or + ~azure.cognitiveservices.vision.face.models.SnapshotObjectType + :param apply_scope: Required. Array of the target Face subscription ids + for the snapshot, specified by the user who created the snapshot when + calling Snapshot - Take. For each snapshot, only subscriptions included in + the applyScope of Snapshot - Take can apply it. + :type apply_scope: list[str] + :param user_data: User specified data about the snapshot for any purpose. + Length should not exceed 16KB. + :type user_data: str + :param created_time: Required. A combined UTC date and time string that + describes the created time of the snapshot. E.g. + 2018-12-25T11:41:02.2331413Z. + :type created_time: datetime + :param last_update_time: Required. 
A combined UTC date and time string + that describes the last time when the snapshot was created or updated by + Snapshot - Update. E.g. 2018-12-25T11:51:27.8705696Z. + :type last_update_time: datetime + """ + + _validation = { + 'id': {'required': True}, + 'account': {'required': True}, + 'type': {'required': True}, + 'apply_scope': {'required': True}, + 'user_data': {'max_length': 16384}, + 'created_time': {'required': True}, + 'last_update_time': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'account': {'key': 'account', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'SnapshotObjectType'}, + 'apply_scope': {'key': 'applyScope', 'type': '[str]'}, + 'user_data': {'key': 'userData', 'type': 'str'}, + 'created_time': {'key': 'createdTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + } + + def __init__(self, *, id: str, account: str, type, apply_scope, created_time, last_update_time, user_data: str=None, **kwargs) -> None: + super(Snapshot, self).__init__(**kwargs) + self.id = id + self.account = account + self.type = type + self.apply_scope = apply_scope + self.user_data = user_data + self.created_time = created_time + self.last_update_time = last_update_time + + +class TakeSnapshotRequest(Model): + """Request body for taking snapshot operation. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. User specified type for the source object to take + snapshot from. Currently FaceList, PersonGroup, LargeFaceList and + LargePersonGroup are supported. Possible values include: 'FaceList', + 'LargeFaceList', 'LargePersonGroup', 'PersonGroup' + :type type: str or + ~azure.cognitiveservices.vision.face.models.SnapshotObjectType + :param object_id: Required. User specified source object id to take + snapshot from. + :type object_id: str + :param apply_scope: Required. User specified array of target Face + subscription ids for the snapshot. 
For each snapshot, only subscriptions + included in the applyScope of Snapshot - Take can apply it. + :type apply_scope: list[str] + :param user_data: User specified data about the snapshot for any purpose. + Length should not exceed 16KB. + :type user_data: str + """ + + _validation = { + 'type': {'required': True}, + 'object_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, + 'apply_scope': {'required': True}, + 'user_data': {'max_length': 16384}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'SnapshotObjectType'}, + 'object_id': {'key': 'objectId', 'type': 'str'}, + 'apply_scope': {'key': 'applyScope', 'type': '[str]'}, + 'user_data': {'key': 'userData', 'type': 'str'}, + } + + def __init__(self, *, type, object_id: str, apply_scope, user_data: str=None, **kwargs) -> None: + super(TakeSnapshotRequest, self).__init__(**kwargs) + self.type = type + self.object_id = object_id + self.apply_scope = apply_scope + self.user_data = user_data + + +class TrainingStatus(Model): + """Training status object. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. Training status: notstarted, running, succeeded, + failed. If the training process is waiting to perform, the status is + notstarted. If the training is ongoing, the status is running. Status + succeed means this person group or large person group is ready for Face - + Identify, or this large face list is ready for Face - Find Similar. Status + failed is often caused by no person or no persisted face exist in the + person group or large person group, or no persisted face exist in the + large face list. Possible values include: 'nonstarted', 'running', + 'succeeded', 'failed' + :type status: str or + ~azure.cognitiveservices.vision.face.models.TrainingStatusType + :param created: Required. A combined UTC date and time string that + describes the created time of the person group, large person group or + large face list. 
+ :type created: datetime + :param last_action: A combined UTC date and time string that describes the + last modify time of the person group, large person group or large face + list, could be null value when the group is not successfully trained. + :type last_action: datetime + :param last_successful_training: A combined UTC date and time string that + describes the last successful training time of the person group, large + person group or large face list. + :type last_successful_training: datetime + :param message: Show failure message when training failed (omitted when + training succeed). + :type message: str + """ + + _validation = { + 'status': {'required': True}, + 'created': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'status', 'type': 'TrainingStatusType'}, + 'created': {'key': 'createdDateTime', 'type': 'iso-8601'}, + 'last_action': {'key': 'lastActionDateTime', 'type': 'iso-8601'}, + 'last_successful_training': {'key': 'lastSuccessfulTrainingDateTime', 'type': 'iso-8601'}, + 'message': {'key': 'message', 'type': 'str'}, + } + + def __init__(self, *, status, created, last_action=None, last_successful_training=None, message: str=None, **kwargs) -> None: + super(TrainingStatus, self).__init__(**kwargs) + self.status = status + self.created = created + self.last_action = last_action + self.last_successful_training = last_successful_training + self.message = message + + +class UpdateFaceRequest(Model): + """Request to update face data. + + :param user_data: User-provided data attached to the face. The size limit + is 1KB. 
+ :type user_data: str + """ + + _validation = { + 'user_data': {'max_length': 1024}, + } + + _attribute_map = { + 'user_data': {'key': 'userData', 'type': 'str'}, + } + + def __init__(self, *, user_data: str=None, **kwargs) -> None: + super(UpdateFaceRequest, self).__init__(**kwargs) + self.user_data = user_data + + +class UpdateSnapshotRequest(Model): + """Request body for updating a snapshot, with a combination of user defined + apply scope and user specified data. + + :param apply_scope: Array of the target Face subscription ids for the + snapshot, specified by the user who created the snapshot when calling + Snapshot - Take. For each snapshot, only subscriptions included in the + applyScope of Snapshot - Take can apply it. + :type apply_scope: list[str] + :param user_data: User specified data about the snapshot for any purpose. + Length should not exceed 16KB. + :type user_data: str + """ + + _validation = { + 'user_data': {'max_length': 16384}, + } + + _attribute_map = { + 'apply_scope': {'key': 'applyScope', 'type': '[str]'}, + 'user_data': {'key': 'userData', 'type': 'str'}, + } + + def __init__(self, *, apply_scope=None, user_data: str=None, **kwargs) -> None: + super(UpdateSnapshotRequest, self).__init__(**kwargs) + self.apply_scope = apply_scope + self.user_data = user_data + + +class VerifyFaceToFaceRequest(Model): + """Request body for face to face verification. + + All required parameters must be populated in order to send to Azure. + + :param face_id1: Required. FaceId of the first face, comes from Face - + Detect + :type face_id1: str + :param face_id2: Required. 
FaceId of the second face, comes from Face - + Detect + :type face_id2: str + """ + + _validation = { + 'face_id1': {'required': True}, + 'face_id2': {'required': True}, + } + + _attribute_map = { + 'face_id1': {'key': 'faceId1', 'type': 'str'}, + 'face_id2': {'key': 'faceId2', 'type': 'str'}, + } + + def __init__(self, *, face_id1: str, face_id2: str, **kwargs) -> None: + super(VerifyFaceToFaceRequest, self).__init__(**kwargs) + self.face_id1 = face_id1 + self.face_id2 = face_id2 + + +class VerifyFaceToPersonRequest(Model): + """Request body for face to person verification. + + All required parameters must be populated in order to send to Azure. + + :param face_id: Required. FaceId of the face, comes from Face - Detect + :type face_id: str + :param person_group_id: Using existing personGroupId and personId for fast + loading a specified person. personGroupId is created in PersonGroup - + Create. Parameter personGroupId and largePersonGroupId should not be + provided at the same time. + :type person_group_id: str + :param large_person_group_id: Using existing largePersonGroupId and + personId for fast loading a specified person. largePersonGroupId is + created in LargePersonGroup - Create. Parameter personGroupId and + largePersonGroupId should not be provided at the same time. + :type large_person_group_id: str + :param person_id: Required. Specify a certain person in a person group or + a large person group. personId is created in PersonGroup Person - Create + or LargePersonGroup Person - Create. 
+ :type person_id: str + """ + + _validation = { + 'face_id': {'required': True}, + 'person_group_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, + 'large_person_group_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, + 'person_id': {'required': True}, + } + + _attribute_map = { + 'face_id': {'key': 'faceId', 'type': 'str'}, + 'person_group_id': {'key': 'personGroupId', 'type': 'str'}, + 'large_person_group_id': {'key': 'largePersonGroupId', 'type': 'str'}, + 'person_id': {'key': 'personId', 'type': 'str'}, + } + + def __init__(self, *, face_id: str, person_id: str, person_group_id: str=None, large_person_group_id: str=None, **kwargs) -> None: + super(VerifyFaceToPersonRequest, self).__init__(**kwargs) + self.face_id = face_id + self.person_group_id = person_group_id + self.large_person_group_id = large_person_group_id + self.person_id = person_id + + +class VerifyResult(Model): + """Result of the verify operation. + + All required parameters must be populated in order to send to Azure. + + :param is_identical: Required. True if the two faces belong to the same + person or the face belongs to the person, otherwise false. + :type is_identical: bool + :param confidence: Required. A number indicates the similarity confidence + of whether two faces belong to the same person, or whether the face + belongs to the person. By default, isIdentical is set to True if + similarity confidence is greater than or equal to 0.5. This is useful for + advanced users to override "isIdentical" and fine-tune the result on their + own data. 
+ :type confidence: float + """ + + _validation = { + 'is_identical': {'required': True}, + 'confidence': {'required': True}, + } + + _attribute_map = { + 'is_identical': {'key': 'isIdentical', 'type': 'bool'}, + 'confidence': {'key': 'confidence', 'type': 'float'}, + } + + def __init__(self, *, is_identical: bool, confidence: float, **kwargs) -> None: + super(VerifyResult, self).__init__(**kwargs) + self.is_identical = is_identical + self.confidence = confidence diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/accessory.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/accessory.py deleted file mode 100644 index b86acc571c10..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/accessory.py +++ /dev/null @@ -1,34 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Accessory(Model): - """Accessory item and corresponding confidence level. - - :param type: Type of an accessory. 
Possible values include: 'headWear', - 'glasses', 'mask' - :type type: str or - ~azure.cognitiveservices.vision.face.models.AccessoryType - :param confidence: Confidence level of an accessory - :type confidence: float - """ - - _attribute_map = { - 'type': {'key': 'type', 'type': 'AccessoryType'}, - 'confidence': {'key': 'confidence', 'type': 'float'}, - } - - def __init__(self, **kwargs): - super(Accessory, self).__init__(**kwargs) - self.type = kwargs.get('type', None) - self.confidence = kwargs.get('confidence', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/accessory_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/accessory_py3.py deleted file mode 100644 index 76a6b68edbbd..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/accessory_py3.py +++ /dev/null @@ -1,34 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Accessory(Model): - """Accessory item and corresponding confidence level. - - :param type: Type of an accessory. 
Possible values include: 'headWear', - 'glasses', 'mask' - :type type: str or - ~azure.cognitiveservices.vision.face.models.AccessoryType - :param confidence: Confidence level of an accessory - :type confidence: float - """ - - _attribute_map = { - 'type': {'key': 'type', 'type': 'AccessoryType'}, - 'confidence': {'key': 'confidence', 'type': 'float'}, - } - - def __init__(self, *, type=None, confidence: float=None, **kwargs) -> None: - super(Accessory, self).__init__(**kwargs) - self.type = type - self.confidence = confidence diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/api_error.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/api_error.py deleted file mode 100644 index 79e8c1765e4b..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/api_error.py +++ /dev/null @@ -1,41 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model -from msrest.exceptions import HttpOperationError - - -class APIError(Model): - """Error information returned by the API. 
- - :param error: - :type error: ~azure.cognitiveservices.vision.face.models.Error - """ - - _attribute_map = { - 'error': {'key': 'error', 'type': 'Error'}, - } - - def __init__(self, **kwargs): - super(APIError, self).__init__(**kwargs) - self.error = kwargs.get('error', None) - - -class APIErrorException(HttpOperationError): - """Server responsed with exception of type: 'APIError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, deserialize, response, *args): - - super(APIErrorException, self).__init__(deserialize, response, 'APIError', *args) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/api_error_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/api_error_py3.py deleted file mode 100644 index 4e362714807d..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/api_error_py3.py +++ /dev/null @@ -1,41 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model -from msrest.exceptions import HttpOperationError - - -class APIError(Model): - """Error information returned by the API. 
- - :param error: - :type error: ~azure.cognitiveservices.vision.face.models.Error - """ - - _attribute_map = { - 'error': {'key': 'error', 'type': 'Error'}, - } - - def __init__(self, *, error=None, **kwargs) -> None: - super(APIError, self).__init__(**kwargs) - self.error = error - - -class APIErrorException(HttpOperationError): - """Server responsed with exception of type: 'APIError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, deserialize, response, *args): - - super(APIErrorException, self).__init__(deserialize, response, 'APIError', *args) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/apply_snapshot_request.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/apply_snapshot_request.py deleted file mode 100644 index 6596c9ecb017..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/apply_snapshot_request.py +++ /dev/null @@ -1,45 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class ApplySnapshotRequest(Model): - """Request body for applying snapshot operation. - - All required parameters must be populated in order to send to Azure. - - :param object_id: Required. User specified target object id to be created - from the snapshot. - :type object_id: str - :param mode: Snapshot applying mode. 
Currently only CreateNew is - supported, which means the apply operation will fail if target - subscription already contains an object of same type and using the same - objectId. Users can specify the "objectId" in request body to avoid such - conflicts. Possible values include: 'CreateNew'. Default value: - "CreateNew" . - :type mode: str or - ~azure.cognitiveservices.vision.face.models.SnapshotApplyMode - """ - - _validation = { - 'object_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - } - - _attribute_map = { - 'object_id': {'key': 'objectId', 'type': 'str'}, - 'mode': {'key': 'mode', 'type': 'SnapshotApplyMode'}, - } - - def __init__(self, **kwargs): - super(ApplySnapshotRequest, self).__init__(**kwargs) - self.object_id = kwargs.get('object_id', None) - self.mode = kwargs.get('mode', "CreateNew") diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/apply_snapshot_request_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/apply_snapshot_request_py3.py deleted file mode 100644 index 7c97f1084cf5..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/apply_snapshot_request_py3.py +++ /dev/null @@ -1,45 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class ApplySnapshotRequest(Model): - """Request body for applying snapshot operation. 
- - All required parameters must be populated in order to send to Azure. - - :param object_id: Required. User specified target object id to be created - from the snapshot. - :type object_id: str - :param mode: Snapshot applying mode. Currently only CreateNew is - supported, which means the apply operation will fail if target - subscription already contains an object of same type and using the same - objectId. Users can specify the "objectId" in request body to avoid such - conflicts. Possible values include: 'CreateNew'. Default value: - "CreateNew" . - :type mode: str or - ~azure.cognitiveservices.vision.face.models.SnapshotApplyMode - """ - - _validation = { - 'object_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - } - - _attribute_map = { - 'object_id': {'key': 'objectId', 'type': 'str'}, - 'mode': {'key': 'mode', 'type': 'SnapshotApplyMode'}, - } - - def __init__(self, *, object_id: str, mode="CreateNew", **kwargs) -> None: - super(ApplySnapshotRequest, self).__init__(**kwargs) - self.object_id = object_id - self.mode = mode diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/blur.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/blur.py deleted file mode 100644 index f7dead76fcf1..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/blur.py +++ /dev/null @@ -1,34 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Blur(Model): - """Properties describing any presence of blur within the image. - - :param blur_level: An enum value indicating level of blurriness. Possible - values include: 'Low', 'Medium', 'High' - :type blur_level: str or - ~azure.cognitiveservices.vision.face.models.BlurLevel - :param value: A number indicating level of blurriness ranging from 0 to 1. - :type value: float - """ - - _attribute_map = { - 'blur_level': {'key': 'blurLevel', 'type': 'BlurLevel'}, - 'value': {'key': 'value', 'type': 'float'}, - } - - def __init__(self, **kwargs): - super(Blur, self).__init__(**kwargs) - self.blur_level = kwargs.get('blur_level', None) - self.value = kwargs.get('value', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/blur_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/blur_py3.py deleted file mode 100644 index db3c6f5860af..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/blur_py3.py +++ /dev/null @@ -1,34 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Blur(Model): - """Properties describing any presence of blur within the image. - - :param blur_level: An enum value indicating level of blurriness. 
Possible - values include: 'Low', 'Medium', 'High' - :type blur_level: str or - ~azure.cognitiveservices.vision.face.models.BlurLevel - :param value: A number indicating level of blurriness ranging from 0 to 1. - :type value: float - """ - - _attribute_map = { - 'blur_level': {'key': 'blurLevel', 'type': 'BlurLevel'}, - 'value': {'key': 'value', 'type': 'float'}, - } - - def __init__(self, *, blur_level=None, value: float=None, **kwargs) -> None: - super(Blur, self).__init__(**kwargs) - self.blur_level = blur_level - self.value = value diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/coordinate.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/coordinate.py deleted file mode 100644 index c786707ccfb9..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/coordinate.py +++ /dev/null @@ -1,39 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Coordinate(Model): - """Coordinates within an image. - - All required parameters must be populated in order to send to Azure. - - :param x: Required. The horizontal component, in pixels. - :type x: float - :param y: Required. The vertical component, in pixels. 
- :type y: float - """ - - _validation = { - 'x': {'required': True}, - 'y': {'required': True}, - } - - _attribute_map = { - 'x': {'key': 'x', 'type': 'float'}, - 'y': {'key': 'y', 'type': 'float'}, - } - - def __init__(self, **kwargs): - super(Coordinate, self).__init__(**kwargs) - self.x = kwargs.get('x', None) - self.y = kwargs.get('y', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/coordinate_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/coordinate_py3.py deleted file mode 100644 index 5068b2380dd5..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/coordinate_py3.py +++ /dev/null @@ -1,39 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Coordinate(Model): - """Coordinates within an image. - - All required parameters must be populated in order to send to Azure. - - :param x: Required. The horizontal component, in pixels. - :type x: float - :param y: Required. The vertical component, in pixels. 
- :type y: float - """ - - _validation = { - 'x': {'required': True}, - 'y': {'required': True}, - } - - _attribute_map = { - 'x': {'key': 'x', 'type': 'float'}, - 'y': {'key': 'y', 'type': 'float'}, - } - - def __init__(self, *, x: float, y: float, **kwargs) -> None: - super(Coordinate, self).__init__(**kwargs) - self.x = x - self.y = y diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/detected_face.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/detected_face.py deleted file mode 100644 index 04778024ae05..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/detected_face.py +++ /dev/null @@ -1,55 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class DetectedFace(Model): - """Detected Face object. - - All required parameters must be populated in order to send to Azure. - - :param face_id: - :type face_id: str - :param recognition_model: Possible values include: 'recognition_01', - 'recognition_02'. Default value: "recognition_01" . - :type recognition_model: str or - ~azure.cognitiveservices.vision.face.models.RecognitionModel - :param face_rectangle: Required. 
- :type face_rectangle: - ~azure.cognitiveservices.vision.face.models.FaceRectangle - :param face_landmarks: - :type face_landmarks: - ~azure.cognitiveservices.vision.face.models.FaceLandmarks - :param face_attributes: - :type face_attributes: - ~azure.cognitiveservices.vision.face.models.FaceAttributes - """ - - _validation = { - 'face_rectangle': {'required': True}, - } - - _attribute_map = { - 'face_id': {'key': 'faceId', 'type': 'str'}, - 'recognition_model': {'key': 'recognitionModel', 'type': 'str'}, - 'face_rectangle': {'key': 'faceRectangle', 'type': 'FaceRectangle'}, - 'face_landmarks': {'key': 'faceLandmarks', 'type': 'FaceLandmarks'}, - 'face_attributes': {'key': 'faceAttributes', 'type': 'FaceAttributes'}, - } - - def __init__(self, **kwargs): - super(DetectedFace, self).__init__(**kwargs) - self.face_id = kwargs.get('face_id', None) - self.recognition_model = kwargs.get('recognition_model', "recognition_01") - self.face_rectangle = kwargs.get('face_rectangle', None) - self.face_landmarks = kwargs.get('face_landmarks', None) - self.face_attributes = kwargs.get('face_attributes', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/detected_face_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/detected_face_py3.py deleted file mode 100644 index 964c0602f91c..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/detected_face_py3.py +++ /dev/null @@ -1,55 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class DetectedFace(Model): - """Detected Face object. - - All required parameters must be populated in order to send to Azure. - - :param face_id: - :type face_id: str - :param recognition_model: Possible values include: 'recognition_01', - 'recognition_02'. Default value: "recognition_01" . - :type recognition_model: str or - ~azure.cognitiveservices.vision.face.models.RecognitionModel - :param face_rectangle: Required. - :type face_rectangle: - ~azure.cognitiveservices.vision.face.models.FaceRectangle - :param face_landmarks: - :type face_landmarks: - ~azure.cognitiveservices.vision.face.models.FaceLandmarks - :param face_attributes: - :type face_attributes: - ~azure.cognitiveservices.vision.face.models.FaceAttributes - """ - - _validation = { - 'face_rectangle': {'required': True}, - } - - _attribute_map = { - 'face_id': {'key': 'faceId', 'type': 'str'}, - 'recognition_model': {'key': 'recognitionModel', 'type': 'str'}, - 'face_rectangle': {'key': 'faceRectangle', 'type': 'FaceRectangle'}, - 'face_landmarks': {'key': 'faceLandmarks', 'type': 'FaceLandmarks'}, - 'face_attributes': {'key': 'faceAttributes', 'type': 'FaceAttributes'}, - } - - def __init__(self, *, face_rectangle, face_id: str=None, recognition_model="recognition_01", face_landmarks=None, face_attributes=None, **kwargs) -> None: - super(DetectedFace, self).__init__(**kwargs) - self.face_id = face_id - self.recognition_model = recognition_model - self.face_rectangle = face_rectangle - self.face_landmarks = face_landmarks - self.face_attributes = face_attributes diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/emotion.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/emotion.py deleted file mode 100644 index bd8a42b2306a..000000000000 --- 
a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/emotion.py +++ /dev/null @@ -1,57 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Emotion(Model): - """Properties describing facial emotion in form of confidence ranging from 0 - to 1. - - :param anger: - :type anger: float - :param contempt: - :type contempt: float - :param disgust: - :type disgust: float - :param fear: - :type fear: float - :param happiness: - :type happiness: float - :param neutral: - :type neutral: float - :param sadness: - :type sadness: float - :param surprise: - :type surprise: float - """ - - _attribute_map = { - 'anger': {'key': 'anger', 'type': 'float'}, - 'contempt': {'key': 'contempt', 'type': 'float'}, - 'disgust': {'key': 'disgust', 'type': 'float'}, - 'fear': {'key': 'fear', 'type': 'float'}, - 'happiness': {'key': 'happiness', 'type': 'float'}, - 'neutral': {'key': 'neutral', 'type': 'float'}, - 'sadness': {'key': 'sadness', 'type': 'float'}, - 'surprise': {'key': 'surprise', 'type': 'float'}, - } - - def __init__(self, **kwargs): - super(Emotion, self).__init__(**kwargs) - self.anger = kwargs.get('anger', None) - self.contempt = kwargs.get('contempt', None) - self.disgust = kwargs.get('disgust', None) - self.fear = kwargs.get('fear', None) - self.happiness = kwargs.get('happiness', None) - self.neutral = kwargs.get('neutral', None) - self.sadness = kwargs.get('sadness', None) - self.surprise = kwargs.get('surprise', None) diff --git 
a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/emotion_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/emotion_py3.py deleted file mode 100644 index 552f1b193389..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/emotion_py3.py +++ /dev/null @@ -1,57 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Emotion(Model): - """Properties describing facial emotion in form of confidence ranging from 0 - to 1. 
- - :param anger: - :type anger: float - :param contempt: - :type contempt: float - :param disgust: - :type disgust: float - :param fear: - :type fear: float - :param happiness: - :type happiness: float - :param neutral: - :type neutral: float - :param sadness: - :type sadness: float - :param surprise: - :type surprise: float - """ - - _attribute_map = { - 'anger': {'key': 'anger', 'type': 'float'}, - 'contempt': {'key': 'contempt', 'type': 'float'}, - 'disgust': {'key': 'disgust', 'type': 'float'}, - 'fear': {'key': 'fear', 'type': 'float'}, - 'happiness': {'key': 'happiness', 'type': 'float'}, - 'neutral': {'key': 'neutral', 'type': 'float'}, - 'sadness': {'key': 'sadness', 'type': 'float'}, - 'surprise': {'key': 'surprise', 'type': 'float'}, - } - - def __init__(self, *, anger: float=None, contempt: float=None, disgust: float=None, fear: float=None, happiness: float=None, neutral: float=None, sadness: float=None, surprise: float=None, **kwargs) -> None: - super(Emotion, self).__init__(**kwargs) - self.anger = anger - self.contempt = contempt - self.disgust = disgust - self.fear = fear - self.happiness = happiness - self.neutral = neutral - self.sadness = sadness - self.surprise = surprise diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/error.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/error.py deleted file mode 100644 index a41106cfaca6..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/error.py +++ /dev/null @@ -1,32 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. 
-# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Error(Model): - """Error body. - - :param code: - :type code: str - :param message: - :type message: str - """ - - _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(Error, self).__init__(**kwargs) - self.code = kwargs.get('code', None) - self.message = kwargs.get('message', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/error_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/error_py3.py deleted file mode 100644 index 08e8cb04c44d..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/error_py3.py +++ /dev/null @@ -1,32 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Error(Model): - """Error body. 
- - :param code: - :type code: str - :param message: - :type message: str - """ - - _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - } - - def __init__(self, *, code: str=None, message: str=None, **kwargs) -> None: - super(Error, self).__init__(**kwargs) - self.code = code - self.message = message diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/exposure.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/exposure.py deleted file mode 100644 index 07c7359c6dab..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/exposure.py +++ /dev/null @@ -1,36 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Exposure(Model): - """Properties describing exposure level of the image. - - :param exposure_level: An enum value indicating level of exposure. - Possible values include: 'UnderExposure', 'GoodExposure', 'OverExposure' - :type exposure_level: str or - ~azure.cognitiveservices.vision.face.models.ExposureLevel - :param value: A number indicating level of exposure level ranging from 0 - to 1. [0, 0.25) is under exposure. [0.25, 0.75) is good exposure. [0.75, - 1] is over exposure. 
- :type value: float - """ - - _attribute_map = { - 'exposure_level': {'key': 'exposureLevel', 'type': 'ExposureLevel'}, - 'value': {'key': 'value', 'type': 'float'}, - } - - def __init__(self, **kwargs): - super(Exposure, self).__init__(**kwargs) - self.exposure_level = kwargs.get('exposure_level', None) - self.value = kwargs.get('value', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/exposure_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/exposure_py3.py deleted file mode 100644 index efff64344121..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/exposure_py3.py +++ /dev/null @@ -1,36 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Exposure(Model): - """Properties describing exposure level of the image. - - :param exposure_level: An enum value indicating level of exposure. - Possible values include: 'UnderExposure', 'GoodExposure', 'OverExposure' - :type exposure_level: str or - ~azure.cognitiveservices.vision.face.models.ExposureLevel - :param value: A number indicating level of exposure level ranging from 0 - to 1. [0, 0.25) is under exposure. [0.25, 0.75) is good exposure. [0.75, - 1] is over exposure. 
- :type value: float - """ - - _attribute_map = { - 'exposure_level': {'key': 'exposureLevel', 'type': 'ExposureLevel'}, - 'value': {'key': 'value', 'type': 'float'}, - } - - def __init__(self, *, exposure_level=None, value: float=None, **kwargs) -> None: - super(Exposure, self).__init__(**kwargs) - self.exposure_level = exposure_level - self.value = value diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_attributes.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_attributes.py deleted file mode 100644 index eaa95741ae86..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_attributes.py +++ /dev/null @@ -1,85 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class FaceAttributes(Model): - """Face Attributes. - - :param age: Age in years - :type age: float - :param gender: Possible gender of the face. Possible values include: - 'male', 'female' - :type gender: str or ~azure.cognitiveservices.vision.face.models.Gender - :param smile: Smile intensity, a number between [0,1] - :type smile: float - :param facial_hair: Properties describing facial hair attributes. - :type facial_hair: ~azure.cognitiveservices.vision.face.models.FacialHair - :param glasses: Glasses type if any of the face. 
Possible values include: - 'noGlasses', 'readingGlasses', 'sunglasses', 'swimmingGoggles' - :type glasses: str or - ~azure.cognitiveservices.vision.face.models.GlassesType - :param head_pose: Properties indicating head pose of the face. - :type head_pose: ~azure.cognitiveservices.vision.face.models.HeadPose - :param emotion: Properties describing facial emotion in form of confidence - ranging from 0 to 1. - :type emotion: ~azure.cognitiveservices.vision.face.models.Emotion - :param hair: Properties describing hair attributes. - :type hair: ~azure.cognitiveservices.vision.face.models.Hair - :param makeup: Properties describing present makeups on a given face. - :type makeup: ~azure.cognitiveservices.vision.face.models.Makeup - :param occlusion: Properties describing occlusions on a given face. - :type occlusion: ~azure.cognitiveservices.vision.face.models.Occlusion - :param accessories: Properties describing any accessories on a given face. - :type accessories: - list[~azure.cognitiveservices.vision.face.models.Accessory] - :param blur: Properties describing any presence of blur within the image. - :type blur: ~azure.cognitiveservices.vision.face.models.Blur - :param exposure: Properties describing exposure level of the image. - :type exposure: ~azure.cognitiveservices.vision.face.models.Exposure - :param noise: Properties describing noise level of the image. 
- :type noise: ~azure.cognitiveservices.vision.face.models.Noise - """ - - _attribute_map = { - 'age': {'key': 'age', 'type': 'float'}, - 'gender': {'key': 'gender', 'type': 'Gender'}, - 'smile': {'key': 'smile', 'type': 'float'}, - 'facial_hair': {'key': 'facialHair', 'type': 'FacialHair'}, - 'glasses': {'key': 'glasses', 'type': 'GlassesType'}, - 'head_pose': {'key': 'headPose', 'type': 'HeadPose'}, - 'emotion': {'key': 'emotion', 'type': 'Emotion'}, - 'hair': {'key': 'hair', 'type': 'Hair'}, - 'makeup': {'key': 'makeup', 'type': 'Makeup'}, - 'occlusion': {'key': 'occlusion', 'type': 'Occlusion'}, - 'accessories': {'key': 'accessories', 'type': '[Accessory]'}, - 'blur': {'key': 'blur', 'type': 'Blur'}, - 'exposure': {'key': 'exposure', 'type': 'Exposure'}, - 'noise': {'key': 'noise', 'type': 'Noise'}, - } - - def __init__(self, **kwargs): - super(FaceAttributes, self).__init__(**kwargs) - self.age = kwargs.get('age', None) - self.gender = kwargs.get('gender', None) - self.smile = kwargs.get('smile', None) - self.facial_hair = kwargs.get('facial_hair', None) - self.glasses = kwargs.get('glasses', None) - self.head_pose = kwargs.get('head_pose', None) - self.emotion = kwargs.get('emotion', None) - self.hair = kwargs.get('hair', None) - self.makeup = kwargs.get('makeup', None) - self.occlusion = kwargs.get('occlusion', None) - self.accessories = kwargs.get('accessories', None) - self.blur = kwargs.get('blur', None) - self.exposure = kwargs.get('exposure', None) - self.noise = kwargs.get('noise', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_attributes_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_attributes_py3.py deleted file mode 100644 index 6b03f1c9eb9a..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_attributes_py3.py +++ /dev/null @@ -1,85 
+0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class FaceAttributes(Model): - """Face Attributes. - - :param age: Age in years - :type age: float - :param gender: Possible gender of the face. Possible values include: - 'male', 'female' - :type gender: str or ~azure.cognitiveservices.vision.face.models.Gender - :param smile: Smile intensity, a number between [0,1] - :type smile: float - :param facial_hair: Properties describing facial hair attributes. - :type facial_hair: ~azure.cognitiveservices.vision.face.models.FacialHair - :param glasses: Glasses type if any of the face. Possible values include: - 'noGlasses', 'readingGlasses', 'sunglasses', 'swimmingGoggles' - :type glasses: str or - ~azure.cognitiveservices.vision.face.models.GlassesType - :param head_pose: Properties indicating head pose of the face. - :type head_pose: ~azure.cognitiveservices.vision.face.models.HeadPose - :param emotion: Properties describing facial emotion in form of confidence - ranging from 0 to 1. - :type emotion: ~azure.cognitiveservices.vision.face.models.Emotion - :param hair: Properties describing hair attributes. - :type hair: ~azure.cognitiveservices.vision.face.models.Hair - :param makeup: Properties describing present makeups on a given face. - :type makeup: ~azure.cognitiveservices.vision.face.models.Makeup - :param occlusion: Properties describing occlusions on a given face. 
- :type occlusion: ~azure.cognitiveservices.vision.face.models.Occlusion - :param accessories: Properties describing any accessories on a given face. - :type accessories: - list[~azure.cognitiveservices.vision.face.models.Accessory] - :param blur: Properties describing any presence of blur within the image. - :type blur: ~azure.cognitiveservices.vision.face.models.Blur - :param exposure: Properties describing exposure level of the image. - :type exposure: ~azure.cognitiveservices.vision.face.models.Exposure - :param noise: Properties describing noise level of the image. - :type noise: ~azure.cognitiveservices.vision.face.models.Noise - """ - - _attribute_map = { - 'age': {'key': 'age', 'type': 'float'}, - 'gender': {'key': 'gender', 'type': 'Gender'}, - 'smile': {'key': 'smile', 'type': 'float'}, - 'facial_hair': {'key': 'facialHair', 'type': 'FacialHair'}, - 'glasses': {'key': 'glasses', 'type': 'GlassesType'}, - 'head_pose': {'key': 'headPose', 'type': 'HeadPose'}, - 'emotion': {'key': 'emotion', 'type': 'Emotion'}, - 'hair': {'key': 'hair', 'type': 'Hair'}, - 'makeup': {'key': 'makeup', 'type': 'Makeup'}, - 'occlusion': {'key': 'occlusion', 'type': 'Occlusion'}, - 'accessories': {'key': 'accessories', 'type': '[Accessory]'}, - 'blur': {'key': 'blur', 'type': 'Blur'}, - 'exposure': {'key': 'exposure', 'type': 'Exposure'}, - 'noise': {'key': 'noise', 'type': 'Noise'}, - } - - def __init__(self, *, age: float=None, gender=None, smile: float=None, facial_hair=None, glasses=None, head_pose=None, emotion=None, hair=None, makeup=None, occlusion=None, accessories=None, blur=None, exposure=None, noise=None, **kwargs) -> None: - super(FaceAttributes, self).__init__(**kwargs) - self.age = age - self.gender = gender - self.smile = smile - self.facial_hair = facial_hair - self.glasses = glasses - self.head_pose = head_pose - self.emotion = emotion - self.hair = hair - self.makeup = makeup - self.occlusion = occlusion - self.accessories = accessories - self.blur = blur - 
self.exposure = exposure - self.noise = noise diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_landmarks.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_landmarks.py deleted file mode 100644 index 7e385f435b69..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_landmarks.py +++ /dev/null @@ -1,154 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class FaceLandmarks(Model): - """A collection of 27-point face landmarks pointing to the important positions - of face components. 
- - :param pupil_left: - :type pupil_left: ~azure.cognitiveservices.vision.face.models.Coordinate - :param pupil_right: - :type pupil_right: ~azure.cognitiveservices.vision.face.models.Coordinate - :param nose_tip: - :type nose_tip: ~azure.cognitiveservices.vision.face.models.Coordinate - :param mouth_left: - :type mouth_left: ~azure.cognitiveservices.vision.face.models.Coordinate - :param mouth_right: - :type mouth_right: ~azure.cognitiveservices.vision.face.models.Coordinate - :param eyebrow_left_outer: - :type eyebrow_left_outer: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param eyebrow_left_inner: - :type eyebrow_left_inner: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param eye_left_outer: - :type eye_left_outer: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param eye_left_top: - :type eye_left_top: ~azure.cognitiveservices.vision.face.models.Coordinate - :param eye_left_bottom: - :type eye_left_bottom: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param eye_left_inner: - :type eye_left_inner: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param eyebrow_right_inner: - :type eyebrow_right_inner: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param eyebrow_right_outer: - :type eyebrow_right_outer: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param eye_right_inner: - :type eye_right_inner: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param eye_right_top: - :type eye_right_top: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param eye_right_bottom: - :type eye_right_bottom: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param eye_right_outer: - :type eye_right_outer: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param nose_root_left: - :type nose_root_left: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param nose_root_right: - :type nose_root_right: - 
~azure.cognitiveservices.vision.face.models.Coordinate - :param nose_left_alar_top: - :type nose_left_alar_top: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param nose_right_alar_top: - :type nose_right_alar_top: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param nose_left_alar_out_tip: - :type nose_left_alar_out_tip: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param nose_right_alar_out_tip: - :type nose_right_alar_out_tip: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param upper_lip_top: - :type upper_lip_top: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param upper_lip_bottom: - :type upper_lip_bottom: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param under_lip_top: - :type under_lip_top: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param under_lip_bottom: - :type under_lip_bottom: - ~azure.cognitiveservices.vision.face.models.Coordinate - """ - - _attribute_map = { - 'pupil_left': {'key': 'pupilLeft', 'type': 'Coordinate'}, - 'pupil_right': {'key': 'pupilRight', 'type': 'Coordinate'}, - 'nose_tip': {'key': 'noseTip', 'type': 'Coordinate'}, - 'mouth_left': {'key': 'mouthLeft', 'type': 'Coordinate'}, - 'mouth_right': {'key': 'mouthRight', 'type': 'Coordinate'}, - 'eyebrow_left_outer': {'key': 'eyebrowLeftOuter', 'type': 'Coordinate'}, - 'eyebrow_left_inner': {'key': 'eyebrowLeftInner', 'type': 'Coordinate'}, - 'eye_left_outer': {'key': 'eyeLeftOuter', 'type': 'Coordinate'}, - 'eye_left_top': {'key': 'eyeLeftTop', 'type': 'Coordinate'}, - 'eye_left_bottom': {'key': 'eyeLeftBottom', 'type': 'Coordinate'}, - 'eye_left_inner': {'key': 'eyeLeftInner', 'type': 'Coordinate'}, - 'eyebrow_right_inner': {'key': 'eyebrowRightInner', 'type': 'Coordinate'}, - 'eyebrow_right_outer': {'key': 'eyebrowRightOuter', 'type': 'Coordinate'}, - 'eye_right_inner': {'key': 'eyeRightInner', 'type': 'Coordinate'}, - 'eye_right_top': {'key': 'eyeRightTop', 'type': 'Coordinate'}, - 
'eye_right_bottom': {'key': 'eyeRightBottom', 'type': 'Coordinate'}, - 'eye_right_outer': {'key': 'eyeRightOuter', 'type': 'Coordinate'}, - 'nose_root_left': {'key': 'noseRootLeft', 'type': 'Coordinate'}, - 'nose_root_right': {'key': 'noseRootRight', 'type': 'Coordinate'}, - 'nose_left_alar_top': {'key': 'noseLeftAlarTop', 'type': 'Coordinate'}, - 'nose_right_alar_top': {'key': 'noseRightAlarTop', 'type': 'Coordinate'}, - 'nose_left_alar_out_tip': {'key': 'noseLeftAlarOutTip', 'type': 'Coordinate'}, - 'nose_right_alar_out_tip': {'key': 'noseRightAlarOutTip', 'type': 'Coordinate'}, - 'upper_lip_top': {'key': 'upperLipTop', 'type': 'Coordinate'}, - 'upper_lip_bottom': {'key': 'upperLipBottom', 'type': 'Coordinate'}, - 'under_lip_top': {'key': 'underLipTop', 'type': 'Coordinate'}, - 'under_lip_bottom': {'key': 'underLipBottom', 'type': 'Coordinate'}, - } - - def __init__(self, **kwargs): - super(FaceLandmarks, self).__init__(**kwargs) - self.pupil_left = kwargs.get('pupil_left', None) - self.pupil_right = kwargs.get('pupil_right', None) - self.nose_tip = kwargs.get('nose_tip', None) - self.mouth_left = kwargs.get('mouth_left', None) - self.mouth_right = kwargs.get('mouth_right', None) - self.eyebrow_left_outer = kwargs.get('eyebrow_left_outer', None) - self.eyebrow_left_inner = kwargs.get('eyebrow_left_inner', None) - self.eye_left_outer = kwargs.get('eye_left_outer', None) - self.eye_left_top = kwargs.get('eye_left_top', None) - self.eye_left_bottom = kwargs.get('eye_left_bottom', None) - self.eye_left_inner = kwargs.get('eye_left_inner', None) - self.eyebrow_right_inner = kwargs.get('eyebrow_right_inner', None) - self.eyebrow_right_outer = kwargs.get('eyebrow_right_outer', None) - self.eye_right_inner = kwargs.get('eye_right_inner', None) - self.eye_right_top = kwargs.get('eye_right_top', None) - self.eye_right_bottom = kwargs.get('eye_right_bottom', None) - self.eye_right_outer = kwargs.get('eye_right_outer', None) - self.nose_root_left = 
kwargs.get('nose_root_left', None) - self.nose_root_right = kwargs.get('nose_root_right', None) - self.nose_left_alar_top = kwargs.get('nose_left_alar_top', None) - self.nose_right_alar_top = kwargs.get('nose_right_alar_top', None) - self.nose_left_alar_out_tip = kwargs.get('nose_left_alar_out_tip', None) - self.nose_right_alar_out_tip = kwargs.get('nose_right_alar_out_tip', None) - self.upper_lip_top = kwargs.get('upper_lip_top', None) - self.upper_lip_bottom = kwargs.get('upper_lip_bottom', None) - self.under_lip_top = kwargs.get('under_lip_top', None) - self.under_lip_bottom = kwargs.get('under_lip_bottom', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_landmarks_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_landmarks_py3.py deleted file mode 100644 index 3bffe97ff181..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_landmarks_py3.py +++ /dev/null @@ -1,154 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class FaceLandmarks(Model): - """A collection of 27-point face landmarks pointing to the important positions - of face components. 
- - :param pupil_left: - :type pupil_left: ~azure.cognitiveservices.vision.face.models.Coordinate - :param pupil_right: - :type pupil_right: ~azure.cognitiveservices.vision.face.models.Coordinate - :param nose_tip: - :type nose_tip: ~azure.cognitiveservices.vision.face.models.Coordinate - :param mouth_left: - :type mouth_left: ~azure.cognitiveservices.vision.face.models.Coordinate - :param mouth_right: - :type mouth_right: ~azure.cognitiveservices.vision.face.models.Coordinate - :param eyebrow_left_outer: - :type eyebrow_left_outer: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param eyebrow_left_inner: - :type eyebrow_left_inner: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param eye_left_outer: - :type eye_left_outer: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param eye_left_top: - :type eye_left_top: ~azure.cognitiveservices.vision.face.models.Coordinate - :param eye_left_bottom: - :type eye_left_bottom: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param eye_left_inner: - :type eye_left_inner: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param eyebrow_right_inner: - :type eyebrow_right_inner: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param eyebrow_right_outer: - :type eyebrow_right_outer: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param eye_right_inner: - :type eye_right_inner: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param eye_right_top: - :type eye_right_top: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param eye_right_bottom: - :type eye_right_bottom: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param eye_right_outer: - :type eye_right_outer: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param nose_root_left: - :type nose_root_left: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param nose_root_right: - :type nose_root_right: - 
~azure.cognitiveservices.vision.face.models.Coordinate - :param nose_left_alar_top: - :type nose_left_alar_top: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param nose_right_alar_top: - :type nose_right_alar_top: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param nose_left_alar_out_tip: - :type nose_left_alar_out_tip: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param nose_right_alar_out_tip: - :type nose_right_alar_out_tip: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param upper_lip_top: - :type upper_lip_top: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param upper_lip_bottom: - :type upper_lip_bottom: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param under_lip_top: - :type under_lip_top: - ~azure.cognitiveservices.vision.face.models.Coordinate - :param under_lip_bottom: - :type under_lip_bottom: - ~azure.cognitiveservices.vision.face.models.Coordinate - """ - - _attribute_map = { - 'pupil_left': {'key': 'pupilLeft', 'type': 'Coordinate'}, - 'pupil_right': {'key': 'pupilRight', 'type': 'Coordinate'}, - 'nose_tip': {'key': 'noseTip', 'type': 'Coordinate'}, - 'mouth_left': {'key': 'mouthLeft', 'type': 'Coordinate'}, - 'mouth_right': {'key': 'mouthRight', 'type': 'Coordinate'}, - 'eyebrow_left_outer': {'key': 'eyebrowLeftOuter', 'type': 'Coordinate'}, - 'eyebrow_left_inner': {'key': 'eyebrowLeftInner', 'type': 'Coordinate'}, - 'eye_left_outer': {'key': 'eyeLeftOuter', 'type': 'Coordinate'}, - 'eye_left_top': {'key': 'eyeLeftTop', 'type': 'Coordinate'}, - 'eye_left_bottom': {'key': 'eyeLeftBottom', 'type': 'Coordinate'}, - 'eye_left_inner': {'key': 'eyeLeftInner', 'type': 'Coordinate'}, - 'eyebrow_right_inner': {'key': 'eyebrowRightInner', 'type': 'Coordinate'}, - 'eyebrow_right_outer': {'key': 'eyebrowRightOuter', 'type': 'Coordinate'}, - 'eye_right_inner': {'key': 'eyeRightInner', 'type': 'Coordinate'}, - 'eye_right_top': {'key': 'eyeRightTop', 'type': 'Coordinate'}, - 
'eye_right_bottom': {'key': 'eyeRightBottom', 'type': 'Coordinate'}, - 'eye_right_outer': {'key': 'eyeRightOuter', 'type': 'Coordinate'}, - 'nose_root_left': {'key': 'noseRootLeft', 'type': 'Coordinate'}, - 'nose_root_right': {'key': 'noseRootRight', 'type': 'Coordinate'}, - 'nose_left_alar_top': {'key': 'noseLeftAlarTop', 'type': 'Coordinate'}, - 'nose_right_alar_top': {'key': 'noseRightAlarTop', 'type': 'Coordinate'}, - 'nose_left_alar_out_tip': {'key': 'noseLeftAlarOutTip', 'type': 'Coordinate'}, - 'nose_right_alar_out_tip': {'key': 'noseRightAlarOutTip', 'type': 'Coordinate'}, - 'upper_lip_top': {'key': 'upperLipTop', 'type': 'Coordinate'}, - 'upper_lip_bottom': {'key': 'upperLipBottom', 'type': 'Coordinate'}, - 'under_lip_top': {'key': 'underLipTop', 'type': 'Coordinate'}, - 'under_lip_bottom': {'key': 'underLipBottom', 'type': 'Coordinate'}, - } - - def __init__(self, *, pupil_left=None, pupil_right=None, nose_tip=None, mouth_left=None, mouth_right=None, eyebrow_left_outer=None, eyebrow_left_inner=None, eye_left_outer=None, eye_left_top=None, eye_left_bottom=None, eye_left_inner=None, eyebrow_right_inner=None, eyebrow_right_outer=None, eye_right_inner=None, eye_right_top=None, eye_right_bottom=None, eye_right_outer=None, nose_root_left=None, nose_root_right=None, nose_left_alar_top=None, nose_right_alar_top=None, nose_left_alar_out_tip=None, nose_right_alar_out_tip=None, upper_lip_top=None, upper_lip_bottom=None, under_lip_top=None, under_lip_bottom=None, **kwargs) -> None: - super(FaceLandmarks, self).__init__(**kwargs) - self.pupil_left = pupil_left - self.pupil_right = pupil_right - self.nose_tip = nose_tip - self.mouth_left = mouth_left - self.mouth_right = mouth_right - self.eyebrow_left_outer = eyebrow_left_outer - self.eyebrow_left_inner = eyebrow_left_inner - self.eye_left_outer = eye_left_outer - self.eye_left_top = eye_left_top - self.eye_left_bottom = eye_left_bottom - self.eye_left_inner = eye_left_inner - self.eyebrow_right_inner = 
eyebrow_right_inner - self.eyebrow_right_outer = eyebrow_right_outer - self.eye_right_inner = eye_right_inner - self.eye_right_top = eye_right_top - self.eye_right_bottom = eye_right_bottom - self.eye_right_outer = eye_right_outer - self.nose_root_left = nose_root_left - self.nose_root_right = nose_root_right - self.nose_left_alar_top = nose_left_alar_top - self.nose_right_alar_top = nose_right_alar_top - self.nose_left_alar_out_tip = nose_left_alar_out_tip - self.nose_right_alar_out_tip = nose_right_alar_out_tip - self.upper_lip_top = upper_lip_top - self.upper_lip_bottom = upper_lip_bottom - self.under_lip_top = under_lip_top - self.under_lip_bottom = under_lip_bottom diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_list.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_list.py deleted file mode 100644 index 55e87974b4fb..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_list.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .meta_data_contract import MetaDataContract - - -class FaceList(MetaDataContract): - """Face list object. - - All required parameters must be populated in order to send to Azure. - - :param name: User defined name, maximum length is 128. - :type name: str - :param user_data: User specified data. Length should not exceed 16KB. 
- :type user_data: str - :param recognition_model: Possible values include: 'recognition_01', - 'recognition_02'. Default value: "recognition_01" . - :type recognition_model: str or - ~azure.cognitiveservices.vision.face.models.RecognitionModel - :param face_list_id: Required. FaceListId of the target face list. - :type face_list_id: str - :param persisted_faces: Persisted faces within the face list. - :type persisted_faces: - list[~azure.cognitiveservices.vision.face.models.PersistedFace] - """ - - _validation = { - 'name': {'max_length': 128}, - 'user_data': {'max_length': 16384}, - 'face_list_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'user_data': {'key': 'userData', 'type': 'str'}, - 'recognition_model': {'key': 'recognitionModel', 'type': 'str'}, - 'face_list_id': {'key': 'faceListId', 'type': 'str'}, - 'persisted_faces': {'key': 'persistedFaces', 'type': '[PersistedFace]'}, - } - - def __init__(self, **kwargs): - super(FaceList, self).__init__(**kwargs) - self.face_list_id = kwargs.get('face_list_id', None) - self.persisted_faces = kwargs.get('persisted_faces', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_list_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_list_py3.py deleted file mode 100644 index 5a36c8042dfb..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_list_py3.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. 
-# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .meta_data_contract_py3 import MetaDataContract - - -class FaceList(MetaDataContract): - """Face list object. - - All required parameters must be populated in order to send to Azure. - - :param name: User defined name, maximum length is 128. - :type name: str - :param user_data: User specified data. Length should not exceed 16KB. - :type user_data: str - :param recognition_model: Possible values include: 'recognition_01', - 'recognition_02'. Default value: "recognition_01" . - :type recognition_model: str or - ~azure.cognitiveservices.vision.face.models.RecognitionModel - :param face_list_id: Required. FaceListId of the target face list. - :type face_list_id: str - :param persisted_faces: Persisted faces within the face list. - :type persisted_faces: - list[~azure.cognitiveservices.vision.face.models.PersistedFace] - """ - - _validation = { - 'name': {'max_length': 128}, - 'user_data': {'max_length': 16384}, - 'face_list_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'user_data': {'key': 'userData', 'type': 'str'}, - 'recognition_model': {'key': 'recognitionModel', 'type': 'str'}, - 'face_list_id': {'key': 'faceListId', 'type': 'str'}, - 'persisted_faces': {'key': 'persistedFaces', 'type': '[PersistedFace]'}, - } - - def __init__(self, *, face_list_id: str, name: str=None, user_data: str=None, recognition_model="recognition_01", persisted_faces=None, **kwargs) -> None: - super(FaceList, self).__init__(name=name, user_data=user_data, recognition_model=recognition_model, **kwargs) - self.face_list_id = face_list_id - self.persisted_faces = persisted_faces diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_rectangle.py 
b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_rectangle.py deleted file mode 100644 index 025a99404fa8..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_rectangle.py +++ /dev/null @@ -1,51 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class FaceRectangle(Model): - """A rectangle within which a face can be found. - - All required parameters must be populated in order to send to Azure. - - :param width: Required. The width of the rectangle, in pixels. - :type width: int - :param height: Required. The height of the rectangle, in pixels. - :type height: int - :param left: Required. The distance from the left edge if the image to the - left edge of the rectangle, in pixels. - :type left: int - :param top: Required. The distance from the top edge if the image to the - top edge of the rectangle, in pixels. 
- :type top: int - """ - - _validation = { - 'width': {'required': True}, - 'height': {'required': True}, - 'left': {'required': True}, - 'top': {'required': True}, - } - - _attribute_map = { - 'width': {'key': 'width', 'type': 'int'}, - 'height': {'key': 'height', 'type': 'int'}, - 'left': {'key': 'left', 'type': 'int'}, - 'top': {'key': 'top', 'type': 'int'}, - } - - def __init__(self, **kwargs): - super(FaceRectangle, self).__init__(**kwargs) - self.width = kwargs.get('width', None) - self.height = kwargs.get('height', None) - self.left = kwargs.get('left', None) - self.top = kwargs.get('top', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_rectangle_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_rectangle_py3.py deleted file mode 100644 index ff85626ad83f..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/face_rectangle_py3.py +++ /dev/null @@ -1,51 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class FaceRectangle(Model): - """A rectangle within which a face can be found. - - All required parameters must be populated in order to send to Azure. - - :param width: Required. The width of the rectangle, in pixels. - :type width: int - :param height: Required. The height of the rectangle, in pixels. - :type height: int - :param left: Required. 
The distance from the left edge if the image to the - left edge of the rectangle, in pixels. - :type left: int - :param top: Required. The distance from the top edge if the image to the - top edge of the rectangle, in pixels. - :type top: int - """ - - _validation = { - 'width': {'required': True}, - 'height': {'required': True}, - 'left': {'required': True}, - 'top': {'required': True}, - } - - _attribute_map = { - 'width': {'key': 'width', 'type': 'int'}, - 'height': {'key': 'height', 'type': 'int'}, - 'left': {'key': 'left', 'type': 'int'}, - 'top': {'key': 'top', 'type': 'int'}, - } - - def __init__(self, *, width: int, height: int, left: int, top: int, **kwargs) -> None: - super(FaceRectangle, self).__init__(**kwargs) - self.width = width - self.height = height - self.left = left - self.top = top diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/facial_hair.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/facial_hair.py deleted file mode 100644 index f030e5b75824..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/facial_hair.py +++ /dev/null @@ -1,36 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class FacialHair(Model): - """Properties describing facial hair attributes. 
- - :param moustache: - :type moustache: float - :param beard: - :type beard: float - :param sideburns: - :type sideburns: float - """ - - _attribute_map = { - 'moustache': {'key': 'moustache', 'type': 'float'}, - 'beard': {'key': 'beard', 'type': 'float'}, - 'sideburns': {'key': 'sideburns', 'type': 'float'}, - } - - def __init__(self, **kwargs): - super(FacialHair, self).__init__(**kwargs) - self.moustache = kwargs.get('moustache', None) - self.beard = kwargs.get('beard', None) - self.sideburns = kwargs.get('sideburns', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/facial_hair_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/facial_hair_py3.py deleted file mode 100644 index 261f55ed2b1b..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/facial_hair_py3.py +++ /dev/null @@ -1,36 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class FacialHair(Model): - """Properties describing facial hair attributes. 
- - :param moustache: - :type moustache: float - :param beard: - :type beard: float - :param sideburns: - :type sideburns: float - """ - - _attribute_map = { - 'moustache': {'key': 'moustache', 'type': 'float'}, - 'beard': {'key': 'beard', 'type': 'float'}, - 'sideburns': {'key': 'sideburns', 'type': 'float'}, - } - - def __init__(self, *, moustache: float=None, beard: float=None, sideburns: float=None, **kwargs) -> None: - super(FacialHair, self).__init__(**kwargs) - self.moustache = moustache - self.beard = beard - self.sideburns = sideburns diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/find_similar_request.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/find_similar_request.py deleted file mode 100644 index 7e0120382718..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/find_similar_request.py +++ /dev/null @@ -1,75 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class FindSimilarRequest(Model): - """Request body for find similar operation. - - All required parameters must be populated in order to send to Azure. - - :param face_id: Required. FaceId of the query face. User needs to call - Face - Detect first to get a valid faceId. 
Note that this faceId is not - persisted and will expire 24 hours after the detection call - :type face_id: str - :param face_list_id: An existing user-specified unique candidate face - list, created in Face List - Create a Face List. Face list contains a set - of persistedFaceIds which are persisted and will never expire. Parameter - faceListId, largeFaceListId and faceIds should not be provided at the same - time. - :type face_list_id: str - :param large_face_list_id: An existing user-specified unique candidate - large face list, created in LargeFaceList - Create. Large face list - contains a set of persistedFaceIds which are persisted and will never - expire. Parameter faceListId, largeFaceListId and faceIds should not be - provided at the same time. - :type large_face_list_id: str - :param face_ids: An array of candidate faceIds. All of them are created by - Face - Detect and the faceIds will expire 24 hours after the detection - call. The number of faceIds is limited to 1000. Parameter faceListId, - largeFaceListId and faceIds should not be provided at the same time. - :type face_ids: list[str] - :param max_num_of_candidates_returned: The number of top similar faces - returned. The valid range is [1, 1000]. Default value: 20 . - :type max_num_of_candidates_returned: int - :param mode: Similar face searching mode. It can be "matchPerson" or - "matchFace". Possible values include: 'matchPerson', 'matchFace'. Default - value: "matchPerson" . 
- :type mode: str or - ~azure.cognitiveservices.vision.face.models.FindSimilarMatchMode - """ - - _validation = { - 'face_id': {'required': True}, - 'face_list_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - 'large_face_list_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - 'face_ids': {'max_items': 1000}, - 'max_num_of_candidates_returned': {'maximum': 1000, 'minimum': 1}, - } - - _attribute_map = { - 'face_id': {'key': 'faceId', 'type': 'str'}, - 'face_list_id': {'key': 'faceListId', 'type': 'str'}, - 'large_face_list_id': {'key': 'largeFaceListId', 'type': 'str'}, - 'face_ids': {'key': 'faceIds', 'type': '[str]'}, - 'max_num_of_candidates_returned': {'key': 'maxNumOfCandidatesReturned', 'type': 'int'}, - 'mode': {'key': 'mode', 'type': 'FindSimilarMatchMode'}, - } - - def __init__(self, **kwargs): - super(FindSimilarRequest, self).__init__(**kwargs) - self.face_id = kwargs.get('face_id', None) - self.face_list_id = kwargs.get('face_list_id', None) - self.large_face_list_id = kwargs.get('large_face_list_id', None) - self.face_ids = kwargs.get('face_ids', None) - self.max_num_of_candidates_returned = kwargs.get('max_num_of_candidates_returned', 20) - self.mode = kwargs.get('mode', "matchPerson") diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/find_similar_request_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/find_similar_request_py3.py deleted file mode 100644 index 68ceaf2a057e..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/find_similar_request_py3.py +++ /dev/null @@ -1,75 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class FindSimilarRequest(Model): - """Request body for find similar operation. - - All required parameters must be populated in order to send to Azure. - - :param face_id: Required. FaceId of the query face. User needs to call - Face - Detect first to get a valid faceId. Note that this faceId is not - persisted and will expire 24 hours after the detection call - :type face_id: str - :param face_list_id: An existing user-specified unique candidate face - list, created in Face List - Create a Face List. Face list contains a set - of persistedFaceIds which are persisted and will never expire. Parameter - faceListId, largeFaceListId and faceIds should not be provided at the same - time. - :type face_list_id: str - :param large_face_list_id: An existing user-specified unique candidate - large face list, created in LargeFaceList - Create. Large face list - contains a set of persistedFaceIds which are persisted and will never - expire. Parameter faceListId, largeFaceListId and faceIds should not be - provided at the same time. - :type large_face_list_id: str - :param face_ids: An array of candidate faceIds. All of them are created by - Face - Detect and the faceIds will expire 24 hours after the detection - call. The number of faceIds is limited to 1000. Parameter faceListId, - largeFaceListId and faceIds should not be provided at the same time. - :type face_ids: list[str] - :param max_num_of_candidates_returned: The number of top similar faces - returned. The valid range is [1, 1000]. Default value: 20 . - :type max_num_of_candidates_returned: int - :param mode: Similar face searching mode. It can be "matchPerson" or - "matchFace". Possible values include: 'matchPerson', 'matchFace'. 
Default - value: "matchPerson" . - :type mode: str or - ~azure.cognitiveservices.vision.face.models.FindSimilarMatchMode - """ - - _validation = { - 'face_id': {'required': True}, - 'face_list_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - 'large_face_list_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - 'face_ids': {'max_items': 1000}, - 'max_num_of_candidates_returned': {'maximum': 1000, 'minimum': 1}, - } - - _attribute_map = { - 'face_id': {'key': 'faceId', 'type': 'str'}, - 'face_list_id': {'key': 'faceListId', 'type': 'str'}, - 'large_face_list_id': {'key': 'largeFaceListId', 'type': 'str'}, - 'face_ids': {'key': 'faceIds', 'type': '[str]'}, - 'max_num_of_candidates_returned': {'key': 'maxNumOfCandidatesReturned', 'type': 'int'}, - 'mode': {'key': 'mode', 'type': 'FindSimilarMatchMode'}, - } - - def __init__(self, *, face_id: str, face_list_id: str=None, large_face_list_id: str=None, face_ids=None, max_num_of_candidates_returned: int=20, mode="matchPerson", **kwargs) -> None: - super(FindSimilarRequest, self).__init__(**kwargs) - self.face_id = face_id - self.face_list_id = face_list_id - self.large_face_list_id = large_face_list_id - self.face_ids = face_ids - self.max_num_of_candidates_returned = max_num_of_candidates_returned - self.mode = mode diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/group_request.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/group_request.py deleted file mode 100644 index a7041836294d..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/group_request.py +++ /dev/null @@ -1,35 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class GroupRequest(Model): - """Request body for group request. - - All required parameters must be populated in order to send to Azure. - - :param face_ids: Required. Array of candidate faceId created by Face - - Detect. The maximum is 1000 faces - :type face_ids: list[str] - """ - - _validation = { - 'face_ids': {'required': True, 'max_items': 1000}, - } - - _attribute_map = { - 'face_ids': {'key': 'faceIds', 'type': '[str]'}, - } - - def __init__(self, **kwargs): - super(GroupRequest, self).__init__(**kwargs) - self.face_ids = kwargs.get('face_ids', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/group_request_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/group_request_py3.py deleted file mode 100644 index a30757a8d7c5..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/group_request_py3.py +++ /dev/null @@ -1,35 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class GroupRequest(Model): - """Request body for group request. 
- - All required parameters must be populated in order to send to Azure. - - :param face_ids: Required. Array of candidate faceId created by Face - - Detect. The maximum is 1000 faces - :type face_ids: list[str] - """ - - _validation = { - 'face_ids': {'required': True, 'max_items': 1000}, - } - - _attribute_map = { - 'face_ids': {'key': 'faceIds', 'type': '[str]'}, - } - - def __init__(self, *, face_ids, **kwargs) -> None: - super(GroupRequest, self).__init__(**kwargs) - self.face_ids = face_ids diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/group_result.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/group_result.py deleted file mode 100644 index 7a5bdbb62e32..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/group_result.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class GroupResult(Model): - """An array of face groups based on face similarity. - - All required parameters must be populated in order to send to Azure. - - :param groups: Required. A partition of the original faces based on face - similarity. Groups are ranked by number of faces - :type groups: list[list[str]] - :param messy_group: Face ids array of faces that cannot find any similar - faces from original faces. 
- :type messy_group: list[str] - """ - - _validation = { - 'groups': {'required': True}, - } - - _attribute_map = { - 'groups': {'key': 'groups', 'type': '[[str]]'}, - 'messy_group': {'key': 'messyGroup', 'type': '[str]'}, - } - - def __init__(self, **kwargs): - super(GroupResult, self).__init__(**kwargs) - self.groups = kwargs.get('groups', None) - self.messy_group = kwargs.get('messy_group', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/group_result_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/group_result_py3.py deleted file mode 100644 index 5eb92f3fa8f4..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/group_result_py3.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class GroupResult(Model): - """An array of face groups based on face similarity. - - All required parameters must be populated in order to send to Azure. - - :param groups: Required. A partition of the original faces based on face - similarity. Groups are ranked by number of faces - :type groups: list[list[str]] - :param messy_group: Face ids array of faces that cannot find any similar - faces from original faces. 
- :type messy_group: list[str] - """ - - _validation = { - 'groups': {'required': True}, - } - - _attribute_map = { - 'groups': {'key': 'groups', 'type': '[[str]]'}, - 'messy_group': {'key': 'messyGroup', 'type': '[str]'}, - } - - def __init__(self, *, groups, messy_group=None, **kwargs) -> None: - super(GroupResult, self).__init__(**kwargs) - self.groups = groups - self.messy_group = messy_group diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/hair.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/hair.py deleted file mode 100644 index cb6fe7d530a1..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/hair.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Hair(Model): - """Properties describing hair attributes. - - :param bald: A number describing confidence level of whether the person is - bald. - :type bald: float - :param invisible: A boolean value describing whether the hair is visible - in the image. - :type invisible: bool - :param hair_color: An array of candidate colors and confidence level in - the presence of each. 
- :type hair_color: - list[~azure.cognitiveservices.vision.face.models.HairColor] - """ - - _attribute_map = { - 'bald': {'key': 'bald', 'type': 'float'}, - 'invisible': {'key': 'invisible', 'type': 'bool'}, - 'hair_color': {'key': 'hairColor', 'type': '[HairColor]'}, - } - - def __init__(self, **kwargs): - super(Hair, self).__init__(**kwargs) - self.bald = kwargs.get('bald', None) - self.invisible = kwargs.get('invisible', None) - self.hair_color = kwargs.get('hair_color', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/hair_color.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/hair_color.py deleted file mode 100644 index 287ddbb6eca0..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/hair_color.py +++ /dev/null @@ -1,34 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class HairColor(Model): - """Hair color and associated confidence. - - :param color: Name of the hair color. 
Possible values include: 'unknown', - 'white', 'gray', 'blond', 'brown', 'red', 'black', 'other' - :type color: str or - ~azure.cognitiveservices.vision.face.models.HairColorType - :param confidence: Confidence level of the color - :type confidence: float - """ - - _attribute_map = { - 'color': {'key': 'color', 'type': 'HairColorType'}, - 'confidence': {'key': 'confidence', 'type': 'float'}, - } - - def __init__(self, **kwargs): - super(HairColor, self).__init__(**kwargs) - self.color = kwargs.get('color', None) - self.confidence = kwargs.get('confidence', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/hair_color_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/hair_color_py3.py deleted file mode 100644 index c520a106a3bc..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/hair_color_py3.py +++ /dev/null @@ -1,34 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class HairColor(Model): - """Hair color and associated confidence. - - :param color: Name of the hair color. 
Possible values include: 'unknown', - 'white', 'gray', 'blond', 'brown', 'red', 'black', 'other' - :type color: str or - ~azure.cognitiveservices.vision.face.models.HairColorType - :param confidence: Confidence level of the color - :type confidence: float - """ - - _attribute_map = { - 'color': {'key': 'color', 'type': 'HairColorType'}, - 'confidence': {'key': 'confidence', 'type': 'float'}, - } - - def __init__(self, *, color=None, confidence: float=None, **kwargs) -> None: - super(HairColor, self).__init__(**kwargs) - self.color = color - self.confidence = confidence diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/hair_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/hair_py3.py deleted file mode 100644 index 457a80fc7ad3..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/hair_py3.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Hair(Model): - """Properties describing hair attributes. - - :param bald: A number describing confidence level of whether the person is - bald. - :type bald: float - :param invisible: A boolean value describing whether the hair is visible - in the image. - :type invisible: bool - :param hair_color: An array of candidate colors and confidence level in - the presence of each. 
- :type hair_color: - list[~azure.cognitiveservices.vision.face.models.HairColor] - """ - - _attribute_map = { - 'bald': {'key': 'bald', 'type': 'float'}, - 'invisible': {'key': 'invisible', 'type': 'bool'}, - 'hair_color': {'key': 'hairColor', 'type': '[HairColor]'}, - } - - def __init__(self, *, bald: float=None, invisible: bool=None, hair_color=None, **kwargs) -> None: - super(Hair, self).__init__(**kwargs) - self.bald = bald - self.invisible = invisible - self.hair_color = hair_color diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/head_pose.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/head_pose.py deleted file mode 100644 index ddc42406f476..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/head_pose.py +++ /dev/null @@ -1,36 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class HeadPose(Model): - """Properties indicating head pose of the face. 
- - :param roll: - :type roll: float - :param yaw: - :type yaw: float - :param pitch: - :type pitch: float - """ - - _attribute_map = { - 'roll': {'key': 'roll', 'type': 'float'}, - 'yaw': {'key': 'yaw', 'type': 'float'}, - 'pitch': {'key': 'pitch', 'type': 'float'}, - } - - def __init__(self, **kwargs): - super(HeadPose, self).__init__(**kwargs) - self.roll = kwargs.get('roll', None) - self.yaw = kwargs.get('yaw', None) - self.pitch = kwargs.get('pitch', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/head_pose_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/head_pose_py3.py deleted file mode 100644 index bad69304e32e..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/head_pose_py3.py +++ /dev/null @@ -1,36 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class HeadPose(Model): - """Properties indicating head pose of the face. 
- - :param roll: - :type roll: float - :param yaw: - :type yaw: float - :param pitch: - :type pitch: float - """ - - _attribute_map = { - 'roll': {'key': 'roll', 'type': 'float'}, - 'yaw': {'key': 'yaw', 'type': 'float'}, - 'pitch': {'key': 'pitch', 'type': 'float'}, - } - - def __init__(self, *, roll: float=None, yaw: float=None, pitch: float=None, **kwargs) -> None: - super(HeadPose, self).__init__(**kwargs) - self.roll = roll - self.yaw = yaw - self.pitch = pitch diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_candidate.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_candidate.py deleted file mode 100644 index 84588c7b1fed..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_candidate.py +++ /dev/null @@ -1,41 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class IdentifyCandidate(Model): - """All possible faces that may qualify. - - All required parameters must be populated in order to send to Azure. - - :param person_id: Required. Id of candidate - :type person_id: str - :param confidence: Required. Confidence threshold of identification, used - to judge whether one face belong to one person. The range of - confidenceThreshold is [0, 1] (default specified by algorithm). 
- :type confidence: float - """ - - _validation = { - 'person_id': {'required': True}, - 'confidence': {'required': True}, - } - - _attribute_map = { - 'person_id': {'key': 'personId', 'type': 'str'}, - 'confidence': {'key': 'confidence', 'type': 'float'}, - } - - def __init__(self, **kwargs): - super(IdentifyCandidate, self).__init__(**kwargs) - self.person_id = kwargs.get('person_id', None) - self.confidence = kwargs.get('confidence', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_candidate_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_candidate_py3.py deleted file mode 100644 index 924a8bc33bf2..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_candidate_py3.py +++ /dev/null @@ -1,41 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class IdentifyCandidate(Model): - """All possible faces that may qualify. - - All required parameters must be populated in order to send to Azure. - - :param person_id: Required. Id of candidate - :type person_id: str - :param confidence: Required. Confidence threshold of identification, used - to judge whether one face belong to one person. The range of - confidenceThreshold is [0, 1] (default specified by algorithm). 
- :type confidence: float - """ - - _validation = { - 'person_id': {'required': True}, - 'confidence': {'required': True}, - } - - _attribute_map = { - 'person_id': {'key': 'personId', 'type': 'str'}, - 'confidence': {'key': 'confidence', 'type': 'float'}, - } - - def __init__(self, *, person_id: str, confidence: float, **kwargs) -> None: - super(IdentifyCandidate, self).__init__(**kwargs) - self.person_id = person_id - self.confidence = confidence diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_request.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_request.py deleted file mode 100644 index 5b7175b406e5..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_request.py +++ /dev/null @@ -1,64 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class IdentifyRequest(Model): - """Request body for identify face operation. - - All required parameters must be populated in order to send to Azure. - - :param face_ids: Required. Array of query faces faceIds, created by the - Face - Detect. Each of the faces are identified independently. The valid - number of faceIds is between [1, 10]. - :type face_ids: list[str] - :param person_group_id: PersonGroupId of the target person group, created - by PersonGroup - Create. 
Parameter personGroupId and largePersonGroupId - should not be provided at the same time. - :type person_group_id: str - :param large_person_group_id: LargePersonGroupId of the target large - person group, created by LargePersonGroup - Create. Parameter - personGroupId and largePersonGroupId should not be provided at the same - time. - :type large_person_group_id: str - :param max_num_of_candidates_returned: The range of - maxNumOfCandidatesReturned is between 1 and 5 (default is 1). Default - value: 1 . - :type max_num_of_candidates_returned: int - :param confidence_threshold: Confidence threshold of identification, used - to judge whether one face belong to one person. The range of - confidenceThreshold is [0, 1] (default specified by algorithm). - :type confidence_threshold: float - """ - - _validation = { - 'face_ids': {'required': True, 'max_items': 10}, - 'person_group_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - 'large_person_group_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - 'max_num_of_candidates_returned': {'maximum': 5, 'minimum': 1}, - } - - _attribute_map = { - 'face_ids': {'key': 'faceIds', 'type': '[str]'}, - 'person_group_id': {'key': 'personGroupId', 'type': 'str'}, - 'large_person_group_id': {'key': 'largePersonGroupId', 'type': 'str'}, - 'max_num_of_candidates_returned': {'key': 'maxNumOfCandidatesReturned', 'type': 'int'}, - 'confidence_threshold': {'key': 'confidenceThreshold', 'type': 'float'}, - } - - def __init__(self, **kwargs): - super(IdentifyRequest, self).__init__(**kwargs) - self.face_ids = kwargs.get('face_ids', None) - self.person_group_id = kwargs.get('person_group_id', None) - self.large_person_group_id = kwargs.get('large_person_group_id', None) - self.max_num_of_candidates_returned = kwargs.get('max_num_of_candidates_returned', 1) - self.confidence_threshold = kwargs.get('confidence_threshold', None) diff --git 
a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_request_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_request_py3.py deleted file mode 100644 index b9494964f853..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_request_py3.py +++ /dev/null @@ -1,64 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class IdentifyRequest(Model): - """Request body for identify face operation. - - All required parameters must be populated in order to send to Azure. - - :param face_ids: Required. Array of query faces faceIds, created by the - Face - Detect. Each of the faces are identified independently. The valid - number of faceIds is between [1, 10]. - :type face_ids: list[str] - :param person_group_id: PersonGroupId of the target person group, created - by PersonGroup - Create. Parameter personGroupId and largePersonGroupId - should not be provided at the same time. - :type person_group_id: str - :param large_person_group_id: LargePersonGroupId of the target large - person group, created by LargePersonGroup - Create. Parameter - personGroupId and largePersonGroupId should not be provided at the same - time. - :type large_person_group_id: str - :param max_num_of_candidates_returned: The range of - maxNumOfCandidatesReturned is between 1 and 5 (default is 1). Default - value: 1 . 
- :type max_num_of_candidates_returned: int - :param confidence_threshold: Confidence threshold of identification, used - to judge whether one face belong to one person. The range of - confidenceThreshold is [0, 1] (default specified by algorithm). - :type confidence_threshold: float - """ - - _validation = { - 'face_ids': {'required': True, 'max_items': 10}, - 'person_group_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - 'large_person_group_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - 'max_num_of_candidates_returned': {'maximum': 5, 'minimum': 1}, - } - - _attribute_map = { - 'face_ids': {'key': 'faceIds', 'type': '[str]'}, - 'person_group_id': {'key': 'personGroupId', 'type': 'str'}, - 'large_person_group_id': {'key': 'largePersonGroupId', 'type': 'str'}, - 'max_num_of_candidates_returned': {'key': 'maxNumOfCandidatesReturned', 'type': 'int'}, - 'confidence_threshold': {'key': 'confidenceThreshold', 'type': 'float'}, - } - - def __init__(self, *, face_ids, person_group_id: str=None, large_person_group_id: str=None, max_num_of_candidates_returned: int=1, confidence_threshold: float=None, **kwargs) -> None: - super(IdentifyRequest, self).__init__(**kwargs) - self.face_ids = face_ids - self.person_group_id = person_group_id - self.large_person_group_id = large_person_group_id - self.max_num_of_candidates_returned = max_num_of_candidates_returned - self.confidence_threshold = confidence_threshold diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_result.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_result.py deleted file mode 100644 index 4a371afc92fd..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_result.py +++ /dev/null @@ -1,43 +0,0 @@ -# coding=utf-8 -# 
-------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class IdentifyResult(Model): - """Response body for identify face operation. - - All required parameters must be populated in order to send to Azure. - - :param face_id: Required. FaceId of the query face - :type face_id: str - :param candidates: Required. Identified person candidates for that face - (ranked by confidence). Array size should be no larger than input - maxNumOfCandidatesReturned. If no person is identified, will return an - empty array. - :type candidates: - list[~azure.cognitiveservices.vision.face.models.IdentifyCandidate] - """ - - _validation = { - 'face_id': {'required': True}, - 'candidates': {'required': True}, - } - - _attribute_map = { - 'face_id': {'key': 'faceId', 'type': 'str'}, - 'candidates': {'key': 'candidates', 'type': '[IdentifyCandidate]'}, - } - - def __init__(self, **kwargs): - super(IdentifyResult, self).__init__(**kwargs) - self.face_id = kwargs.get('face_id', None) - self.candidates = kwargs.get('candidates', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_result_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_result_py3.py deleted file mode 100644 index d629c03201f7..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/identify_result_py3.py +++ /dev/null @@ -1,43 +0,0 @@ -# coding=utf-8 -# 
-------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class IdentifyResult(Model): - """Response body for identify face operation. - - All required parameters must be populated in order to send to Azure. - - :param face_id: Required. FaceId of the query face - :type face_id: str - :param candidates: Required. Identified person candidates for that face - (ranked by confidence). Array size should be no larger than input - maxNumOfCandidatesReturned. If no person is identified, will return an - empty array. - :type candidates: - list[~azure.cognitiveservices.vision.face.models.IdentifyCandidate] - """ - - _validation = { - 'face_id': {'required': True}, - 'candidates': {'required': True}, - } - - _attribute_map = { - 'face_id': {'key': 'faceId', 'type': 'str'}, - 'candidates': {'key': 'candidates', 'type': '[IdentifyCandidate]'}, - } - - def __init__(self, *, face_id: str, candidates, **kwargs) -> None: - super(IdentifyResult, self).__init__(**kwargs) - self.face_id = face_id - self.candidates = candidates diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/image_url.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/image_url.py deleted file mode 100644 index 25106793ad9c..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/image_url.py +++ /dev/null @@ -1,34 +0,0 @@ -# coding=utf-8 -# 
-------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class ImageUrl(Model): - """ImageUrl. - - All required parameters must be populated in order to send to Azure. - - :param url: Required. Publicly reachable URL of an image - :type url: str - """ - - _validation = { - 'url': {'required': True}, - } - - _attribute_map = { - 'url': {'key': 'url', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(ImageUrl, self).__init__(**kwargs) - self.url = kwargs.get('url', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/image_url_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/image_url_py3.py deleted file mode 100644 index 3e00709f804d..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/image_url_py3.py +++ /dev/null @@ -1,34 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class ImageUrl(Model): - """ImageUrl. 
- - All required parameters must be populated in order to send to Azure. - - :param url: Required. Publicly reachable URL of an image - :type url: str - """ - - _validation = { - 'url': {'required': True}, - } - - _attribute_map = { - 'url': {'key': 'url', 'type': 'str'}, - } - - def __init__(self, *, url: str, **kwargs) -> None: - super(ImageUrl, self).__init__(**kwargs) - self.url = url diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/large_face_list.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/large_face_list.py deleted file mode 100644 index 787f6ab05318..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/large_face_list.py +++ /dev/null @@ -1,48 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .meta_data_contract import MetaDataContract - - -class LargeFaceList(MetaDataContract): - """Large face list object. - - All required parameters must be populated in order to send to Azure. - - :param name: User defined name, maximum length is 128. - :type name: str - :param user_data: User specified data. Length should not exceed 16KB. - :type user_data: str - :param recognition_model: Possible values include: 'recognition_01', - 'recognition_02'. Default value: "recognition_01" . - :type recognition_model: str or - ~azure.cognitiveservices.vision.face.models.RecognitionModel - :param large_face_list_id: Required. 
LargeFaceListId of the target large - face list. - :type large_face_list_id: str - """ - - _validation = { - 'name': {'max_length': 128}, - 'user_data': {'max_length': 16384}, - 'large_face_list_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'user_data': {'key': 'userData', 'type': 'str'}, - 'recognition_model': {'key': 'recognitionModel', 'type': 'str'}, - 'large_face_list_id': {'key': 'largeFaceListId', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(LargeFaceList, self).__init__(**kwargs) - self.large_face_list_id = kwargs.get('large_face_list_id', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/large_face_list_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/large_face_list_py3.py deleted file mode 100644 index fd8dc16ece21..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/large_face_list_py3.py +++ /dev/null @@ -1,48 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .meta_data_contract_py3 import MetaDataContract - - -class LargeFaceList(MetaDataContract): - """Large face list object. - - All required parameters must be populated in order to send to Azure. - - :param name: User defined name, maximum length is 128. - :type name: str - :param user_data: User specified data. Length should not exceed 16KB. 
- :type user_data: str - :param recognition_model: Possible values include: 'recognition_01', - 'recognition_02'. Default value: "recognition_01" . - :type recognition_model: str or - ~azure.cognitiveservices.vision.face.models.RecognitionModel - :param large_face_list_id: Required. LargeFaceListId of the target large - face list. - :type large_face_list_id: str - """ - - _validation = { - 'name': {'max_length': 128}, - 'user_data': {'max_length': 16384}, - 'large_face_list_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'user_data': {'key': 'userData', 'type': 'str'}, - 'recognition_model': {'key': 'recognitionModel', 'type': 'str'}, - 'large_face_list_id': {'key': 'largeFaceListId', 'type': 'str'}, - } - - def __init__(self, *, large_face_list_id: str, name: str=None, user_data: str=None, recognition_model="recognition_01", **kwargs) -> None: - super(LargeFaceList, self).__init__(name=name, user_data=user_data, recognition_model=recognition_model, **kwargs) - self.large_face_list_id = large_face_list_id diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/large_person_group.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/large_person_group.py deleted file mode 100644 index 91320af31be6..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/large_person_group.py +++ /dev/null @@ -1,48 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from .meta_data_contract import MetaDataContract - - -class LargePersonGroup(MetaDataContract): - """Large person group object. - - All required parameters must be populated in order to send to Azure. - - :param name: User defined name, maximum length is 128. - :type name: str - :param user_data: User specified data. Length should not exceed 16KB. - :type user_data: str - :param recognition_model: Possible values include: 'recognition_01', - 'recognition_02'. Default value: "recognition_01" . - :type recognition_model: str or - ~azure.cognitiveservices.vision.face.models.RecognitionModel - :param large_person_group_id: Required. LargePersonGroupId of the target - large person groups - :type large_person_group_id: str - """ - - _validation = { - 'name': {'max_length': 128}, - 'user_data': {'max_length': 16384}, - 'large_person_group_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'user_data': {'key': 'userData', 'type': 'str'}, - 'recognition_model': {'key': 'recognitionModel', 'type': 'str'}, - 'large_person_group_id': {'key': 'largePersonGroupId', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(LargePersonGroup, self).__init__(**kwargs) - self.large_person_group_id = kwargs.get('large_person_group_id', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/large_person_group_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/large_person_group_py3.py deleted file mode 100644 index d6de0c20da85..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/large_person_group_py3.py +++ /dev/null @@ -1,48 +0,0 @@ -# coding=utf-8 -# 
-------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .meta_data_contract_py3 import MetaDataContract - - -class LargePersonGroup(MetaDataContract): - """Large person group object. - - All required parameters must be populated in order to send to Azure. - - :param name: User defined name, maximum length is 128. - :type name: str - :param user_data: User specified data. Length should not exceed 16KB. - :type user_data: str - :param recognition_model: Possible values include: 'recognition_01', - 'recognition_02'. Default value: "recognition_01" . - :type recognition_model: str or - ~azure.cognitiveservices.vision.face.models.RecognitionModel - :param large_person_group_id: Required. 
LargePersonGroupId of the target - large person groups - :type large_person_group_id: str - """ - - _validation = { - 'name': {'max_length': 128}, - 'user_data': {'max_length': 16384}, - 'large_person_group_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'user_data': {'key': 'userData', 'type': 'str'}, - 'recognition_model': {'key': 'recognitionModel', 'type': 'str'}, - 'large_person_group_id': {'key': 'largePersonGroupId', 'type': 'str'}, - } - - def __init__(self, *, large_person_group_id: str, name: str=None, user_data: str=None, recognition_model="recognition_01", **kwargs) -> None: - super(LargePersonGroup, self).__init__(name=name, user_data=user_data, recognition_model=recognition_model, **kwargs) - self.large_person_group_id = large_person_group_id diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/makeup.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/makeup.py deleted file mode 100644 index bc02e44ce561..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/makeup.py +++ /dev/null @@ -1,34 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Makeup(Model): - """Properties describing present makeups on a given face. 
- - :param eye_makeup: A boolean value describing whether eye makeup is - present on a face. - :type eye_makeup: bool - :param lip_makeup: A boolean value describing whether lip makeup is - present on a face. - :type lip_makeup: bool - """ - - _attribute_map = { - 'eye_makeup': {'key': 'eyeMakeup', 'type': 'bool'}, - 'lip_makeup': {'key': 'lipMakeup', 'type': 'bool'}, - } - - def __init__(self, **kwargs): - super(Makeup, self).__init__(**kwargs) - self.eye_makeup = kwargs.get('eye_makeup', None) - self.lip_makeup = kwargs.get('lip_makeup', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/makeup_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/makeup_py3.py deleted file mode 100644 index 777f7bf25d19..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/makeup_py3.py +++ /dev/null @@ -1,34 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Makeup(Model): - """Properties describing present makeups on a given face. - - :param eye_makeup: A boolean value describing whether eye makeup is - present on a face. - :type eye_makeup: bool - :param lip_makeup: A boolean value describing whether lip makeup is - present on a face. 
- :type lip_makeup: bool - """ - - _attribute_map = { - 'eye_makeup': {'key': 'eyeMakeup', 'type': 'bool'}, - 'lip_makeup': {'key': 'lipMakeup', 'type': 'bool'}, - } - - def __init__(self, *, eye_makeup: bool=None, lip_makeup: bool=None, **kwargs) -> None: - super(Makeup, self).__init__(**kwargs) - self.eye_makeup = eye_makeup - self.lip_makeup = lip_makeup diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/meta_data_contract.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/meta_data_contract.py deleted file mode 100644 index e227cd4a15b5..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/meta_data_contract.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .name_and_user_data_contract import NameAndUserDataContract - - -class MetaDataContract(NameAndUserDataContract): - """A combination of user defined name and user specified data and recognition - model name for largePersonGroup/personGroup, and largeFaceList/faceList. - - :param name: User defined name, maximum length is 128. - :type name: str - :param user_data: User specified data. Length should not exceed 16KB. - :type user_data: str - :param recognition_model: Possible values include: 'recognition_01', - 'recognition_02'. Default value: "recognition_01" . 
- :type recognition_model: str or - ~azure.cognitiveservices.vision.face.models.RecognitionModel - """ - - _validation = { - 'name': {'max_length': 128}, - 'user_data': {'max_length': 16384}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'user_data': {'key': 'userData', 'type': 'str'}, - 'recognition_model': {'key': 'recognitionModel', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(MetaDataContract, self).__init__(**kwargs) - self.recognition_model = kwargs.get('recognition_model', "recognition_01") diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/meta_data_contract_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/meta_data_contract_py3.py deleted file mode 100644 index 5a6f3de5332f..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/meta_data_contract_py3.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .name_and_user_data_contract_py3 import NameAndUserDataContract - - -class MetaDataContract(NameAndUserDataContract): - """A combination of user defined name and user specified data and recognition - model name for largePersonGroup/personGroup, and largeFaceList/faceList. - - :param name: User defined name, maximum length is 128. - :type name: str - :param user_data: User specified data. Length should not exceed 16KB. 
- :type user_data: str - :param recognition_model: Possible values include: 'recognition_01', - 'recognition_02'. Default value: "recognition_01" . - :type recognition_model: str or - ~azure.cognitiveservices.vision.face.models.RecognitionModel - """ - - _validation = { - 'name': {'max_length': 128}, - 'user_data': {'max_length': 16384}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'user_data': {'key': 'userData', 'type': 'str'}, - 'recognition_model': {'key': 'recognitionModel', 'type': 'str'}, - } - - def __init__(self, *, name: str=None, user_data: str=None, recognition_model="recognition_01", **kwargs) -> None: - super(MetaDataContract, self).__init__(name=name, user_data=user_data, **kwargs) - self.recognition_model = recognition_model diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/name_and_user_data_contract.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/name_and_user_data_contract.py deleted file mode 100644 index ef1f79d83d24..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/name_and_user_data_contract.py +++ /dev/null @@ -1,38 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class NameAndUserDataContract(Model): - """A combination of user defined name and user specified data for the person, - largePersonGroup/personGroup, and largeFaceList/faceList. 
- - :param name: User defined name, maximum length is 128. - :type name: str - :param user_data: User specified data. Length should not exceed 16KB. - :type user_data: str - """ - - _validation = { - 'name': {'max_length': 128}, - 'user_data': {'max_length': 16384}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'user_data': {'key': 'userData', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(NameAndUserDataContract, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.user_data = kwargs.get('user_data', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/name_and_user_data_contract_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/name_and_user_data_contract_py3.py deleted file mode 100644 index 29c856742584..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/name_and_user_data_contract_py3.py +++ /dev/null @@ -1,38 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class NameAndUserDataContract(Model): - """A combination of user defined name and user specified data for the person, - largePersonGroup/personGroup, and largeFaceList/faceList. - - :param name: User defined name, maximum length is 128. - :type name: str - :param user_data: User specified data. Length should not exceed 16KB. 
- :type user_data: str - """ - - _validation = { - 'name': {'max_length': 128}, - 'user_data': {'max_length': 16384}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'user_data': {'key': 'userData', 'type': 'str'}, - } - - def __init__(self, *, name: str=None, user_data: str=None, **kwargs) -> None: - super(NameAndUserDataContract, self).__init__(**kwargs) - self.name = name - self.user_data = user_data diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/noise.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/noise.py deleted file mode 100644 index 565291f5b46d..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/noise.py +++ /dev/null @@ -1,37 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Noise(Model): - """Properties describing noise level of the image. - - :param noise_level: An enum value indicating level of noise. Possible - values include: 'Low', 'Medium', 'High' - :type noise_level: str or - ~azure.cognitiveservices.vision.face.models.NoiseLevel - :param value: A number indicating level of noise level ranging from 0 to - 1. [0, 0.25) is under exposure. [0.25, 0.75) is good exposure. [0.75, 1] - is over exposure. [0, 0.3) is low noise level. [0.3, 0.7) is medium noise - level. [0.7, 1] is high noise level. 
- :type value: float - """ - - _attribute_map = { - 'noise_level': {'key': 'noiseLevel', 'type': 'NoiseLevel'}, - 'value': {'key': 'value', 'type': 'float'}, - } - - def __init__(self, **kwargs): - super(Noise, self).__init__(**kwargs) - self.noise_level = kwargs.get('noise_level', None) - self.value = kwargs.get('value', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/noise_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/noise_py3.py deleted file mode 100644 index f5445d995eda..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/noise_py3.py +++ /dev/null @@ -1,37 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Noise(Model): - """Properties describing noise level of the image. - - :param noise_level: An enum value indicating level of noise. Possible - values include: 'Low', 'Medium', 'High' - :type noise_level: str or - ~azure.cognitiveservices.vision.face.models.NoiseLevel - :param value: A number indicating level of noise level ranging from 0 to - 1. [0, 0.25) is under exposure. [0.25, 0.75) is good exposure. [0.75, 1] - is over exposure. [0, 0.3) is low noise level. [0.3, 0.7) is medium noise - level. [0.7, 1] is high noise level. 
- :type value: float - """ - - _attribute_map = { - 'noise_level': {'key': 'noiseLevel', 'type': 'NoiseLevel'}, - 'value': {'key': 'value', 'type': 'float'}, - } - - def __init__(self, *, noise_level=None, value: float=None, **kwargs) -> None: - super(Noise, self).__init__(**kwargs) - self.noise_level = noise_level - self.value = value diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/occlusion.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/occlusion.py deleted file mode 100644 index c185869fb68d..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/occlusion.py +++ /dev/null @@ -1,38 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Occlusion(Model): - """Properties describing occlusions on a given face. - - :param forehead_occluded: A boolean value indicating whether forehead is - occluded. - :type forehead_occluded: bool - :param eye_occluded: A boolean value indicating whether eyes are occluded. - :type eye_occluded: bool - :param mouth_occluded: A boolean value indicating whether the mouth is - occluded. 
- :type mouth_occluded: bool - """ - - _attribute_map = { - 'forehead_occluded': {'key': 'foreheadOccluded', 'type': 'bool'}, - 'eye_occluded': {'key': 'eyeOccluded', 'type': 'bool'}, - 'mouth_occluded': {'key': 'mouthOccluded', 'type': 'bool'}, - } - - def __init__(self, **kwargs): - super(Occlusion, self).__init__(**kwargs) - self.forehead_occluded = kwargs.get('forehead_occluded', None) - self.eye_occluded = kwargs.get('eye_occluded', None) - self.mouth_occluded = kwargs.get('mouth_occluded', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/occlusion_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/occlusion_py3.py deleted file mode 100644 index fd3cfed50f6a..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/occlusion_py3.py +++ /dev/null @@ -1,38 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Occlusion(Model): - """Properties describing occlusions on a given face. - - :param forehead_occluded: A boolean value indicating whether forehead is - occluded. - :type forehead_occluded: bool - :param eye_occluded: A boolean value indicating whether eyes are occluded. - :type eye_occluded: bool - :param mouth_occluded: A boolean value indicating whether the mouth is - occluded. 
- :type mouth_occluded: bool - """ - - _attribute_map = { - 'forehead_occluded': {'key': 'foreheadOccluded', 'type': 'bool'}, - 'eye_occluded': {'key': 'eyeOccluded', 'type': 'bool'}, - 'mouth_occluded': {'key': 'mouthOccluded', 'type': 'bool'}, - } - - def __init__(self, *, forehead_occluded: bool=None, eye_occluded: bool=None, mouth_occluded: bool=None, **kwargs) -> None: - super(Occlusion, self).__init__(**kwargs) - self.forehead_occluded = forehead_occluded - self.eye_occluded = eye_occluded - self.mouth_occluded = mouth_occluded diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/operation_status.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/operation_status.py deleted file mode 100644 index 0a172817dd5c..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/operation_status.py +++ /dev/null @@ -1,73 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class OperationStatus(Model): - """Operation status object. Operation refers to the asynchronous backend task - including taking a snapshot and applying a snapshot. - - All required parameters must be populated in order to send to Azure. - - :param status: Required. Operation status: notstarted, running, succeeded, - failed. If the operation is requested and waiting to perform, the status - is notstarted. 
If the operation is ongoing in backend, the status is - running. Status succeeded means the operation is completed successfully, - specifically for snapshot taking operation, it illustrates the snapshot is - well taken and ready to apply, and for snapshot applying operation, it - presents the target object has finished creating by the snapshot and ready - to be used. Status failed is often caused by editing the source object - while taking the snapshot or editing the target object while applying the - snapshot before completion, see the field "message" to check the failure - reason. Possible values include: 'notstarted', 'running', 'succeeded', - 'failed' - :type status: str or - ~azure.cognitiveservices.vision.face.models.OperationStatusType - :param created_time: Required. A combined UTC date and time string that - describes the time when the operation (take or apply a snapshot) is - requested. E.g. 2018-12-25T11:41:02.2331413Z. - :type created_time: datetime - :param last_action_time: A combined UTC date and time string that - describes the last time the operation (take or apply a snapshot) is - actively migrating data. The lastActionTime will keep increasing until the - operation finishes. E.g. 2018-12-25T11:51:27.8705696Z. - :type last_action_time: datetime - :param resource_location: When the operation succeeds successfully, for - snapshot taking operation the snapshot id will be included in this field, - and for snapshot applying operation, the path to get the target object - will be returned in this field. - :type resource_location: str - :param message: Show failure message when operation fails (omitted when - operation succeeds). 
- :type message: str - """ - - _validation = { - 'status': {'required': True}, - 'created_time': {'required': True}, - } - - _attribute_map = { - 'status': {'key': 'status', 'type': 'OperationStatusType'}, - 'created_time': {'key': 'createdTime', 'type': 'iso-8601'}, - 'last_action_time': {'key': 'lastActionTime', 'type': 'iso-8601'}, - 'resource_location': {'key': 'resourceLocation', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(OperationStatus, self).__init__(**kwargs) - self.status = kwargs.get('status', None) - self.created_time = kwargs.get('created_time', None) - self.last_action_time = kwargs.get('last_action_time', None) - self.resource_location = kwargs.get('resource_location', None) - self.message = kwargs.get('message', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/operation_status_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/operation_status_py3.py deleted file mode 100644 index 37c6c4b8b516..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/operation_status_py3.py +++ /dev/null @@ -1,73 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class OperationStatus(Model): - """Operation status object. Operation refers to the asynchronous backend task - including taking a snapshot and applying a snapshot. 
- - All required parameters must be populated in order to send to Azure. - - :param status: Required. Operation status: notstarted, running, succeeded, - failed. If the operation is requested and waiting to perform, the status - is notstarted. If the operation is ongoing in backend, the status is - running. Status succeeded means the operation is completed successfully, - specifically for snapshot taking operation, it illustrates the snapshot is - well taken and ready to apply, and for snapshot applying operation, it - presents the target object has finished creating by the snapshot and ready - to be used. Status failed is often caused by editing the source object - while taking the snapshot or editing the target object while applying the - snapshot before completion, see the field "message" to check the failure - reason. Possible values include: 'notstarted', 'running', 'succeeded', - 'failed' - :type status: str or - ~azure.cognitiveservices.vision.face.models.OperationStatusType - :param created_time: Required. A combined UTC date and time string that - describes the time when the operation (take or apply a snapshot) is - requested. E.g. 2018-12-25T11:41:02.2331413Z. - :type created_time: datetime - :param last_action_time: A combined UTC date and time string that - describes the last time the operation (take or apply a snapshot) is - actively migrating data. The lastActionTime will keep increasing until the - operation finishes. E.g. 2018-12-25T11:51:27.8705696Z. - :type last_action_time: datetime - :param resource_location: When the operation succeeds successfully, for - snapshot taking operation the snapshot id will be included in this field, - and for snapshot applying operation, the path to get the target object - will be returned in this field. - :type resource_location: str - :param message: Show failure message when operation fails (omitted when - operation succeeds). 
- :type message: str - """ - - _validation = { - 'status': {'required': True}, - 'created_time': {'required': True}, - } - - _attribute_map = { - 'status': {'key': 'status', 'type': 'OperationStatusType'}, - 'created_time': {'key': 'createdTime', 'type': 'iso-8601'}, - 'last_action_time': {'key': 'lastActionTime', 'type': 'iso-8601'}, - 'resource_location': {'key': 'resourceLocation', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - } - - def __init__(self, *, status, created_time, last_action_time=None, resource_location: str=None, message: str=None, **kwargs) -> None: - super(OperationStatus, self).__init__(**kwargs) - self.status = status - self.created_time = created_time - self.last_action_time = last_action_time - self.resource_location = resource_location - self.message = message diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/persisted_face.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/persisted_face.py deleted file mode 100644 index e8a1f236eaa1..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/persisted_face.py +++ /dev/null @@ -1,43 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class PersistedFace(Model): - """PersonFace object. - - All required parameters must be populated in order to send to Azure. - - :param persisted_face_id: Required. 
The persistedFaceId of the target - face, which is persisted and will not expire. Different from faceId - created by Face - Detect and will expire in 24 hours after the detection - call. - :type persisted_face_id: str - :param user_data: User-provided data attached to the face. The size limit - is 1KB. - :type user_data: str - """ - - _validation = { - 'persisted_face_id': {'required': True}, - 'user_data': {'max_length': 1024}, - } - - _attribute_map = { - 'persisted_face_id': {'key': 'persistedFaceId', 'type': 'str'}, - 'user_data': {'key': 'userData', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(PersistedFace, self).__init__(**kwargs) - self.persisted_face_id = kwargs.get('persisted_face_id', None) - self.user_data = kwargs.get('user_data', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/persisted_face_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/persisted_face_py3.py deleted file mode 100644 index f26dedad729c..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/persisted_face_py3.py +++ /dev/null @@ -1,43 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class PersistedFace(Model): - """PersonFace object. - - All required parameters must be populated in order to send to Azure. - - :param persisted_face_id: Required. 
The persistedFaceId of the target - face, which is persisted and will not expire. Different from faceId - created by Face - Detect and will expire in 24 hours after the detection - call. - :type persisted_face_id: str - :param user_data: User-provided data attached to the face. The size limit - is 1KB. - :type user_data: str - """ - - _validation = { - 'persisted_face_id': {'required': True}, - 'user_data': {'max_length': 1024}, - } - - _attribute_map = { - 'persisted_face_id': {'key': 'persistedFaceId', 'type': 'str'}, - 'user_data': {'key': 'userData', 'type': 'str'}, - } - - def __init__(self, *, persisted_face_id: str, user_data: str=None, **kwargs) -> None: - super(PersistedFace, self).__init__(**kwargs) - self.persisted_face_id = persisted_face_id - self.user_data = user_data diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/person.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/person.py deleted file mode 100644 index 3e87905b2ded..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/person.py +++ /dev/null @@ -1,48 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .name_and_user_data_contract import NameAndUserDataContract - - -class Person(NameAndUserDataContract): - """Person object. - - All required parameters must be populated in order to send to Azure. - - :param name: User defined name, maximum length is 128. 
- :type name: str - :param user_data: User specified data. Length should not exceed 16KB. - :type user_data: str - :param person_id: Required. PersonId of the target face list. - :type person_id: str - :param persisted_face_ids: PersistedFaceIds of registered faces in the - person. These persistedFaceIds are returned from Person - Add a Person - Face, and will not expire. - :type persisted_face_ids: list[str] - """ - - _validation = { - 'name': {'max_length': 128}, - 'user_data': {'max_length': 16384}, - 'person_id': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'user_data': {'key': 'userData', 'type': 'str'}, - 'person_id': {'key': 'personId', 'type': 'str'}, - 'persisted_face_ids': {'key': 'persistedFaceIds', 'type': '[str]'}, - } - - def __init__(self, **kwargs): - super(Person, self).__init__(**kwargs) - self.person_id = kwargs.get('person_id', None) - self.persisted_face_ids = kwargs.get('persisted_face_ids', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/person_group.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/person_group.py deleted file mode 100644 index 385c943bddbf..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/person_group.py +++ /dev/null @@ -1,48 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from .meta_data_contract import MetaDataContract - - -class PersonGroup(MetaDataContract): - """Person group object. - - All required parameters must be populated in order to send to Azure. - - :param name: User defined name, maximum length is 128. - :type name: str - :param user_data: User specified data. Length should not exceed 16KB. - :type user_data: str - :param recognition_model: Possible values include: 'recognition_01', - 'recognition_02'. Default value: "recognition_01" . - :type recognition_model: str or - ~azure.cognitiveservices.vision.face.models.RecognitionModel - :param person_group_id: Required. PersonGroupId of the target person - group. - :type person_group_id: str - """ - - _validation = { - 'name': {'max_length': 128}, - 'user_data': {'max_length': 16384}, - 'person_group_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'user_data': {'key': 'userData', 'type': 'str'}, - 'recognition_model': {'key': 'recognitionModel', 'type': 'str'}, - 'person_group_id': {'key': 'personGroupId', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(PersonGroup, self).__init__(**kwargs) - self.person_group_id = kwargs.get('person_group_id', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/person_group_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/person_group_py3.py deleted file mode 100644 index 8ae811b81594..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/person_group_py3.py +++ /dev/null @@ -1,48 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. 
-# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .meta_data_contract_py3 import MetaDataContract - - -class PersonGroup(MetaDataContract): - """Person group object. - - All required parameters must be populated in order to send to Azure. - - :param name: User defined name, maximum length is 128. - :type name: str - :param user_data: User specified data. Length should not exceed 16KB. - :type user_data: str - :param recognition_model: Possible values include: 'recognition_01', - 'recognition_02'. Default value: "recognition_01" . - :type recognition_model: str or - ~azure.cognitiveservices.vision.face.models.RecognitionModel - :param person_group_id: Required. PersonGroupId of the target person - group. - :type person_group_id: str - """ - - _validation = { - 'name': {'max_length': 128}, - 'user_data': {'max_length': 16384}, - 'person_group_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'user_data': {'key': 'userData', 'type': 'str'}, - 'recognition_model': {'key': 'recognitionModel', 'type': 'str'}, - 'person_group_id': {'key': 'personGroupId', 'type': 'str'}, - } - - def __init__(self, *, person_group_id: str, name: str=None, user_data: str=None, recognition_model="recognition_01", **kwargs) -> None: - super(PersonGroup, self).__init__(name=name, user_data=user_data, recognition_model=recognition_model, **kwargs) - self.person_group_id = person_group_id diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/person_py3.py 
b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/person_py3.py deleted file mode 100644 index 230f8afd82c5..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/person_py3.py +++ /dev/null @@ -1,48 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .name_and_user_data_contract_py3 import NameAndUserDataContract - - -class Person(NameAndUserDataContract): - """Person object. - - All required parameters must be populated in order to send to Azure. - - :param name: User defined name, maximum length is 128. - :type name: str - :param user_data: User specified data. Length should not exceed 16KB. - :type user_data: str - :param person_id: Required. PersonId of the target face list. - :type person_id: str - :param persisted_face_ids: PersistedFaceIds of registered faces in the - person. These persistedFaceIds are returned from Person - Add a Person - Face, and will not expire. 
- :type persisted_face_ids: list[str] - """ - - _validation = { - 'name': {'max_length': 128}, - 'user_data': {'max_length': 16384}, - 'person_id': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'user_data': {'key': 'userData', 'type': 'str'}, - 'person_id': {'key': 'personId', 'type': 'str'}, - 'persisted_face_ids': {'key': 'persistedFaceIds', 'type': '[str]'}, - } - - def __init__(self, *, person_id: str, name: str=None, user_data: str=None, persisted_face_ids=None, **kwargs) -> None: - super(Person, self).__init__(name=name, user_data=user_data, **kwargs) - self.person_id = person_id - self.persisted_face_ids = persisted_face_ids diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/similar_face.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/similar_face.py deleted file mode 100644 index 59006700234b..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/similar_face.py +++ /dev/null @@ -1,46 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class SimilarFace(Model): - """Response body for find similar face operation. - - All required parameters must be populated in order to send to Azure. - - :param face_id: FaceId of candidate face when find by faceIds. 
faceId is - created by Face - Detect and will expire 24 hours after the detection call - :type face_id: str - :param persisted_face_id: PersistedFaceId of candidate face when find by - faceListId. persistedFaceId in face list is persisted and will not expire. - As showed in below response - :type persisted_face_id: str - :param confidence: Required. Similarity confidence of the candidate face. - The higher confidence, the more similar. Range between [0,1]. - :type confidence: float - """ - - _validation = { - 'confidence': {'required': True}, - } - - _attribute_map = { - 'face_id': {'key': 'faceId', 'type': 'str'}, - 'persisted_face_id': {'key': 'persistedFaceId', 'type': 'str'}, - 'confidence': {'key': 'confidence', 'type': 'float'}, - } - - def __init__(self, **kwargs): - super(SimilarFace, self).__init__(**kwargs) - self.face_id = kwargs.get('face_id', None) - self.persisted_face_id = kwargs.get('persisted_face_id', None) - self.confidence = kwargs.get('confidence', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/similar_face_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/similar_face_py3.py deleted file mode 100644 index 8d464fb315d5..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/similar_face_py3.py +++ /dev/null @@ -1,46 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class SimilarFace(Model): - """Response body for find similar face operation. - - All required parameters must be populated in order to send to Azure. - - :param face_id: FaceId of candidate face when find by faceIds. faceId is - created by Face - Detect and will expire 24 hours after the detection call - :type face_id: str - :param persisted_face_id: PersistedFaceId of candidate face when find by - faceListId. persistedFaceId in face list is persisted and will not expire. - As showed in below response - :type persisted_face_id: str - :param confidence: Required. Similarity confidence of the candidate face. - The higher confidence, the more similar. Range between [0,1]. - :type confidence: float - """ - - _validation = { - 'confidence': {'required': True}, - } - - _attribute_map = { - 'face_id': {'key': 'faceId', 'type': 'str'}, - 'persisted_face_id': {'key': 'persistedFaceId', 'type': 'str'}, - 'confidence': {'key': 'confidence', 'type': 'float'}, - } - - def __init__(self, *, confidence: float, face_id: str=None, persisted_face_id: str=None, **kwargs) -> None: - super(SimilarFace, self).__init__(**kwargs) - self.face_id = face_id - self.persisted_face_id = persisted_face_id - self.confidence = confidence diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/snapshot.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/snapshot.py deleted file mode 100644 index dce7ab7ccfbc..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/snapshot.py +++ /dev/null @@ -1,78 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Snapshot(Model): - """Snapshot object. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. Snapshot id. - :type id: str - :param account: Required. Azure Cognitive Service Face account id of the - subscriber who created the snapshot by Snapshot - Take. - :type account: str - :param type: Required. Type of the source object in the snapshot, - specified by the subscriber who created the snapshot when calling Snapshot - - Take. Currently FaceList, PersonGroup, LargeFaceList and - LargePersonGroup are supported. Possible values include: 'FaceList', - 'LargeFaceList', 'LargePersonGroup', 'PersonGroup' - :type type: str or - ~azure.cognitiveservices.vision.face.models.SnapshotObjectType - :param apply_scope: Required. Array of the target Face subscription ids - for the snapshot, specified by the user who created the snapshot when - calling Snapshot - Take. For each snapshot, only subscriptions included in - the applyScope of Snapshot - Take can apply it. - :type apply_scope: list[str] - :param user_data: User specified data about the snapshot for any purpose. - Length should not exceed 16KB. - :type user_data: str - :param created_time: Required. A combined UTC date and time string that - describes the created time of the snapshot. E.g. - 2018-12-25T11:41:02.2331413Z. - :type created_time: datetime - :param last_update_time: Required. A combined UTC date and time string - that describes the last time when the snapshot was created or updated by - Snapshot - Update. E.g. 2018-12-25T11:51:27.8705696Z. 
- :type last_update_time: datetime - """ - - _validation = { - 'id': {'required': True}, - 'account': {'required': True}, - 'type': {'required': True}, - 'apply_scope': {'required': True}, - 'user_data': {'max_length': 16384}, - 'created_time': {'required': True}, - 'last_update_time': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'account': {'key': 'account', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'SnapshotObjectType'}, - 'apply_scope': {'key': 'applyScope', 'type': '[str]'}, - 'user_data': {'key': 'userData', 'type': 'str'}, - 'created_time': {'key': 'createdTime', 'type': 'iso-8601'}, - 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, - } - - def __init__(self, **kwargs): - super(Snapshot, self).__init__(**kwargs) - self.id = kwargs.get('id', None) - self.account = kwargs.get('account', None) - self.type = kwargs.get('type', None) - self.apply_scope = kwargs.get('apply_scope', None) - self.user_data = kwargs.get('user_data', None) - self.created_time = kwargs.get('created_time', None) - self.last_update_time = kwargs.get('last_update_time', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/snapshot_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/snapshot_py3.py deleted file mode 100644 index 3d826c906a3c..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/snapshot_py3.py +++ /dev/null @@ -1,78 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. 
-# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Snapshot(Model): - """Snapshot object. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. Snapshot id. - :type id: str - :param account: Required. Azure Cognitive Service Face account id of the - subscriber who created the snapshot by Snapshot - Take. - :type account: str - :param type: Required. Type of the source object in the snapshot, - specified by the subscriber who created the snapshot when calling Snapshot - - Take. Currently FaceList, PersonGroup, LargeFaceList and - LargePersonGroup are supported. Possible values include: 'FaceList', - 'LargeFaceList', 'LargePersonGroup', 'PersonGroup' - :type type: str or - ~azure.cognitiveservices.vision.face.models.SnapshotObjectType - :param apply_scope: Required. Array of the target Face subscription ids - for the snapshot, specified by the user who created the snapshot when - calling Snapshot - Take. For each snapshot, only subscriptions included in - the applyScope of Snapshot - Take can apply it. - :type apply_scope: list[str] - :param user_data: User specified data about the snapshot for any purpose. - Length should not exceed 16KB. - :type user_data: str - :param created_time: Required. A combined UTC date and time string that - describes the created time of the snapshot. E.g. - 2018-12-25T11:41:02.2331413Z. - :type created_time: datetime - :param last_update_time: Required. A combined UTC date and time string - that describes the last time when the snapshot was created or updated by - Snapshot - Update. E.g. 2018-12-25T11:51:27.8705696Z. 
- :type last_update_time: datetime - """ - - _validation = { - 'id': {'required': True}, - 'account': {'required': True}, - 'type': {'required': True}, - 'apply_scope': {'required': True}, - 'user_data': {'max_length': 16384}, - 'created_time': {'required': True}, - 'last_update_time': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'account': {'key': 'account', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'SnapshotObjectType'}, - 'apply_scope': {'key': 'applyScope', 'type': '[str]'}, - 'user_data': {'key': 'userData', 'type': 'str'}, - 'created_time': {'key': 'createdTime', 'type': 'iso-8601'}, - 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, - } - - def __init__(self, *, id: str, account: str, type, apply_scope, created_time, last_update_time, user_data: str=None, **kwargs) -> None: - super(Snapshot, self).__init__(**kwargs) - self.id = id - self.account = account - self.type = type - self.apply_scope = apply_scope - self.user_data = user_data - self.created_time = created_time - self.last_update_time = last_update_time diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/take_snapshot_request.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/take_snapshot_request.py deleted file mode 100644 index 69cc3b0514d9..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/take_snapshot_request.py +++ /dev/null @@ -1,57 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class TakeSnapshotRequest(Model): - """Request body for taking snapshot operation. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. User specified type for the source object to take - snapshot from. Currently FaceList, PersonGroup, LargeFaceList and - LargePersonGroup are supported. Possible values include: 'FaceList', - 'LargeFaceList', 'LargePersonGroup', 'PersonGroup' - :type type: str or - ~azure.cognitiveservices.vision.face.models.SnapshotObjectType - :param object_id: Required. User specified source object id to take - snapshot from. - :type object_id: str - :param apply_scope: Required. User specified array of target Face - subscription ids for the snapshot. For each snapshot, only subscriptions - included in the applyScope of Snapshot - Take can apply it. - :type apply_scope: list[str] - :param user_data: User specified data about the snapshot for any purpose. - Length should not exceed 16KB. 
- :type user_data: str - """ - - _validation = { - 'type': {'required': True}, - 'object_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - 'apply_scope': {'required': True}, - 'user_data': {'max_length': 16384}, - } - - _attribute_map = { - 'type': {'key': 'type', 'type': 'SnapshotObjectType'}, - 'object_id': {'key': 'objectId', 'type': 'str'}, - 'apply_scope': {'key': 'applyScope', 'type': '[str]'}, - 'user_data': {'key': 'userData', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(TakeSnapshotRequest, self).__init__(**kwargs) - self.type = kwargs.get('type', None) - self.object_id = kwargs.get('object_id', None) - self.apply_scope = kwargs.get('apply_scope', None) - self.user_data = kwargs.get('user_data', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/take_snapshot_request_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/take_snapshot_request_py3.py deleted file mode 100644 index 01368ebe5909..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/take_snapshot_request_py3.py +++ /dev/null @@ -1,57 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class TakeSnapshotRequest(Model): - """Request body for taking snapshot operation. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. 
User specified type for the source object to take - snapshot from. Currently FaceList, PersonGroup, LargeFaceList and - LargePersonGroup are supported. Possible values include: 'FaceList', - 'LargeFaceList', 'LargePersonGroup', 'PersonGroup' - :type type: str or - ~azure.cognitiveservices.vision.face.models.SnapshotObjectType - :param object_id: Required. User specified source object id to take - snapshot from. - :type object_id: str - :param apply_scope: Required. User specified array of target Face - subscription ids for the snapshot. For each snapshot, only subscriptions - included in the applyScope of Snapshot - Take can apply it. - :type apply_scope: list[str] - :param user_data: User specified data about the snapshot for any purpose. - Length should not exceed 16KB. - :type user_data: str - """ - - _validation = { - 'type': {'required': True}, - 'object_id': {'required': True, 'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - 'apply_scope': {'required': True}, - 'user_data': {'max_length': 16384}, - } - - _attribute_map = { - 'type': {'key': 'type', 'type': 'SnapshotObjectType'}, - 'object_id': {'key': 'objectId', 'type': 'str'}, - 'apply_scope': {'key': 'applyScope', 'type': '[str]'}, - 'user_data': {'key': 'userData', 'type': 'str'}, - } - - def __init__(self, *, type, object_id: str, apply_scope, user_data: str=None, **kwargs) -> None: - super(TakeSnapshotRequest, self).__init__(**kwargs) - self.type = type - self.object_id = object_id - self.apply_scope = apply_scope - self.user_data = user_data diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/training_status.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/training_status.py deleted file mode 100644 index 718da91dc34b..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/training_status.py +++ /dev/null @@ -1,67 +0,0 
@@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class TrainingStatus(Model): - """Training status object. - - All required parameters must be populated in order to send to Azure. - - :param status: Required. Training status: notstarted, running, succeeded, - failed. If the training process is waiting to perform, the status is - notstarted. If the training is ongoing, the status is running. Status - succeed means this person group or large person group is ready for Face - - Identify, or this large face list is ready for Face - Find Similar. Status - failed is often caused by no person or no persisted face exist in the - person group or large person group, or no persisted face exist in the - large face list. Possible values include: 'nonstarted', 'running', - 'succeeded', 'failed' - :type status: str or - ~azure.cognitiveservices.vision.face.models.TrainingStatusType - :param created: Required. A combined UTC date and time string that - describes the created time of the person group, large person group or - large face list. - :type created: datetime - :param last_action: A combined UTC date and time string that describes the - last modify time of the person group, large person group or large face - list, could be null value when the group is not successfully trained. - :type last_action: datetime - :param last_successful_training: A combined UTC date and time string that - describes the last successful training time of the person group, large - person group or large face list. 
- :type last_successful_training: datetime - :param message: Show failure message when training failed (omitted when - training succeed). - :type message: str - """ - - _validation = { - 'status': {'required': True}, - 'created': {'required': True}, - } - - _attribute_map = { - 'status': {'key': 'status', 'type': 'TrainingStatusType'}, - 'created': {'key': 'createdDateTime', 'type': 'iso-8601'}, - 'last_action': {'key': 'lastActionDateTime', 'type': 'iso-8601'}, - 'last_successful_training': {'key': 'lastSuccessfulTrainingDateTime', 'type': 'iso-8601'}, - 'message': {'key': 'message', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(TrainingStatus, self).__init__(**kwargs) - self.status = kwargs.get('status', None) - self.created = kwargs.get('created', None) - self.last_action = kwargs.get('last_action', None) - self.last_successful_training = kwargs.get('last_successful_training', None) - self.message = kwargs.get('message', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/training_status_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/training_status_py3.py deleted file mode 100644 index 50857f65cddb..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/training_status_py3.py +++ /dev/null @@ -1,67 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class TrainingStatus(Model): - """Training status object. - - All required parameters must be populated in order to send to Azure. - - :param status: Required. Training status: notstarted, running, succeeded, - failed. If the training process is waiting to perform, the status is - notstarted. If the training is ongoing, the status is running. Status - succeed means this person group or large person group is ready for Face - - Identify, or this large face list is ready for Face - Find Similar. Status - failed is often caused by no person or no persisted face exist in the - person group or large person group, or no persisted face exist in the - large face list. Possible values include: 'nonstarted', 'running', - 'succeeded', 'failed' - :type status: str or - ~azure.cognitiveservices.vision.face.models.TrainingStatusType - :param created: Required. A combined UTC date and time string that - describes the created time of the person group, large person group or - large face list. - :type created: datetime - :param last_action: A combined UTC date and time string that describes the - last modify time of the person group, large person group or large face - list, could be null value when the group is not successfully trained. - :type last_action: datetime - :param last_successful_training: A combined UTC date and time string that - describes the last successful training time of the person group, large - person group or large face list. - :type last_successful_training: datetime - :param message: Show failure message when training failed (omitted when - training succeed). 
- :type message: str - """ - - _validation = { - 'status': {'required': True}, - 'created': {'required': True}, - } - - _attribute_map = { - 'status': {'key': 'status', 'type': 'TrainingStatusType'}, - 'created': {'key': 'createdDateTime', 'type': 'iso-8601'}, - 'last_action': {'key': 'lastActionDateTime', 'type': 'iso-8601'}, - 'last_successful_training': {'key': 'lastSuccessfulTrainingDateTime', 'type': 'iso-8601'}, - 'message': {'key': 'message', 'type': 'str'}, - } - - def __init__(self, *, status, created, last_action=None, last_successful_training=None, message: str=None, **kwargs) -> None: - super(TrainingStatus, self).__init__(**kwargs) - self.status = status - self.created = created - self.last_action = last_action - self.last_successful_training = last_successful_training - self.message = message diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/update_face_request.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/update_face_request.py deleted file mode 100644 index d2df86ba2b30..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/update_face_request.py +++ /dev/null @@ -1,33 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class UpdateFaceRequest(Model): - """Request to update face data. - - :param user_data: User-provided data attached to the face. The size limit - is 1KB. 
- :type user_data: str - """ - - _validation = { - 'user_data': {'max_length': 1024}, - } - - _attribute_map = { - 'user_data': {'key': 'userData', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(UpdateFaceRequest, self).__init__(**kwargs) - self.user_data = kwargs.get('user_data', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/update_face_request_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/update_face_request_py3.py deleted file mode 100644 index 2610f03251cd..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/update_face_request_py3.py +++ /dev/null @@ -1,33 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class UpdateFaceRequest(Model): - """Request to update face data. - - :param user_data: User-provided data attached to the face. The size limit - is 1KB. 
- :type user_data: str - """ - - _validation = { - 'user_data': {'max_length': 1024}, - } - - _attribute_map = { - 'user_data': {'key': 'userData', 'type': 'str'}, - } - - def __init__(self, *, user_data: str=None, **kwargs) -> None: - super(UpdateFaceRequest, self).__init__(**kwargs) - self.user_data = user_data diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/update_snapshot_request.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/update_snapshot_request.py deleted file mode 100644 index a6203ec9724d..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/update_snapshot_request.py +++ /dev/null @@ -1,41 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class UpdateSnapshotRequest(Model): - """Request body for updating a snapshot, with a combination of user defined - apply scope and user specified data. - - :param apply_scope: Array of the target Face subscription ids for the - snapshot, specified by the user who created the snapshot when calling - Snapshot - Take. For each snapshot, only subscriptions included in the - applyScope of Snapshot - Take can apply it. - :type apply_scope: list[str] - :param user_data: User specified data about the snapshot for any purpose. - Length should not exceed 16KB. 
- :type user_data: str - """ - - _validation = { - 'user_data': {'max_length': 16384}, - } - - _attribute_map = { - 'apply_scope': {'key': 'applyScope', 'type': '[str]'}, - 'user_data': {'key': 'userData', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(UpdateSnapshotRequest, self).__init__(**kwargs) - self.apply_scope = kwargs.get('apply_scope', None) - self.user_data = kwargs.get('user_data', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/update_snapshot_request_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/update_snapshot_request_py3.py deleted file mode 100644 index 31258cb36266..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/update_snapshot_request_py3.py +++ /dev/null @@ -1,41 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class UpdateSnapshotRequest(Model): - """Request body for updating a snapshot, with a combination of user defined - apply scope and user specified data. - - :param apply_scope: Array of the target Face subscription ids for the - snapshot, specified by the user who created the snapshot when calling - Snapshot - Take. For each snapshot, only subscriptions included in the - applyScope of Snapshot - Take can apply it. - :type apply_scope: list[str] - :param user_data: User specified data about the snapshot for any purpose. 
- Length should not exceed 16KB. - :type user_data: str - """ - - _validation = { - 'user_data': {'max_length': 16384}, - } - - _attribute_map = { - 'apply_scope': {'key': 'applyScope', 'type': '[str]'}, - 'user_data': {'key': 'userData', 'type': 'str'}, - } - - def __init__(self, *, apply_scope=None, user_data: str=None, **kwargs) -> None: - super(UpdateSnapshotRequest, self).__init__(**kwargs) - self.apply_scope = apply_scope - self.user_data = user_data diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_face_to_face_request.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_face_to_face_request.py deleted file mode 100644 index 9c22d1921f50..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_face_to_face_request.py +++ /dev/null @@ -1,41 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class VerifyFaceToFaceRequest(Model): - """Request body for face to face verification. - - All required parameters must be populated in order to send to Azure. - - :param face_id1: Required. FaceId of the first face, comes from Face - - Detect - :type face_id1: str - :param face_id2: Required. 
FaceId of the second face, comes from Face - - Detect - :type face_id2: str - """ - - _validation = { - 'face_id1': {'required': True}, - 'face_id2': {'required': True}, - } - - _attribute_map = { - 'face_id1': {'key': 'faceId1', 'type': 'str'}, - 'face_id2': {'key': 'faceId2', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(VerifyFaceToFaceRequest, self).__init__(**kwargs) - self.face_id1 = kwargs.get('face_id1', None) - self.face_id2 = kwargs.get('face_id2', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_face_to_face_request_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_face_to_face_request_py3.py deleted file mode 100644 index 9c5f2f347255..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_face_to_face_request_py3.py +++ /dev/null @@ -1,41 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class VerifyFaceToFaceRequest(Model): - """Request body for face to face verification. - - All required parameters must be populated in order to send to Azure. - - :param face_id1: Required. FaceId of the first face, comes from Face - - Detect - :type face_id1: str - :param face_id2: Required. 
FaceId of the second face, comes from Face - - Detect - :type face_id2: str - """ - - _validation = { - 'face_id1': {'required': True}, - 'face_id2': {'required': True}, - } - - _attribute_map = { - 'face_id1': {'key': 'faceId1', 'type': 'str'}, - 'face_id2': {'key': 'faceId2', 'type': 'str'}, - } - - def __init__(self, *, face_id1: str, face_id2: str, **kwargs) -> None: - super(VerifyFaceToFaceRequest, self).__init__(**kwargs) - self.face_id1 = face_id1 - self.face_id2 = face_id2 diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_face_to_person_request.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_face_to_person_request.py deleted file mode 100644 index 91169e15391e..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_face_to_person_request.py +++ /dev/null @@ -1,57 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class VerifyFaceToPersonRequest(Model): - """Request body for face to person verification. - - All required parameters must be populated in order to send to Azure. - - :param face_id: Required. FaceId of the face, comes from Face - Detect - :type face_id: str - :param person_group_id: Using existing personGroupId and personId for fast - loading a specified person. personGroupId is created in PersonGroup - - Create. 
Parameter personGroupId and largePersonGroupId should not be - provided at the same time. - :type person_group_id: str - :param large_person_group_id: Using existing largePersonGroupId and - personId for fast loading a specified person. largePersonGroupId is - created in LargePersonGroup - Create. Parameter personGroupId and - largePersonGroupId should not be provided at the same time. - :type large_person_group_id: str - :param person_id: Required. Specify a certain person in a person group or - a large person group. personId is created in PersonGroup Person - Create - or LargePersonGroup Person - Create. - :type person_id: str - """ - - _validation = { - 'face_id': {'required': True}, - 'person_group_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - 'large_person_group_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - 'person_id': {'required': True}, - } - - _attribute_map = { - 'face_id': {'key': 'faceId', 'type': 'str'}, - 'person_group_id': {'key': 'personGroupId', 'type': 'str'}, - 'large_person_group_id': {'key': 'largePersonGroupId', 'type': 'str'}, - 'person_id': {'key': 'personId', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(VerifyFaceToPersonRequest, self).__init__(**kwargs) - self.face_id = kwargs.get('face_id', None) - self.person_group_id = kwargs.get('person_group_id', None) - self.large_person_group_id = kwargs.get('large_person_group_id', None) - self.person_id = kwargs.get('person_id', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_face_to_person_request_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_face_to_person_request_py3.py deleted file mode 100644 index b5c7633d2c3a..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_face_to_person_request_py3.py +++ /dev/null @@ -1,57 +0,0 @@ -# 
coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class VerifyFaceToPersonRequest(Model): - """Request body for face to person verification. - - All required parameters must be populated in order to send to Azure. - - :param face_id: Required. FaceId of the face, comes from Face - Detect - :type face_id: str - :param person_group_id: Using existing personGroupId and personId for fast - loading a specified person. personGroupId is created in PersonGroup - - Create. Parameter personGroupId and largePersonGroupId should not be - provided at the same time. - :type person_group_id: str - :param large_person_group_id: Using existing largePersonGroupId and - personId for fast loading a specified person. largePersonGroupId is - created in LargePersonGroup - Create. Parameter personGroupId and - largePersonGroupId should not be provided at the same time. - :type large_person_group_id: str - :param person_id: Required. Specify a certain person in a person group or - a large person group. personId is created in PersonGroup Person - Create - or LargePersonGroup Person - Create. 
- :type person_id: str - """ - - _validation = { - 'face_id': {'required': True}, - 'person_group_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - 'large_person_group_id': {'max_length': 64, 'pattern': r'^[a-z0-9-_]+$'}, - 'person_id': {'required': True}, - } - - _attribute_map = { - 'face_id': {'key': 'faceId', 'type': 'str'}, - 'person_group_id': {'key': 'personGroupId', 'type': 'str'}, - 'large_person_group_id': {'key': 'largePersonGroupId', 'type': 'str'}, - 'person_id': {'key': 'personId', 'type': 'str'}, - } - - def __init__(self, *, face_id: str, person_id: str, person_group_id: str=None, large_person_group_id: str=None, **kwargs) -> None: - super(VerifyFaceToPersonRequest, self).__init__(**kwargs) - self.face_id = face_id - self.person_group_id = person_group_id - self.large_person_group_id = large_person_group_id - self.person_id = person_id diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_result.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_result.py deleted file mode 100644 index 1d9fd6649696..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_result.py +++ /dev/null @@ -1,45 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class VerifyResult(Model): - """Result of the verify operation. 
- - All required parameters must be populated in order to send to Azure. - - :param is_identical: Required. True if the two faces belong to the same - person or the face belongs to the person, otherwise false. - :type is_identical: bool - :param confidence: Required. A number indicates the similarity confidence - of whether two faces belong to the same person, or whether the face - belongs to the person. By default, isIdentical is set to True if - similarity confidence is greater than or equal to 0.5. This is useful for - advanced users to override "isIdentical" and fine-tune the result on their - own data. - :type confidence: float - """ - - _validation = { - 'is_identical': {'required': True}, - 'confidence': {'required': True}, - } - - _attribute_map = { - 'is_identical': {'key': 'isIdentical', 'type': 'bool'}, - 'confidence': {'key': 'confidence', 'type': 'float'}, - } - - def __init__(self, **kwargs): - super(VerifyResult, self).__init__(**kwargs) - self.is_identical = kwargs.get('is_identical', None) - self.confidence = kwargs.get('confidence', None) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_result_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_result_py3.py deleted file mode 100644 index 9e43db6908a8..000000000000 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/models/verify_result_py3.py +++ /dev/null @@ -1,45 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class VerifyResult(Model): - """Result of the verify operation. - - All required parameters must be populated in order to send to Azure. - - :param is_identical: Required. True if the two faces belong to the same - person or the face belongs to the person, otherwise false. - :type is_identical: bool - :param confidence: Required. A number indicates the similarity confidence - of whether two faces belong to the same person, or whether the face - belongs to the person. By default, isIdentical is set to True if - similarity confidence is greater than or equal to 0.5. This is useful for - advanced users to override "isIdentical" and fine-tune the result on their - own data. - :type confidence: float - """ - - _validation = { - 'is_identical': {'required': True}, - 'confidence': {'required': True}, - } - - _attribute_map = { - 'is_identical': {'key': 'isIdentical', 'type': 'bool'}, - 'confidence': {'key': 'confidence', 'type': 'float'}, - } - - def __init__(self, *, is_identical: bool, confidence: float, **kwargs) -> None: - super(VerifyResult, self).__init__(**kwargs) - self.is_identical = is_identical - self.confidence = confidence diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/__init__.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/__init__.py index a2d8d59199ae..59416ab4d3e5 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/__init__.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/__init__.py @@ -9,14 +9,14 @@ # regenerated. 
# -------------------------------------------------------------------------- -from .face_operations import FaceOperations -from .person_group_person_operations import PersonGroupPersonOperations -from .person_group_operations import PersonGroupOperations -from .face_list_operations import FaceListOperations -from .large_person_group_person_operations import LargePersonGroupPersonOperations -from .large_person_group_operations import LargePersonGroupOperations -from .large_face_list_operations import LargeFaceListOperations -from .snapshot_operations import SnapshotOperations +from ._face_operations import FaceOperations +from ._person_group_person_operations import PersonGroupPersonOperations +from ._person_group_operations import PersonGroupOperations +from ._face_list_operations import FaceListOperations +from ._large_person_group_person_operations import LargePersonGroupPersonOperations +from ._large_person_group_operations import LargePersonGroupOperations +from ._large_face_list_operations import LargeFaceListOperations +from ._snapshot_operations import SnapshotOperations __all__ = [ 'FaceOperations', diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/face_list_operations.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_face_list_operations.py similarity index 99% rename from sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/face_list_operations.py rename to sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_face_list_operations.py index 9a7420dea8ff..20457119ee27 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/face_list_operations.py +++ 
b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_face_list_operations.py @@ -17,6 +17,8 @@ class FaceListOperations(object): """FaceListOperations operations. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. @@ -177,7 +179,6 @@ def get( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('FaceList', response) @@ -334,7 +335,6 @@ def list( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('[FaceList]', response) @@ -510,7 +510,6 @@ def add_face_from_url( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('PersistedFace', response) @@ -638,7 +637,6 @@ def add_face_from_stream( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('PersistedFace', response) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/face_operations.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_face_operations.py similarity index 99% rename from sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/face_operations.py rename to sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_face_operations.py index 6339928243a6..d2da536c3e62 100644 --- 
a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/face_operations.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_face_operations.py @@ -17,6 +17,8 @@ class FaceOperations(object): """FaceOperations operations. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. @@ -131,7 +133,6 @@ def find_similar( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('[SimilarFace]', response) @@ -205,7 +206,6 @@ def group( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('GroupResult', response) @@ -313,7 +313,6 @@ def identify( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('[IdentifyResult]', response) @@ -385,7 +384,6 @@ def verify_face_to_face( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('VerifyResult', response) @@ -551,7 +549,6 @@ def detect_with_url( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('[DetectedFace]', response) @@ -624,7 +621,6 @@ def verify_face_to_person( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('VerifyResult', response) @@ -793,7 +789,6 @@ def detect_with_stream( raise models.APIErrorException(self._deserialize, response) 
deserialized = None - if response.status_code == 200: deserialized = self._deserialize('[DetectedFace]', response) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/large_face_list_operations.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_large_face_list_operations.py similarity index 99% rename from sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/large_face_list_operations.py rename to sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_large_face_list_operations.py index ba70e8c6b02f..225794573afc 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/large_face_list_operations.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_large_face_list_operations.py @@ -17,6 +17,8 @@ class LargeFaceListOperations(object): """LargeFaceListOperations operations. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. 
@@ -181,7 +183,6 @@ def get( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('LargeFaceList', response) @@ -336,7 +337,6 @@ def get_training_status( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('TrainingStatus', response) @@ -411,7 +411,6 @@ def list( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('[LargeFaceList]', response) @@ -569,7 +568,6 @@ def get_face( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('PersistedFace', response) @@ -757,7 +755,6 @@ def add_face_from_url( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('PersistedFace', response) @@ -824,7 +821,6 @@ def list_faces( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('[PersistedFace]', response) @@ -957,7 +953,6 @@ def add_face_from_stream( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('PersistedFace', response) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/large_person_group_operations.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_large_person_group_operations.py similarity index 99% rename from sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/large_person_group_operations.py rename to 
sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_large_person_group_operations.py index be444e6d3c8c..4eb2a64c7dc5 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/large_person_group_operations.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_large_person_group_operations.py @@ -17,6 +17,8 @@ class LargePersonGroupOperations(object): """LargePersonGroupOperations operations. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. @@ -227,7 +229,6 @@ def get( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('LargePersonGroup', response) @@ -338,7 +339,6 @@ def get_training_status( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('TrainingStatus', response) @@ -420,7 +420,6 @@ def list( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('[LargePersonGroup]', response) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/large_person_group_person_operations.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_large_person_group_person_operations.py similarity index 99% rename from sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/large_person_group_person_operations.py rename to 
sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_large_person_group_person_operations.py index a61caa0bdd7b..2e5dc3ffcee8 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/large_person_group_person_operations.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_large_person_group_person_operations.py @@ -17,6 +17,8 @@ class LargePersonGroupPersonOperations(object): """LargePersonGroupPersonOperations operations. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. @@ -86,7 +88,6 @@ def create( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('Person', response) @@ -152,7 +153,6 @@ def list( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('[Person]', response) @@ -260,7 +260,6 @@ def get( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('Person', response) @@ -436,7 +435,6 @@ def get_face( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('PersistedFace', response) @@ -631,7 +629,6 @@ def add_face_from_url( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('PersistedFace', response) @@ -768,7 +765,6 @@ def add_face_from_stream( raise models.APIErrorException(self._deserialize, 
response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('PersistedFace', response) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/person_group_operations.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_person_group_operations.py similarity index 99% rename from sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/person_group_operations.py rename to sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_person_group_operations.py index 90e5381316dd..66cf2d6ca01c 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/person_group_operations.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_person_group_operations.py @@ -17,6 +17,8 @@ class PersonGroupOperations(object): """PersonGroupOperations operations. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. 
@@ -224,7 +226,6 @@ def get( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('PersonGroup', response) @@ -332,7 +333,6 @@ def get_training_status( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('TrainingStatus', response) @@ -411,7 +411,6 @@ def list( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('[PersonGroup]', response) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/person_group_person_operations.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_person_group_person_operations.py similarity index 99% rename from sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/person_group_person_operations.py rename to sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_person_group_person_operations.py index 8dc83c2c5860..3d62f95c7cee 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/person_group_person_operations.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_person_group_person_operations.py @@ -17,6 +17,8 @@ class PersonGroupPersonOperations(object): """PersonGroupPersonOperations operations. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. 
@@ -85,7 +87,6 @@ def create( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('Person', response) @@ -150,7 +151,6 @@ def list( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('[Person]', response) @@ -256,7 +256,6 @@ def get( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('Person', response) @@ -429,7 +428,6 @@ def get_face( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('PersistedFace', response) @@ -652,7 +650,6 @@ def add_face_from_url( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('PersistedFace', response) @@ -788,7 +785,6 @@ def add_face_from_stream( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('PersistedFace', response) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/snapshot_operations.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_snapshot_operations.py similarity index 99% rename from sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/snapshot_operations.py rename to sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_snapshot_operations.py index ed3d74ea599d..c8724851176a 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/snapshot_operations.py +++ 
b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/operations/_snapshot_operations.py @@ -17,6 +17,8 @@ class SnapshotOperations(object): """SnapshotOperations operations. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. @@ -190,7 +192,6 @@ def list( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('[Snapshot]', response) @@ -245,7 +246,6 @@ def get( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('Snapshot', response) @@ -506,7 +506,6 @@ def get_operation_status( raise models.APIErrorException(self._deserialize, response) deserialized = None - if response.status_code == 200: deserialized = self._deserialize('OperationStatus', response) From 23b02bea51a08fac774ec5611168f7d761cd796e Mon Sep 17 00:00:00 2001 From: Laurent Mazuel Date: Thu, 27 Jun 2019 14:09:23 -0700 Subject: [PATCH 3/6] ChangeLog --- .../HISTORY.rst | 20 +++++++++++++++++++ .../cognitiveservices/vision/face/version.py | 2 +- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/HISTORY.rst b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/HISTORY.rst index 697dc9b2b478..9403413dee55 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/HISTORY.rst +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/HISTORY.rst @@ -3,6 +3,26 @@ Release History =============== +0.4.0 (2019-06-27) +++++++++++++++++++ + +**Features** + +- Add "detection_model" to operations when possible. 
This is a breaking change if you were using positional arguments on some scenarios + +**Breaking changes** + +- Operation FaceListOperations.add_face_from_stream has a new signature +- Operation FaceListOperations.add_face_from_url has a new signature +- Operation FaceOperations.detect_with_stream has a new signature +- Operation FaceOperations.detect_with_url has a new signature +- Operation LargeFaceListOperations.add_face_from_stream has a new signature +- Operation LargeFaceListOperations.add_face_from_url has a new signature +- Operation LargePersonGroupPersonOperations.add_face_from_stream has a new signature +- Operation LargePersonGroupPersonOperations.add_face_from_url has a new signature +- Operation PersonGroupPersonOperations.add_face_from_stream has a new signature +- Operation PersonGroupPersonOperations.add_face_from_url has a new signature + 0.3.0 (2019-03-28) ++++++++++++++++++ diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/version.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/version.py index 3e682bbd5fb1..85da2c00c1a6 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/version.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/azure/cognitiveservices/vision/face/version.py @@ -9,5 +9,5 @@ # regenerated. 
# -------------------------------------------------------------------------- -VERSION = "0.3.0" +VERSION = "0.4.0" From 4b287160e54da8849a83d64050c446b6fec3040d Mon Sep 17 00:00:00 2001 From: Laurent Mazuel Date: Thu, 27 Jun 2019 14:23:42 -0700 Subject: [PATCH 4/6] Fix Face tests --- .../tests/recordings/test_face_detect.yaml | 13 +- .../tests/recordings/test_snapshot.yaml | 193 ++++++++++++++---- .../tests/test_face.py | 33 +-- 3 files changed, 184 insertions(+), 55 deletions(-) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/tests/recordings/test_face_detect.yaml b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/tests/recordings/test_face_detect.yaml index 042103845148..a869f98dd89e 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/tests/recordings/test_face_detect.yaml +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/tests/recordings/test_face_detect.yaml @@ -266,19 +266,18 @@ interactions: Connection: [keep-alive] Content-Type: [application/octet-stream] Transfer-Encoding: [chunked] - User-Agent: [python/3.7.0 (Windows-10-10.0.17134-SP0) requests/2.19.1 msrest/0.5.4 - azure-cognitiveservices-vision-face/0.3.0] + User-Agent: [python/3.6.3 (Windows-10-10.0.18362-SP0) msrest/0.6.6 azure-cognitiveservices-vision-face/0.4.0] X-BingApis-SDK-Client: [Python-SDK] method: POST - uri: https://westus2.api.cognitive.microsoft.com/face/v1.0/detect?returnFaceId=true&returnFaceLandmarks=false&returnFaceAttributes=age%2Cgender%2CheadPose%2Csmile%2CfacialHair%2Cglasses%2Cemotion%2Chair%2Cmakeup%2Cocclusion%2Caccessories%2Cblur%2Cexposure%2Cnoise&recognitionModel=recognition_01&returnRecognitionModel=false + uri: 
https://westus2.api.cognitive.microsoft.com/face/v1.0/detect?returnFaceId=true&returnFaceLandmarks=false&returnFaceAttributes=age%2Cgender%2CheadPose%2Csmile%2CfacialHair%2Cglasses%2Cemotion%2Chair%2Cmakeup%2Cocclusion%2Caccessories%2Cblur%2Cexposure%2Cnoise&recognitionModel=recognition_01&returnRecognitionModel=false&detectionModel=detection_01 response: - body: {string: '[{"faceId":"62b7416d-5fe0-444b-a880-7c8864802144","faceRectangle":{"top":84,"left":31,"width":97,"height":97},"faceAttributes":{"smile":1.0,"headPose":{"pitch":0.0,"roll":-0.7,"yaw":-2.4},"gender":"female","age":47.0,"facialHair":{"moustache":0.0,"beard":0.0,"sideburns":0.0},"glasses":"NoGlasses","emotion":{"anger":0.0,"contempt":0.0,"disgust":0.0,"fear":0.0,"happiness":1.0,"neutral":0.0,"sadness":0.0,"surprise":0.0},"blur":{"blurLevel":"low","value":0.0},"exposure":{"exposureLevel":"goodExposure","value":0.72},"noise":{"noiseLevel":"low","value":0.03},"makeup":{"eyeMakeup":true,"lipMakeup":true},"accessories":[],"occlusion":{"foreheadOccluded":false,"eyeOccluded":false,"mouthOccluded":false},"hair":{"bald":0.66,"invisible":false,"hairColor":[{"color":"blond","confidence":1.0},{"color":"gray","confidence":0.93},{"color":"other","confidence":0.48},{"color":"red","confidence":0.05},{"color":"black","confidence":0.04},{"color":"brown","confidence":0.02}]}}}]'} + body: {string: 
'[{"faceId":"26a0df3a-75a8-4d0b-a48d-aa50ba21f381","faceRectangle":{"top":84,"left":31,"width":97,"height":97},"faceAttributes":{"smile":1.0,"headPose":{"pitch":0.7,"roll":0.6,"yaw":-7.4},"gender":"female","age":51.0,"facialHair":{"moustache":0.0,"beard":0.0,"sideburns":0.0},"glasses":"NoGlasses","emotion":{"anger":0.0,"contempt":0.0,"disgust":0.0,"fear":0.0,"happiness":1.0,"neutral":0.0,"sadness":0.0,"surprise":0.0},"blur":{"blurLevel":"low","value":0.0},"exposure":{"exposureLevel":"goodExposure","value":0.72},"noise":{"noiseLevel":"low","value":0.03},"makeup":{"eyeMakeup":true,"lipMakeup":true},"accessories":[],"occlusion":{"foreheadOccluded":false,"eyeOccluded":false,"mouthOccluded":false},"hair":{"bald":0.66,"invisible":false,"hairColor":[{"color":"blond","confidence":1.0},{"color":"gray","confidence":0.93},{"color":"other","confidence":0.48},{"color":"red","confidence":0.05},{"color":"black","confidence":0.04},{"color":"brown","confidence":0.02}]}}}]'} headers: - apim-request-id: [2982fda4-a4d4-419a-af72-12f2ba2ddfd2] + apim-request-id: [f017e164-cf85-471b-8bd5-65f037520c36] cache-control: [no-cache] - content-length: ['969'] + content-length: ['968'] content-type: [application/json; charset=utf-8] - date: ['Fri, 29 Mar 2019 23:06:29 GMT'] + date: ['Thu, 27 Jun 2019 21:22:54 GMT'] expires: ['-1'] pragma: [no-cache] strict-transport-security: [max-age=31536000; includeSubDomains; preload] diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/tests/recordings/test_snapshot.yaml b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/tests/recordings/test_snapshot.yaml index dda921ec0a82..b20258bbea5a 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/tests/recordings/test_snapshot.yaml +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/tests/recordings/test_snapshot.yaml @@ -1,23 +1,50 @@ interactions: - request: - body: '{"name": "test", "userData": "test"}' + body: null headers: Accept: [application/json] 
Accept-Encoding: ['gzip, deflate'] Connection: [keep-alive] - Content-Length: ['36'] + Content-Length: ['0'] + User-Agent: [python/3.6.3 (Windows-10-10.0.18362-SP0) msrest/0.6.6 azure-cognitiveservices-vision-face/0.4.0] + X-BingApis-SDK-Client: [Python-SDK] + method: DELETE + uri: https://westus2.api.cognitive.microsoft.com/face/v1.0/persongroups/69ff3e98-2de7-468e-beae-f78aa85200db + response: + body: {string: '{"error":{"code":"PersonGroupNotFound","message":"Person group + is not found.\r\nParameter name: personGroupId"}}'} + headers: + apim-request-id: [4a7724d5-2770-4dee-a312-403d72a60052] + cache-control: [no-cache] + content-length: ['112'] + content-type: [application/json; charset=utf-8] + date: ['Thu, 27 Jun 2019 21:22:55 GMT'] + expires: ['-1'] + pragma: [no-cache] + strict-transport-security: [max-age=31536000; includeSubDomains; preload] + x-aspnet-version: [4.0.30319] + x-content-type-options: [nosniff] + x-powered-by: [ASP.NET] + status: {code: 404, message: Not Found} +- request: + body: '{"name": "test", "userData": "test", "recognitionModel": "recognition_01"}' + headers: + Accept: [application/json] + Accept-Encoding: ['gzip, deflate'] + Connection: [keep-alive] + Content-Length: ['74'] Content-Type: [application/json; charset=utf-8] - User-Agent: [python/3.6.7 (Windows-10-10.0.17763-SP0) msrest/0.6.1 azure-cognitiveservices-vision-face/1.0] + User-Agent: [python/3.6.3 (Windows-10-10.0.18362-SP0) msrest/0.6.6 azure-cognitiveservices-vision-face/0.4.0] X-BingApis-SDK-Client: [Python-SDK] method: PUT uri: https://westus2.api.cognitive.microsoft.com/face/v1.0/persongroups/69ff3e98-2de7-468e-beae-f78aa85200db response: body: {string: ''} headers: - apim-request-id: [981ef108-b709-4775-a61f-45b42b74a23e] + apim-request-id: [bbf58cb4-18a1-4da9-a402-61bad5f63f6a] cache-control: [no-cache] content-length: ['0'] - date: ['Fri, 25 Jan 2019 07:41:44 GMT'] + date: ['Thu, 27 Jun 2019 21:22:55 GMT'] expires: ['-1'] pragma: [no-cache] 
strict-transport-security: [max-age=31536000; includeSubDomains; preload] @@ -27,26 +54,26 @@ interactions: status: {code: 200, message: OK} - request: body: '{"type": "PersonGroup", "objectId": "69ff3e98-2de7-468e-beae-f78aa85200db", - "applyScope": ["Apply-Scope-Subscriptions"]}' + "applyScope": ["00977cdb-163f-435f-9c32-39ec8ae61f4d"]}' headers: Accept: [application/json] Accept-Encoding: ['gzip, deflate'] Connection: [keep-alive] Content-Length: ['131'] Content-Type: [application/json; charset=utf-8] - User-Agent: [python/3.6.7 (Windows-10-10.0.17763-SP0) msrest/0.6.1 azure-cognitiveservices-vision-face/1.0] + User-Agent: [python/3.6.3 (Windows-10-10.0.18362-SP0) msrest/0.6.6 azure-cognitiveservices-vision-face/0.4.0] X-BingApis-SDK-Client: [Python-SDK] method: POST uri: https://westus2.api.cognitive.microsoft.com/face/v1.0/snapshots response: body: {string: ''} headers: - apim-request-id: [ea4f76ba-b35a-4683-a4ca-63a9bc6132b1] + apim-request-id: [01cf83a1-9773-4849-8938-9722f0e74c76] cache-control: [no-cache] content-length: ['0'] - date: ['Fri, 25 Jan 2019 07:41:46 GMT'] + date: ['Thu, 27 Jun 2019 21:22:55 GMT'] expires: ['-1'] - operation-location: [/operations/db5ca6cb-f245-4e91-bd23-c64429ced465] + operation-location: [/operations/4ff5f990-b88b-4f46-bc9e-8eb505589bd4] pragma: [no-cache] strict-transport-security: [max-age=31536000; includeSubDomains; preload] x-aspnet-version: [4.0.30319] @@ -59,18 +86,64 @@ interactions: Accept: [application/json] Accept-Encoding: ['gzip, deflate'] Connection: [keep-alive] - User-Agent: [python/3.6.7 (Windows-10-10.0.17763-SP0) msrest/0.6.1 azure-cognitiveservices-vision-face/1.0] + User-Agent: [python/3.6.3 (Windows-10-10.0.18362-SP0) msrest/0.6.6 azure-cognitiveservices-vision-face/0.4.0] X-BingApis-SDK-Client: [Python-SDK] method: GET - uri: https://westus2.api.cognitive.microsoft.com/face/v1.0/operations/db5ca6cb-f245-4e91-bd23-c64429ced465 + uri: 
https://westus2.api.cognitive.microsoft.com/face/v1.0/operations/4ff5f990-b88b-4f46-bc9e-8eb505589bd4 response: - body: {string: '{"CreatedTime":"2019-01-25T07:41:46.3614152Z","LastActionTime":"2019-01-25T07:41:47.1834592Z","Message":null,"ResourceLocation":"/snapshots/4e760f55-6bc9-4bec-9b6e-ceb74dd184d9","Status":"succeeded"}'} + body: {string: '{"createdTime":"2019-06-27T21:22:55.3043353Z","lastActionTime":"2019-06-27T21:22:55.3043353Z","message":null,"resourceLocation":null,"status":"notStarted"}'} headers: - apim-request-id: [6113368c-1148-4fb1-a46c-427bb189ea63] + apim-request-id: [60524070-d2c0-4df8-9c9c-63d53e8abc7b] + cache-control: [no-cache] + content-length: ['155'] + content-type: [application/json; charset=utf-8] + date: ['Thu, 27 Jun 2019 21:22:55 GMT'] + expires: ['-1'] + pragma: [no-cache] + strict-transport-security: [max-age=31536000; includeSubDomains; preload] + x-aspnet-version: [4.0.30319] + x-content-type-options: [nosniff] + x-powered-by: [ASP.NET] + status: {code: 200, message: OK} +- request: + body: null + headers: + Accept: [application/json] + Accept-Encoding: ['gzip, deflate'] + Connection: [keep-alive] + User-Agent: [python/3.6.3 (Windows-10-10.0.18362-SP0) msrest/0.6.6 azure-cognitiveservices-vision-face/0.4.0] + X-BingApis-SDK-Client: [Python-SDK] + method: GET + uri: https://westus2.api.cognitive.microsoft.com/face/v1.0/operations/4ff5f990-b88b-4f46-bc9e-8eb505589bd4 + response: + body: {string: '{"error":{"code":"429","message": "Rate limit is exceeded. 
Try + again in 21 seconds."}}'} + headers: + apim-request-id: [492ef93d-fe4e-4e7a-9b19-c8166096a898] + content-length: ['86'] + content-type: [application/json] + date: ['Thu, 27 Jun 2019 21:22:55 GMT'] + strict-transport-security: [max-age=31536000; includeSubDomains; preload] + x-content-type-options: [nosniff] + status: {code: 429, message: Too Many Requests} +- request: + body: null + headers: + Accept: [application/json] + Accept-Encoding: ['gzip, deflate'] + Connection: [keep-alive] + User-Agent: [python/3.6.3 (Windows-10-10.0.18362-SP0) msrest/0.6.6 azure-cognitiveservices-vision-face/0.4.0] + X-BingApis-SDK-Client: [Python-SDK] + method: GET + uri: https://westus2.api.cognitive.microsoft.com/face/v1.0/operations/4ff5f990-b88b-4f46-bc9e-8eb505589bd4 + response: + body: {string: '{"createdTime":"2019-06-27T21:22:55.3043353Z","lastActionTime":"2019-06-27T21:22:56.7620246Z","message":null,"resourceLocation":"/snapshots/8bc8d286-2c39-40b6-8190-4a8a2eb95297","status":"succeeded"}'} + headers: + apim-request-id: [e3f68883-ad66-4d7f-a504-f02cbb4a49ef] cache-control: [no-cache] content-length: ['199'] content-type: [application/json; charset=utf-8] - date: ['Fri, 25 Jan 2019 07:41:47 GMT'] + date: ['Thu, 27 Jun 2019 21:23:16 GMT'] expires: ['-1'] pragma: [no-cache] strict-transport-security: [max-age=31536000; includeSubDomains; preload] @@ -86,19 +159,19 @@ interactions: Connection: [keep-alive] Content-Length: ['73'] Content-Type: [application/json; charset=utf-8] - User-Agent: [python/3.6.7 (Windows-10-10.0.17763-SP0) msrest/0.6.1 azure-cognitiveservices-vision-face/1.0] + User-Agent: [python/3.6.3 (Windows-10-10.0.18362-SP0) msrest/0.6.6 azure-cognitiveservices-vision-face/0.4.0] X-BingApis-SDK-Client: [Python-SDK] method: POST - uri: https://westus2.api.cognitive.microsoft.com/face/v1.0/snapshots/4e760f55-6bc9-4bec-9b6e-ceb74dd184d9/apply + uri: https://westus2.api.cognitive.microsoft.com/face/v1.0/snapshots/8bc8d286-2c39-40b6-8190-4a8a2eb95297/apply response: 
body: {string: ''} headers: - apim-request-id: [72d8e11a-fafe-4259-baf0-69d438b804f2] + apim-request-id: [dead0f63-b310-4e63-8bf2-ffdf04c15fa8] cache-control: [no-cache] content-length: ['0'] - date: ['Fri, 25 Jan 2019 07:41:48 GMT'] + date: ['Thu, 27 Jun 2019 21:23:17 GMT'] expires: ['-1'] - operation-location: [/operations/9bfa552a-a489-4e06-802a-2eb7e973ce29] + operation-location: [/operations/22a4e674-9598-45dd-9612-51ab1aba90eb] pragma: [no-cache] strict-transport-security: [max-age=31536000; includeSubDomains; preload] x-aspnet-version: [4.0.30319] @@ -111,18 +184,68 @@ interactions: Accept: [application/json] Accept-Encoding: ['gzip, deflate'] Connection: [keep-alive] - User-Agent: [python/3.6.7 (Windows-10-10.0.17763-SP0) msrest/0.6.1 azure-cognitiveservices-vision-face/1.0] + User-Agent: [python/3.6.3 (Windows-10-10.0.18362-SP0) msrest/0.6.6 azure-cognitiveservices-vision-face/0.4.0] + X-BingApis-SDK-Client: [Python-SDK] + method: GET + uri: https://westus2.api.cognitive.microsoft.com/face/v1.0/operations/22a4e674-9598-45dd-9612-51ab1aba90eb + response: + body: {string: '{"createdTime":"2019-06-27T21:23:18.0324166Z","lastActionTime":"2019-06-27T21:23:18.0324166Z","message":null,"resourceLocation":null,"status":"notStarted"}'} + headers: + apim-request-id: [c116c7aa-997f-4326-b4be-21ebf3851fd6] + cache-control: [no-cache] + content-length: ['155'] + content-type: [application/json; charset=utf-8] + date: ['Thu, 27 Jun 2019 21:23:17 GMT'] + expires: ['-1'] + pragma: [no-cache] + strict-transport-security: [max-age=31536000; includeSubDomains; preload] + x-aspnet-version: [4.0.30319] + x-content-type-options: [nosniff] + x-powered-by: [ASP.NET] + status: {code: 200, message: OK} +- request: + body: null + headers: + Accept: [application/json] + Accept-Encoding: ['gzip, deflate'] + Connection: [keep-alive] + User-Agent: [python/3.6.3 (Windows-10-10.0.18362-SP0) msrest/0.6.6 azure-cognitiveservices-vision-face/0.4.0] + X-BingApis-SDK-Client: [Python-SDK] + 
method: GET + uri: https://westus2.api.cognitive.microsoft.com/face/v1.0/operations/22a4e674-9598-45dd-9612-51ab1aba90eb + response: + body: {string: '{"createdTime":"2019-06-27T21:23:18.0324166Z","lastActionTime":"2019-06-27T21:23:18.0324166Z","message":null,"resourceLocation":null,"status":"notStarted"}'} + headers: + apim-request-id: [4de9d0c4-e1fe-431d-be50-2ababdbe73b8] + cache-control: [no-cache] + content-length: ['155'] + content-type: [application/json; charset=utf-8] + date: ['Thu, 27 Jun 2019 21:23:17 GMT'] + expires: ['-1'] + pragma: [no-cache] + strict-transport-security: [max-age=31536000; includeSubDomains; preload] + x-aspnet-version: [4.0.30319] + x-content-type-options: [nosniff] + x-powered-by: [ASP.NET] + status: {code: 200, message: OK} +- request: + body: null + headers: + Accept: [application/json] + Accept-Encoding: ['gzip, deflate'] + Connection: [keep-alive] + User-Agent: [python/3.6.3 (Windows-10-10.0.18362-SP0) msrest/0.6.6 azure-cognitiveservices-vision-face/0.4.0] X-BingApis-SDK-Client: [Python-SDK] method: GET - uri: https://westus2.api.cognitive.microsoft.com/face/v1.0/operations/9bfa552a-a489-4e06-802a-2eb7e973ce29 + uri: https://westus2.api.cognitive.microsoft.com/face/v1.0/operations/22a4e674-9598-45dd-9612-51ab1aba90eb response: - body: {string: '{"CreatedTime":"2019-01-25T07:41:48.8780155Z","LastActionTime":"2019-01-25T07:41:49.2210645Z","Message":null,"ResourceLocation":"/persongroup/fb644ecf-3ed0-4b25-9270-1d174b980afb","Status":"succeeded"}'} + body: {string: '{"createdTime":"2019-06-27T21:23:18.0324166Z","lastActionTime":"2019-06-27T21:23:19.0817675Z","message":null,"resourceLocation":"/persongroup/fb644ecf-3ed0-4b25-9270-1d174b980afb","status":"succeeded"}'} headers: - apim-request-id: [6a1512f6-a3a8-4dc8-b12c-59fc3742cffd] + apim-request-id: [cd45a786-82f1-4e98-a7dc-86c8e967488e] cache-control: [no-cache] content-length: ['201'] content-type: [application/json; charset=utf-8] - date: ['Fri, 25 Jan 2019 07:41:50 GMT'] + 
date: ['Thu, 27 Jun 2019 21:23:18 GMT'] expires: ['-1'] pragma: [no-cache] strict-transport-security: [max-age=31536000; includeSubDomains; preload] @@ -137,17 +260,17 @@ interactions: Accept-Encoding: ['gzip, deflate'] Connection: [keep-alive] Content-Length: ['0'] - User-Agent: [python/3.6.7 (Windows-10-10.0.17763-SP0) msrest/0.6.1 azure-cognitiveservices-vision-face/1.0] + User-Agent: [python/3.6.3 (Windows-10-10.0.18362-SP0) msrest/0.6.6 azure-cognitiveservices-vision-face/0.4.0] X-BingApis-SDK-Client: [Python-SDK] method: DELETE - uri: https://westus2.api.cognitive.microsoft.com/face/v1.0/snapshots/4e760f55-6bc9-4bec-9b6e-ceb74dd184d9 + uri: https://westus2.api.cognitive.microsoft.com/face/v1.0/snapshots/8bc8d286-2c39-40b6-8190-4a8a2eb95297 response: body: {string: ''} headers: - apim-request-id: [549edb9b-0229-498e-ab6d-5997173a59cc] + apim-request-id: [5be1c00d-7d07-44a7-8206-8c3bf8ad4289] cache-control: [no-cache] content-length: ['0'] - date: ['Fri, 25 Jan 2019 07:41:51 GMT'] + date: ['Thu, 27 Jun 2019 21:23:20 GMT'] expires: ['-1'] pragma: [no-cache] strict-transport-security: [max-age=31536000; includeSubDomains; preload] @@ -162,17 +285,17 @@ interactions: Accept-Encoding: ['gzip, deflate'] Connection: [keep-alive] Content-Length: ['0'] - User-Agent: [python/3.6.7 (Windows-10-10.0.17763-SP0) msrest/0.6.1 azure-cognitiveservices-vision-face/1.0] + User-Agent: [python/3.6.3 (Windows-10-10.0.18362-SP0) msrest/0.6.6 azure-cognitiveservices-vision-face/0.4.0] X-BingApis-SDK-Client: [Python-SDK] method: DELETE uri: https://westus2.api.cognitive.microsoft.com/face/v1.0/persongroups/69ff3e98-2de7-468e-beae-f78aa85200db response: body: {string: ''} headers: - apim-request-id: [55d6e7db-a270-41d3-a37a-eceffe8f90a0] + apim-request-id: [93a197b1-fc9c-4f5b-b419-48e237e58c03] cache-control: [no-cache] content-length: ['0'] - date: ['Fri, 25 Jan 2019 07:41:52 GMT'] + date: ['Thu, 27 Jun 2019 21:23:21 GMT'] expires: ['-1'] pragma: [no-cache] strict-transport-security: 
[max-age=31536000; includeSubDomains; preload] @@ -187,17 +310,17 @@ interactions: Accept-Encoding: ['gzip, deflate'] Connection: [keep-alive] Content-Length: ['0'] - User-Agent: [python/3.6.7 (Windows-10-10.0.17763-SP0) msrest/0.6.1 azure-cognitiveservices-vision-face/1.0] + User-Agent: [python/3.6.3 (Windows-10-10.0.18362-SP0) msrest/0.6.6 azure-cognitiveservices-vision-face/0.4.0] X-BingApis-SDK-Client: [Python-SDK] method: DELETE uri: https://westus2.api.cognitive.microsoft.com/face/v1.0/persongroups/fb644ecf-3ed0-4b25-9270-1d174b980afb response: body: {string: ''} headers: - apim-request-id: [90cd421d-48a3-443a-a9f0-8adb8c3f2f6b] + apim-request-id: [ca84ec64-386f-4f09-9394-3a053336b813] cache-control: [no-cache] content-length: ['0'] - date: ['Fri, 25 Jan 2019 07:41:53 GMT'] + date: ['Thu, 27 Jun 2019 21:23:21 GMT'] expires: ['-1'] pragma: [no-cache] strict-transport-security: [max-age=31536000; includeSubDomains; preload] diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/tests/test_face.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/tests/test_face.py index f330034f9269..b422c5a73167 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/tests/test_face.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/tests/test_face.py @@ -57,7 +57,7 @@ def test_face_detect(self): ) detected = result[0] - self.assertEqual(detected.face_attributes.age, 47.0) + self.assertEqual(detected.face_attributes.age, 51.0) self.assertEqual(detected.face_attributes.gender, Gender.female) self.assertEqual(detected.face_attributes.emotion.happiness, 1.0) @@ -71,44 +71,51 @@ def test_snapshot(self): personGroupId = "69ff3e98-2de7-468e-beae-f78aa85200db" newPersonGroupId = "fb644ecf-3ed0-4b25-9270-1d174b980afb" + try: + face_client.person_group.delete(personGroupId) + except Exception: + pass # Guess this doesn't exist + face_client.person_group.create(personGroupId, "test", "test") # Take a snapshot for the PersonGroup - 
apply_scope = ["Apply-Scope-Subscriptions"] + apply_scope = [ + self.settings.SUBSCRIPTION_ID + ] snapshot_type = "PersonGroup" takeSnapshotResponse = face_client.snapshot.take(snapshot_type, personGroupId, apply_scope, raw=True) takeOperationId = takeSnapshotResponse.headers["Operation-Location"].split("/")[2] getOperationStatusResponse = face_client.snapshot.get_operation_status(takeOperationId) - operationStatus = getOperationStatusResponse.additional_properties["Status"] - - # Wait for take operation to complete. + operationStatus = getOperationStatusResponse.status + + # Wait for take operation to complete. while operationStatus != "succeeded" and operationStatus != "failed": getOperationStatusResponse = face_client.snapshot.get_operation_status(takeOperationId) - operationStatus = getOperationStatusResponse.additional_properties["Status"] + operationStatus = getOperationStatusResponse.status if self.is_live: sleep(1) - self.assertEqual(operationStatus, "succeeded") + assert operationStatus == "succeeded" + + snapshotId = getOperationStatusResponse.resource_location.split("/")[2] - snapshotId = getOperationStatusResponse.additional_properties["ResourceLocation"].split("/")[2] - # Apply the snapshot to a new PersonGroup. applySnapshotResponse = face_client.snapshot.apply(snapshotId, newPersonGroupId, raw=True) applyOperationId = applySnapshotResponse.headers["Operation-Location"].split("/")[2] applyOperationStatusResponse = face_client.snapshot.get_operation_status(applyOperationId) - operationStatus = applyOperationStatusResponse.additional_properties["Status"] - + operationStatus = applyOperationStatusResponse.status + # Wait for apply operation to complete. 
while operationStatus != "succeeded" and operationStatus != "failed": applyOperationStatusResponse = face_client.snapshot.get_operation_status(applyOperationId) - operationStatus = applyOperationStatusResponse.additional_properties["Status"] + operationStatus = applyOperationStatusResponse.status if self.is_live: sleep(1) - self.assertEqual(operationStatus, "succeeded") + assert operationStatus == "succeeded" face_client.snapshot.delete(snapshotId) face_client.person_group.delete(personGroupId) From 2a89e10060b3fad8152703553d5846fd7fd2adc5 Mon Sep 17 00:00:00 2001 From: Azure SDK for Python bot Date: Thu, 27 Jun 2019 21:46:13 +0000 Subject: [PATCH 5/6] Packaging update of azure-cognitiveservices-vision-face --- .../azure-cognitiveservices-vision-face/README.rst | 2 +- .../azure-cognitiveservices-vision-face/setup.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/README.rst b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/README.rst index 6be1dac132cd..09c9230abfce 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/README.rst +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/README.rst @@ -3,7 +3,7 @@ Microsoft Azure SDK for Python This is the Microsoft Azure Cognitive Services Face Client Library. -This package has been tested with Python 2.7, 3.4, 3.5, 3.6 and 3.7. +This package has been tested with Python 2.7, 3.5, 3.6 and 3.7. For a more complete set of Azure libraries, see the `azure `__ bundle package. 
diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/setup.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/setup.py index 8deb9ba1354f..3abd5d2c2f53 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/setup.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/setup.py @@ -64,7 +64,6 @@ 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', From 09175571d375a993db7b79b7649afcb74dce9dc9 Mon Sep 17 00:00:00 2001 From: Laurent Mazuel Date: Thu, 27 Jun 2019 14:47:23 -0700 Subject: [PATCH 6/6] Fix tests --- .../tests/recordings/test_snapshot.yaml | 21 ------------------- 1 file changed, 21 deletions(-) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/tests/recordings/test_snapshot.yaml b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/tests/recordings/test_snapshot.yaml index b20258bbea5a..dcdd674ee845 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-face/tests/recordings/test_snapshot.yaml +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-face/tests/recordings/test_snapshot.yaml @@ -105,27 +105,6 @@ interactions: x-content-type-options: [nosniff] x-powered-by: [ASP.NET] status: {code: 200, message: OK} -- request: - body: null - headers: - Accept: [application/json] - Accept-Encoding: ['gzip, deflate'] - Connection: [keep-alive] - User-Agent: [python/3.6.3 (Windows-10-10.0.18362-SP0) msrest/0.6.6 azure-cognitiveservices-vision-face/0.4.0] - X-BingApis-SDK-Client: [Python-SDK] - method: GET - uri: https://westus2.api.cognitive.microsoft.com/face/v1.0/operations/4ff5f990-b88b-4f46-bc9e-8eb505589bd4 - response: - body: {string: '{"error":{"code":"429","message": "Rate limit is exceeded. 
Try - again in 21 seconds."}}'} - headers: - apim-request-id: [492ef93d-fe4e-4e7a-9b19-c8166096a898] - content-length: ['86'] - content-type: [application/json] - date: ['Thu, 27 Jun 2019 21:22:55 GMT'] - strict-transport-security: [max-age=31536000; includeSubDomains; preload] - x-content-type-options: [nosniff] - status: {code: 429, message: Too Many Requests} - request: body: null headers: