diff --git a/sdk/videoanalyzer/azure-media-videoanalyzer-edge/CHANGELOG.md b/sdk/videoanalyzer/azure-media-videoanalyzer-edge/CHANGELOG.md index 4b7f8c58ec3e..e1f30a5abc40 100644 --- a/sdk/videoanalyzer/azure-media-videoanalyzer-edge/CHANGELOG.md +++ b/sdk/videoanalyzer/azure-media-videoanalyzer-edge/CHANGELOG.md @@ -1,8 +1,14 @@ # Release History -## 1.0.0b2 (2021-05-24) +## 1.0.0b3 (2021-11-09) -- Updated title and description of the package and updates to readme +- Added device discovery and device detail request for ONVIF-enabled devices. +- Added Remote Device Adapter configuration for ingesting video in a private network. +- Added retention policy to VideoSink. + +## 1.0.0b2 (2021-05-19) + +- Updated the title and description of the package. ## 1.0.0b1 (2021-05-19) diff --git a/sdk/videoanalyzer/azure-media-videoanalyzer-edge/README.md b/sdk/videoanalyzer/azure-media-videoanalyzer-edge/README.md index 5be3e445bb8c..c5b81498ac9b 100644 --- a/sdk/videoanalyzer/azure-media-videoanalyzer-edge/README.md +++ b/sdk/videoanalyzer/azure-media-videoanalyzer-edge/README.md @@ -1,6 +1,6 @@ # Azure Video Analyzer Edge client library for Python -Azure Video Analyzer provides a platform to build intelligent video applications that span the edge and the cloud. The platform offers the capability to capture, record, and analyze live video along with publishing the results, video and video analytics, to Azure services in the cloud or the edge. It is designed to be an extensible platform, enabling you to connect different video analysis edge modules (such as Cognitive services containers, custom edge modules built by you with open-source machine learning models or custom models trained with your own data) to it and use them to analyze live video without worrying about the complexity of building and running a live video pipeline. +Azure Video Analyzer is an [Azure Applied AI Service][applied-ai-service] that provides a platform for you to build intelligent video applications that can span both edge and cloud infrastructures. The platform offers the capability to capture, record, and analyze live video, along with publishing the results (video and video analytics) to Azure services at the edge or in the cloud. It is designed to be an extensible platform, enabling you to connect different video inferencing edge modules, such as Cognitive Services modules or custom inferencing modules trained with your own data using either open-source machine learning or [Azure Machine Learning][machine-learning]. Use the client library for Video Analyzer Edge to: @@ -28,6 +28,8 @@ pip install azure-media-videoanalyzer-edge | SDK | Video Analyzer edge module | |---|---| + | 1.0.0b3 | 1.1 | + | 1.0.0b2 | 1.0 | | 1.0.0b1 | 1.0 | ### Creating a pipeline topology and making requests @@ -148,3 +150,5 @@ additional questions or comments.
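The 1.0.0b3 features listed in the CHANGELOG above are all invoked through the IoT Hub direct-method pattern this package's README already documents. As a hedged sketch for reviewers, the new ONVIF discovery request could be sent as follows; the connection string, device ID, and module ID are placeholders, and `method_name`/`serialize()` are the attributes exposed by the generated request models elsewhere in this diff:

```python
# Sketch: invoke the new ONVIF device discovery request (1.0.0b3) as an IoT Hub
# direct method. "<connection-string>", "<device-id>", and "<module-id>" are
# placeholders for your own deployment.
from azure.iot.hub import IoTHubRegistryManager
from azure.iot.hub.models import CloudToDeviceMethod
from azure.media.videoanalyzeredge import OnvifDeviceDiscoverRequest

registry_manager = IoTHubRegistryManager("<connection-string>")
discover_request = OnvifDeviceDiscoverRequest()

# method_name and serialize() come from the generated MethodRequest base class.
direct_method = CloudToDeviceMethod(
    method_name=discover_request.method_name,
    payload=discover_request.serialize(),
)
response = registry_manager.invoke_device_module_method(
    "<device-id>", "<module-id>", direct_method
)
print(response.payload)  # JSON shaped like DiscoveredOnvifDeviceCollection
```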
[iot-device-sdk]: https://pypi.org/project/azure-iot-device/ [iot-hub-sdk]: https://pypi.org/project/azure-iot-hub/ [github-page-issues]: https://github.com/Azure/azure-sdk-for-python/issues +[applied-ai-service]: https://azure.microsoft.com/product-categories/applied-ai-services/#services +[machine-learning]: https://azure.microsoft.com/services/machine-learning diff --git a/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/__init__.py b/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/__init__.py index c30621a55bb6..4a86b4aac81a 100644 --- a/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/__init__.py +++ b/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/__init__.py @@ -27,3 +27,12 @@ def _OverrideInstanceSetRequestSerialize(self): return live_pipeline_body.serialize() LivePipelineSetRequest.serialize = _OverrideInstanceSetRequestSerialize + +def _OverrideRemoteDeviceAdapterSetRequestSerialize(self): + remote_device_adapter_body = RemoteDeviceAdapterSetRequestBody(name=self.remote_device_adapter.name) + remote_device_adapter_body.system_data = self.remote_device_adapter.system_data + remote_device_adapter_body.properties = self.remote_device_adapter.properties + + return remote_device_adapter_body.serialize() + +RemoteDeviceAdapterSetRequest.serialize = _OverrideRemoteDeviceAdapterSetRequestSerialize diff --git a/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_generated/_vendor.py b/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_generated/_vendor.py new file mode 100644 index 000000000000..9a223d15524c --- /dev/null +++ b/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_generated/_vendor.py @@ -0,0 +1,15 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
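The `RemoteDeviceAdapterSetRequest.serialize` override added to `__init__.py` above mirrors the existing `LivePipelineSetRequest` override: the edge module expects the nested request-body shape (name, systemData, properties) rather than the outer request envelope. A minimal sketch of the behavior, using only constructor keywords implied by the attributes the override reads:

```python
# Sketch: the patched serialize() emits the RemoteDeviceAdapterSetRequestBody
# JSON (name/systemData/properties) instead of the outer request wrapper.
# Keyword arguments are assumed to match the attribute names, as with the
# other generated models.
from azure.media.videoanalyzeredge import (
    RemoteDeviceAdapter,
    RemoteDeviceAdapterSetRequest,
)

adapter = RemoteDeviceAdapter(name="camera-adapter-1")
set_request = RemoteDeviceAdapterSetRequest(remote_device_adapter=adapter)

payload = set_request.serialize()  # ready to send as a direct-method payload
```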
+# -------------------------------------------------------------------------- + +from azure.core.pipeline.transport import HttpRequest + +def _convert_request(request, files=None): + data = request.content if not files else None + request = HttpRequest(method=request.method, url=request.url, headers=request.headers, data=data) + if files: + request.set_formdata_body(files) + return request diff --git a/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_generated/models/__init__.py b/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_generated/models/__init__.py index 9604f73040dc..097ed5f499bd 100644 --- a/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_generated/models/__init__.py +++ b/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_generated/models/__init__.py @@ -10,11 +10,14 @@ from ._models_py3 import CertificateSource from ._models_py3 import CognitiveServicesVisionProcessor from ._models_py3 import CredentialsBase + from ._models_py3 import DiscoveredOnvifDevice + from ._models_py3 import DiscoveredOnvifDeviceCollection from ._models_py3 import EndpointBase from ._models_py3 import ExtensionProcessorBase from ._models_py3 import FileSink from ._models_py3 import GrpcExtension from ._models_py3 import GrpcExtensionDataTransfer + from ._models_py3 import H264Configuration from ._models_py3 import HttpExtension from ._models_py3 import HttpHeaderCredentials from ._models_py3 import ImageFormatBmp @@ -24,6 +27,7 @@ from ._models_py3 import ImageFormatRaw from ._models_py3 import ImageProperties from ._models_py3 import ImageScale + from ._models_py3 import IotHubDeviceConnection from ._models_py3 import IotHubMessageSink from ._models_py3 import IotHubMessageSource from ._models_py3 import LineCrossingProcessor @@ -37,6 +41,9 @@ from ._models_py3 import LivePipelineProperties from ._models_py3 import LivePipelineSetRequest from ._models_py3 import LivePipelineSetRequestBody + from ._models_py3 import MPEG4Configuration + from ._models_py3 import MediaProfile + from ._models_py3 import MediaUri from ._models_py3 import MethodRequest from ._models_py3 import MethodRequestEmptyBodyBase from ._models_py3 import MotionDetectionProcessor @@ -46,6 +53,12 @@ from ._models_py3 import NamedPolygonString from ._models_py3 import NodeInput from ._models_py3 import ObjectTrackingProcessor + from ._models_py3 import OnvifDevice + from ._models_py3 import OnvifDeviceDiscoverRequest + from ._models_py3 import OnvifDeviceGetRequest + from ._models_py3 import OnvifDns + from ._models_py3 import OnvifHostName + from ._models_py3 import OnvifSystemDateTime from ._models_py3 import OutputSelector from ._models_py3 import ParameterDeclaration from ._models_py3 import ParameterDefinition @@ -59,6 +72,16 @@ from ._models_py3 import PipelineTopologySetRequest from ._models_py3 import PipelineTopologySetRequestBody from ._models_py3 import ProcessorNodeBase + from ._models_py3 import RateControl + from ._models_py3 import RemoteDeviceAdapter + from ._models_py3 import RemoteDeviceAdapterCollection + from ._models_py3 import RemoteDeviceAdapterDeleteRequest + from ._models_py3 import RemoteDeviceAdapterGetRequest + from ._models_py3 import RemoteDeviceAdapterListRequest + from ._models_py3 import RemoteDeviceAdapterProperties + from ._models_py3 import RemoteDeviceAdapterSetRequest + from ._models_py3 import RemoteDeviceAdapterSetRequestBody + from ._models_py3 import 
RemoteDeviceAdapterTarget from ._models_py3 import RtspSource from ._models_py3 import SamplingOptions from ._models_py3 import SignalGateProcessor @@ -80,22 +103,29 @@ from ._models_py3 import SpatialAnalysisPersonZoneCrossingOperation from ._models_py3 import SpatialAnalysisPersonZoneCrossingZoneEvents from ._models_py3 import SpatialAnalysisTypedOperationBase + from ._models_py3 import SymmetricKeyCredentials from ._models_py3 import SystemData from ._models_py3 import TlsEndpoint from ._models_py3 import TlsValidationOptions from ._models_py3 import UnsecuredEndpoint from ._models_py3 import UsernamePasswordCredentials from ._models_py3 import VideoCreationProperties + from ._models_py3 import VideoEncoderConfiguration + from ._models_py3 import VideoPublishingOptions + from ._models_py3 import VideoResolution from ._models_py3 import VideoSink except (SyntaxError, ImportError): from ._models import CertificateSource # type: ignore from ._models import CognitiveServicesVisionProcessor # type: ignore from ._models import CredentialsBase # type: ignore + from ._models import DiscoveredOnvifDevice # type: ignore + from ._models import DiscoveredOnvifDeviceCollection # type: ignore from ._models import EndpointBase # type: ignore from ._models import ExtensionProcessorBase # type: ignore from ._models import FileSink # type: ignore from ._models import GrpcExtension # type: ignore from ._models import GrpcExtensionDataTransfer # type: ignore + from ._models import H264Configuration # type: ignore from ._models import HttpExtension # type: ignore from ._models import HttpHeaderCredentials # type: ignore from ._models import ImageFormatBmp # type: ignore @@ -105,6 +135,7 @@ from ._models import ImageFormatRaw # type: ignore from ._models import ImageProperties # type: ignore from ._models import ImageScale # type: ignore + from ._models import IotHubDeviceConnection # type: ignore from ._models import IotHubMessageSink # type: ignore from ._models import IotHubMessageSource # type: ignore from ._models import LineCrossingProcessor # type: ignore @@ -118,6 +149,9 @@ from ._models import LivePipelineProperties # type: ignore from ._models import LivePipelineSetRequest # type: ignore from ._models import LivePipelineSetRequestBody # type: ignore + from ._models import MPEG4Configuration # type: ignore + from ._models import MediaProfile # type: ignore + from ._models import MediaUri # type: ignore from ._models import MethodRequest # type: ignore from ._models import MethodRequestEmptyBodyBase # type: ignore from ._models import MotionDetectionProcessor # type: ignore @@ -127,6 +161,12 @@ from ._models import NamedPolygonString # type: ignore from ._models import NodeInput # type: ignore from ._models import ObjectTrackingProcessor # type: ignore + from ._models import OnvifDevice # type: ignore + from ._models import OnvifDeviceDiscoverRequest # type: ignore + from ._models import OnvifDeviceGetRequest # type: ignore + from ._models import OnvifDns # type: ignore + from ._models import OnvifHostName # type: ignore + from ._models import OnvifSystemDateTime # type: ignore from ._models import OutputSelector # type: ignore from ._models import ParameterDeclaration # type: ignore from ._models import ParameterDefinition # type: ignore @@ -140,6 +180,16 @@ from ._models import PipelineTopologySetRequest # type: ignore from ._models import PipelineTopologySetRequestBody # type: ignore from ._models import ProcessorNodeBase # type: ignore + from ._models import RateControl # type: ignore + from 
._models import RemoteDeviceAdapter # type: ignore + from ._models import RemoteDeviceAdapterCollection # type: ignore + from ._models import RemoteDeviceAdapterDeleteRequest # type: ignore + from ._models import RemoteDeviceAdapterGetRequest # type: ignore + from ._models import RemoteDeviceAdapterListRequest # type: ignore + from ._models import RemoteDeviceAdapterProperties # type: ignore + from ._models import RemoteDeviceAdapterSetRequest # type: ignore + from ._models import RemoteDeviceAdapterSetRequestBody # type: ignore + from ._models import RemoteDeviceAdapterTarget # type: ignore from ._models import RtspSource # type: ignore from ._models import SamplingOptions # type: ignore from ._models import SignalGateProcessor # type: ignore @@ -161,21 +211,28 @@ from ._models import SpatialAnalysisPersonZoneCrossingOperation # type: ignore from ._models import SpatialAnalysisPersonZoneCrossingZoneEvents # type: ignore from ._models import SpatialAnalysisTypedOperationBase # type: ignore + from ._models import SymmetricKeyCredentials # type: ignore from ._models import SystemData # type: ignore from ._models import TlsEndpoint # type: ignore from ._models import TlsValidationOptions # type: ignore from ._models import UnsecuredEndpoint # type: ignore from ._models import UsernamePasswordCredentials # type: ignore from ._models import VideoCreationProperties # type: ignore + from ._models import VideoEncoderConfiguration # type: ignore + from ._models import VideoPublishingOptions # type: ignore + from ._models import VideoResolution # type: ignore from ._models import VideoSink # type: ignore from ._azure_video_analyzerfor_edge_enums import ( GrpcExtensionDataTransferMode, + H264Profile, ImageFormatRawPixelFormat, ImageScaleMode, LivePipelineState, + MPEG4Profile, MotionDetectionSensitivity, ObjectTrackingAccuracy, + OnvifSystemDateTimeType, OutputSelectorOperator, OutputSelectorProperty, ParameterType, @@ -184,17 +241,21 @@ SpatialAnalysisPersonCountEventTrigger, SpatialAnalysisPersonDistanceEventTrigger, SpatialAnalysisPersonZoneCrossingEventType, + VideoEncoding, ) __all__ = [ 'CertificateSource', 'CognitiveServicesVisionProcessor', 'CredentialsBase', + 'DiscoveredOnvifDevice', + 'DiscoveredOnvifDeviceCollection', 'EndpointBase', 'ExtensionProcessorBase', 'FileSink', 'GrpcExtension', 'GrpcExtensionDataTransfer', + 'H264Configuration', 'HttpExtension', 'HttpHeaderCredentials', 'ImageFormatBmp', @@ -204,6 +265,7 @@ 'ImageFormatRaw', 'ImageProperties', 'ImageScale', + 'IotHubDeviceConnection', 'IotHubMessageSink', 'IotHubMessageSource', 'LineCrossingProcessor', @@ -217,6 +279,9 @@ 'LivePipelineProperties', 'LivePipelineSetRequest', 'LivePipelineSetRequestBody', + 'MPEG4Configuration', + 'MediaProfile', + 'MediaUri', 'MethodRequest', 'MethodRequestEmptyBodyBase', 'MotionDetectionProcessor', @@ -226,6 +291,12 @@ 'NamedPolygonString', 'NodeInput', 'ObjectTrackingProcessor', + 'OnvifDevice', + 'OnvifDeviceDiscoverRequest', + 'OnvifDeviceGetRequest', + 'OnvifDns', + 'OnvifHostName', + 'OnvifSystemDateTime', 'OutputSelector', 'ParameterDeclaration', 'ParameterDefinition', @@ -239,6 +310,16 @@ 'PipelineTopologySetRequest', 'PipelineTopologySetRequestBody', 'ProcessorNodeBase', + 'RateControl', + 'RemoteDeviceAdapter', + 'RemoteDeviceAdapterCollection', + 'RemoteDeviceAdapterDeleteRequest', + 'RemoteDeviceAdapterGetRequest', + 'RemoteDeviceAdapterListRequest', + 'RemoteDeviceAdapterProperties', + 'RemoteDeviceAdapterSetRequest', + 'RemoteDeviceAdapterSetRequestBody', + 
'RemoteDeviceAdapterTarget', 'RtspSource', 'SamplingOptions', 'SignalGateProcessor', @@ -260,19 +341,26 @@ 'SpatialAnalysisPersonZoneCrossingOperation', 'SpatialAnalysisPersonZoneCrossingZoneEvents', 'SpatialAnalysisTypedOperationBase', + 'SymmetricKeyCredentials', 'SystemData', 'TlsEndpoint', 'TlsValidationOptions', 'UnsecuredEndpoint', 'UsernamePasswordCredentials', 'VideoCreationProperties', + 'VideoEncoderConfiguration', + 'VideoPublishingOptions', + 'VideoResolution', 'VideoSink', 'GrpcExtensionDataTransferMode', + 'H264Profile', 'ImageFormatRawPixelFormat', 'ImageScaleMode', 'LivePipelineState', + 'MPEG4Profile', 'MotionDetectionSensitivity', 'ObjectTrackingAccuracy', + 'OnvifSystemDateTimeType', 'OutputSelectorOperator', 'OutputSelectorProperty', 'ParameterType', @@ -281,4 +369,5 @@ 'SpatialAnalysisPersonCountEventTrigger', 'SpatialAnalysisPersonDistanceEventTrigger', 'SpatialAnalysisPersonZoneCrossingEventType', + 'VideoEncoding', ] diff --git a/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_generated/models/_azure_video_analyzerfor_edge_enums.py b/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_generated/models/_azure_video_analyzerfor_edge_enums.py index 3b80cbeecd08..35371307bdbb 100644 --- a/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_generated/models/_azure_video_analyzerfor_edge_enums.py +++ b/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_generated/models/_azure_video_analyzerfor_edge_enums.py @@ -6,27 +6,12 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from enum import Enum, EnumMeta +from enum import Enum from six import with_metaclass +from azure.core import CaseInsensitiveEnumMeta -class _CaseInsensitiveEnumMeta(EnumMeta): - def __getitem__(self, name): - return super().__getitem__(name.upper()) - def __getattr__(cls, name): - """Return the enum member matching `name` - We use __getattr__ instead of descriptors or inserting into the enum - class' __dict__ in order to support `name` and `value` being both - properties for enum members (which live in the class' __dict__) and - enum members themselves. - """ - try: - return cls._member_map_[name.upper()] - except KeyError: - raise AttributeError(name) - - -class GrpcExtensionDataTransferMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): +class GrpcExtensionDataTransferMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Data transfer mode: embedded or sharedMemory. """ @@ -39,7 +24,16 @@ class GrpcExtensionDataTransferMode(with_metaclass(_CaseInsensitiveEnumMeta, str #: sharing the same shared memory space. SHARED_MEMORY = "sharedMemory" -class ImageFormatRawPixelFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): +class H264Profile(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): + """The H264 Profile + """ + + BASELINE = "Baseline" + MAIN = "Main" + EXTENDED = "Extended" + HIGH = "High" + +class ImageFormatRawPixelFormat(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Pixel format to be applied to the raw image. """ @@ -66,7 +60,7 @@ class ImageFormatRawPixelFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, En #: Packed BGRA 8:8:8:8, 32bpp, BGRABGRA. 
BGRA = "bgra" -class ImageScaleMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): +class ImageScaleMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Describes the image scaling mode to be applied. Default mode is 'pad'. """ @@ -82,7 +76,7 @@ class ImageScaleMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): #: Stretches the original image so it resized to the specified dimensions. STRETCH = "stretch" -class LivePipelineState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): +class LivePipelineState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Current pipeline state (read-only). """ @@ -98,7 +92,7 @@ class LivePipelineState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): #: The live pipeline is transitioning into the inactive state. DEACTIVATING = "deactivating" -class MotionDetectionSensitivity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): +class MotionDetectionSensitivity(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Motion detection sensitivity: low, medium, high. """ @@ -109,7 +103,16 @@ class MotionDetectionSensitivity(with_metaclass(_CaseInsensitiveEnumMeta, str, E #: High sensitivity. HIGH = "high" -class ObjectTrackingAccuracy(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): +class MPEG4Profile(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): + """The MPEG4 Profile + """ + + #: Simple Profile. + SP = "SP" + #: Advanced Simple Profile. + ASP = "ASP" + +class ObjectTrackingAccuracy(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Object tracker accuracy: low, medium, high. Higher accuracy leads to higher CPU consumption in average. """ @@ -121,7 +124,14 @@ class ObjectTrackingAccuracy(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum) #: High accuracy. HIGH = "high" -class OutputSelectorOperator(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): +class OnvifSystemDateTimeType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): + """An enum value determining whether the date time was configured using NTP or manual. + """ + + NTP = "Ntp" + MANUAL = "Manual" + +class OutputSelectorOperator(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The operator to compare properties by. """ @@ -130,14 +140,14 @@ class OutputSelectorOperator(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum) #: The property is not of the type defined by value. IS_NOT = "isNot" -class OutputSelectorProperty(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): +class OutputSelectorProperty(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The property of the data stream to be used as the selection criteria. """ #: The stream's MIME type or subtype: audio, video or application. MEDIA_TYPE = "mediaType" -class ParameterType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): +class ParameterType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Type of the parameter. """ @@ -152,7 +162,7 @@ class ParameterType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): #: The parameter's value is a boolean value that is either true or false. BOOL = "bool" -class RtspTransport(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): +class RtspTransport(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """Network transport utilized by the RTSP and RTP exchange: TCP or HTTP. When using TCP, the RTP packets are interleaved on the TCP RTSP connection. 
When using HTTP, the RTSP messages are exchanged through long-lived HTTP connections, and the RTP packets are interleaved in the HTTP @@ -166,7 +176,7 @@ class RtspTransport(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): #: channel. TCP = "tcp" -class SpatialAnalysisOperationFocus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): +class SpatialAnalysisOperationFocus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The operation focus type. """ @@ -177,7 +187,7 @@ class SpatialAnalysisOperationFocus(with_metaclass(_CaseInsensitiveEnumMeta, str #: The footprint. FOOTPRINT = "footprint" -class SpatialAnalysisPersonCountEventTrigger(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): +class SpatialAnalysisPersonCountEventTrigger(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The event trigger type. """ @@ -186,7 +196,7 @@ class SpatialAnalysisPersonCountEventTrigger(with_metaclass(_CaseInsensitiveEnum #: Interval trigger. INTERVAL = "interval" -class SpatialAnalysisPersonDistanceEventTrigger(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): +class SpatialAnalysisPersonDistanceEventTrigger(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The event trigger type. """ @@ -195,7 +205,7 @@ class SpatialAnalysisPersonDistanceEventTrigger(with_metaclass(_CaseInsensitiveE #: Interval trigger. INTERVAL = "interval" -class SpatialAnalysisPersonZoneCrossingEventType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): +class SpatialAnalysisPersonZoneCrossingEventType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The event type. """ @@ -203,3 +213,14 @@ class SpatialAnalysisPersonZoneCrossingEventType(with_metaclass(_CaseInsensitive ZONE_CROSSING = "zoneCrossing" #: Zone dwell time event type. ZONE_DWELL_TIME = "zoneDwellTime" + +class VideoEncoding(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): + """The video codec used by the Media Profile. + """ + + #: The Media Profile uses JPEG encoding. + JPEG = "JPEG" + #: The Media Profile uses H264 encoding. + H264 = "H264" + #: The Media Profile uses MPEG4 encoding. + MPEG4 = "MPEG4" diff --git a/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_generated/models/_models.py b/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_generated/models/_models.py index e0bc4bb2f07f..fc9639f4f6ac 100644 --- a/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_generated/models/_models.py +++ b/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_generated/models/_models.py @@ -17,8 +17,8 @@ class CertificateSource(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str """ _validation = { @@ -37,6 +37,8 @@ def __init__( self, **kwargs ): + """ + """ super(CertificateSource, self).__init__(**kwargs) self.type = None # type: Optional[str] @@ -49,13 +51,13 @@ class ProcessorNodeBase(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str - :param inputs: Required. 
An array of upstream node references within the topology to be used as + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. - :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] """ _validation = { @@ -78,6 +80,13 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + :keyword inputs: Required. An array of upstream node references within the topology to be used + as inputs for this node. + :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + """ super(ProcessorNodeBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.name = kwargs['name'] @@ -89,23 +98,23 @@ class CognitiveServicesVisionProcessor(ProcessorNodeBase): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str - :param inputs: Required. An array of upstream node references within the topology to be used as + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. - :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] - :param endpoint: Required. Endpoint to which this processor should connect. - :type endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase - :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.videoanalyzer.edge.models.ImageProperties - :param sampling_options: Describes the sampling options to be applied when forwarding samples - to the extension. - :type sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions - :param operation: Required. Describes the Spatial Analysis operation to be used in the - Cognitive Services Vision processor. - :type operation: ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationBase + :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :ivar endpoint: Required. Endpoint to which this processor should connect. + :vartype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase + :ivar image: Describes the parameters of the image that is sent as input to the endpoint. + :vartype image: ~azure.media.videoanalyzer.edge.models.ImageProperties + :ivar sampling_options: Describes the sampling options to be applied when forwarding samples to + the extension. + :vartype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions + :ivar operation: Required. Describes the Spatial Analysis operation to be used in the Cognitive + Services Vision processor. + :vartype operation: ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationBase """ _validation = { @@ -130,6 +139,23 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. Node name. 
Must be unique within the topology. + :paramtype name: str + :keyword inputs: Required. An array of upstream node references within the topology to be used + as inputs for this node. + :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :keyword endpoint: Required. Endpoint to which this processor should connect. + :paramtype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase + :keyword image: Describes the parameters of the image that is sent as input to the endpoint. + :paramtype image: ~azure.media.videoanalyzer.edge.models.ImageProperties + :keyword sampling_options: Describes the sampling options to be applied when forwarding samples + to the extension. + :paramtype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions + :keyword operation: Required. Describes the Spatial Analysis operation to be used in the + Cognitive Services Vision processor. + :paramtype operation: ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationBase + """ super(CognitiveServicesVisionProcessor, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.CognitiveServicesVisionProcessor' # type: str self.endpoint = kwargs['endpoint'] @@ -142,12 +168,12 @@ class CredentialsBase(msrest.serialization.Model): """Base class for credential objects. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: HttpHeaderCredentials, UsernamePasswordCredentials. + sub-classes are: HttpHeaderCredentials, SymmetricKeyCredentials, UsernamePasswordCredentials. All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str """ _validation = { @@ -159,17 +185,92 @@ class CredentialsBase(msrest.serialization.Model): } _subtype_map = { - 'type': {'#Microsoft.VideoAnalyzer.HttpHeaderCredentials': 'HttpHeaderCredentials', '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials': 'UsernamePasswordCredentials'} + 'type': {'#Microsoft.VideoAnalyzer.HttpHeaderCredentials': 'HttpHeaderCredentials', '#Microsoft.VideoAnalyzer.SymmetricKeyCredentials': 'SymmetricKeyCredentials', '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials': 'UsernamePasswordCredentials'} } def __init__( self, **kwargs ): + """ + """ super(CredentialsBase, self).__init__(**kwargs) self.type = None # type: Optional[str] +class DiscoveredOnvifDevice(msrest.serialization.Model): + """The discovered properties of the ONVIF device that are returned during the discovery. + + :ivar service_identifier: The unique identifier of the ONVIF device that was discovered in the + same subnet as the IoT Edge device. + :vartype service_identifier: str + :ivar remote_ip_address: The IP address of the ONVIF device that was discovered in the same + subnet as the IoT Edge device. + :vartype remote_ip_address: str + :ivar scopes: An array of hostnames for the ONVIF discovered devices that are in the same + subnet as the IoT Edge device. + :vartype scopes: list[str] + :ivar endpoints: An array of media profile endpoints that the ONVIF discovered device supports. 
+ :vartype endpoints: list[str] + """ + + _attribute_map = { + 'service_identifier': {'key': 'serviceIdentifier', 'type': 'str'}, + 'remote_ip_address': {'key': 'remoteIPAddress', 'type': 'str'}, + 'scopes': {'key': 'scopes', 'type': '[str]'}, + 'endpoints': {'key': 'endpoints', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword service_identifier: The unique identifier of the ONVIF device that was discovered in + the same subnet as the IoT Edge device. + :paramtype service_identifier: str + :keyword remote_ip_address: The IP address of the ONVIF device that was discovered in the same + subnet as the IoT Edge device. + :paramtype remote_ip_address: str + :keyword scopes: An array of hostnames for the ONVIF discovered devices that are in the same + subnet as the IoT Edge device. + :paramtype scopes: list[str] + :keyword endpoints: An array of media profile endpoints that the ONVIF discovered device + supports. + :paramtype endpoints: list[str] + """ + super(DiscoveredOnvifDevice, self).__init__(**kwargs) + self.service_identifier = kwargs.get('service_identifier', None) + self.remote_ip_address = kwargs.get('remote_ip_address', None) + self.scopes = kwargs.get('scopes', None) + self.endpoints = kwargs.get('endpoints', None) + + +class DiscoveredOnvifDeviceCollection(msrest.serialization.Model): + """A list of ONVIF devices that were discovered in the same subnet as the IoT Edge device. + + :ivar value: An array of ONVIF devices that have been discovered in the same subnet as the IoT + Edge device. + :vartype value: list[~azure.media.videoanalyzer.edge.models.DiscoveredOnvifDevice] + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[DiscoveredOnvifDevice]'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword value: An array of ONVIF devices that have been discovered in the same subnet as the + IoT Edge device. + :paramtype value: list[~azure.media.videoanalyzer.edge.models.DiscoveredOnvifDevice] + """ + super(DiscoveredOnvifDeviceCollection, self).__init__(**kwargs) + self.value = kwargs.get('value', None) + + class EndpointBase(msrest.serialization.Model): """Base class for endpoints. @@ -178,12 +279,12 @@ class EndpointBase(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param credentials: Credentials to be presented to the endpoint. - :type credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase - :param url: Required. The endpoint URL for Video Analyzer to connect to. - :type url: str + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar credentials: Credentials to be presented to the endpoint. + :vartype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase + :ivar url: Required. The endpoint URL for Video Analyzer to connect to. + :vartype url: str """ _validation = { @@ -205,6 +306,12 @@ def __init__( self, **kwargs ): + """ + :keyword credentials: Credentials to be presented to the endpoint. + :paramtype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase + :keyword url: Required. The endpoint URL for Video Analyzer to connect to. 
+ :paramtype url: str + """ super(EndpointBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.credentials = kwargs.get('credentials', None) self.url = kwargs['url'] @@ -219,21 +326,21 @@ class ExtensionProcessorBase(ProcessorNodeBase): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str - :param inputs: Required. An array of upstream node references within the topology to be used as + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. - :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] - :param endpoint: Required. Endpoint details of the pipeline extension plugin. - :type endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase - :param image: Required. Image transformations and formatting options to be applied to the video + :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :ivar endpoint: Required. Endpoint details of the pipeline extension plugin. + :vartype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase + :ivar image: Required. Image transformations and formatting options to be applied to the video frame(s) prior to submission to the pipeline extension plugin. - :type image: ~azure.media.videoanalyzer.edge.models.ImageProperties - :param sampling_options: Media sampling parameters that define how often media is submitted to + :vartype image: ~azure.media.videoanalyzer.edge.models.ImageProperties + :ivar sampling_options: Media sampling parameters that define how often media is submitted to the extension plugin. - :type sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions + :vartype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions """ _validation = { @@ -261,6 +368,21 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + :keyword inputs: Required. An array of upstream node references within the topology to be used + as inputs for this node. + :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :keyword endpoint: Required. Endpoint details of the pipeline extension plugin. + :paramtype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase + :keyword image: Required. Image transformations and formatting options to be applied to the + video frame(s) prior to submission to the pipeline extension plugin. + :paramtype image: ~azure.media.videoanalyzer.edge.models.ImageProperties + :keyword sampling_options: Media sampling parameters that define how often media is submitted + to the extension plugin. + :paramtype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions + """ super(ExtensionProcessorBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.ExtensionProcessorBase' # type: str self.endpoint = kwargs['endpoint'] @@ -276,13 +398,13 @@ class SinkNodeBase(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. 
- :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str - :param inputs: Required. An array of upstream node references within the topology to be used as + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. - :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] """ _validation = { @@ -305,6 +427,13 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + :keyword inputs: Required. An array of upstream node references within the topology to be used + as inputs for this node. + :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + """ super(SinkNodeBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.name = kwargs['name'] @@ -316,22 +445,22 @@ class FileSink(SinkNodeBase): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str - :param inputs: Required. An array of upstream node references within the topology to be used as + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. - :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] - :param base_directory_path: Required. Absolute directory path where media files will be stored. - :type base_directory_path: str - :param file_name_pattern: Required. File name pattern for creating new files when performing + :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :ivar base_directory_path: Required. Absolute directory path where media files will be stored. + :vartype base_directory_path: str + :ivar file_name_pattern: Required. File name pattern for creating new files when performing event based recording. The pattern must include at least one system variable. - :type file_name_pattern: str - :param maximum_size_mi_b: Required. Maximum amount of disk space that can be used for storing + :vartype file_name_pattern: str + :ivar maximum_size_mi_b: Required. Maximum amount of disk space that can be used for storing files from this sink. Once this limit is reached, the oldest files from this sink will be automatically deleted. - :type maximum_size_mi_b: str + :vartype maximum_size_mi_b: str """ _validation = { @@ -356,6 +485,23 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + :keyword inputs: Required. An array of upstream node references within the topology to be used + as inputs for this node. + :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :keyword base_directory_path: Required. Absolute directory path where media files will be + stored. 
+ :paramtype base_directory_path: str + :keyword file_name_pattern: Required. File name pattern for creating new files when performing + event based recording. The pattern must include at least one system variable. + :paramtype file_name_pattern: str + :keyword maximum_size_mi_b: Required. Maximum amount of disk space that can be used for storing + files from this sink. Once this limit is reached, the oldest files from this sink will be + automatically deleted. + :paramtype maximum_size_mi_b: str + """ super(FileSink, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.FileSink' # type: str self.base_directory_path = kwargs['base_directory_path'] @@ -368,27 +514,27 @@ class GrpcExtension(ExtensionProcessorBase): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str - :param inputs: Required. An array of upstream node references within the topology to be used as + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. - :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] - :param endpoint: Required. Endpoint details of the pipeline extension plugin. - :type endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase - :param image: Required. Image transformations and formatting options to be applied to the video + :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :ivar endpoint: Required. Endpoint details of the pipeline extension plugin. + :vartype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase + :ivar image: Required. Image transformations and formatting options to be applied to the video frame(s) prior to submission to the pipeline extension plugin. - :type image: ~azure.media.videoanalyzer.edge.models.ImageProperties - :param sampling_options: Media sampling parameters that define how often media is submitted to + :vartype image: ~azure.media.videoanalyzer.edge.models.ImageProperties + :ivar sampling_options: Media sampling parameters that define how often media is submitted to the extension plugin. - :type sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions - :param data_transfer: Required. Specifies how media is transferred to the extension plugin. - :type data_transfer: ~azure.media.videoanalyzer.edge.models.GrpcExtensionDataTransfer - :param extension_configuration: An optional configuration string that is sent to the extension + :vartype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions + :ivar data_transfer: Required. Specifies how media is transferred to the extension plugin. + :vartype data_transfer: ~azure.media.videoanalyzer.edge.models.GrpcExtensionDataTransfer + :ivar extension_configuration: An optional configuration string that is sent to the extension plugin. The configuration string is specific to each custom extension and it is neither understood nor validated by Video Analyzer. Please see https://aka.ms/ava-extension-grpc for details. 
- :type extension_configuration: str + :vartype extension_configuration: str """ _validation = { @@ -415,6 +561,28 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + :keyword inputs: Required. An array of upstream node references within the topology to be used + as inputs for this node. + :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :keyword endpoint: Required. Endpoint details of the pipeline extension plugin. + :paramtype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase + :keyword image: Required. Image transformations and formatting options to be applied to the + video frame(s) prior to submission to the pipeline extension plugin. + :paramtype image: ~azure.media.videoanalyzer.edge.models.ImageProperties + :keyword sampling_options: Media sampling parameters that define how often media is submitted + to the extension plugin. + :paramtype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions + :keyword data_transfer: Required. Specifies how media is transferred to the extension plugin. + :paramtype data_transfer: ~azure.media.videoanalyzer.edge.models.GrpcExtensionDataTransfer + :keyword extension_configuration: An optional configuration string that is sent to the + extension plugin. The configuration string is specific to each custom extension and it is neither + understood nor validated by Video Analyzer. Please see https://aka.ms/ava-extension-grpc + for details. + :paramtype extension_configuration: str + """ super(GrpcExtension, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.GrpcExtension' # type: str self.data_transfer = kwargs['data_transfer'] @@ -426,12 +594,12 @@ class GrpcExtensionDataTransfer(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param shared_memory_size_mi_b: The share memory buffer for sample transfers, in mebibytes. It + :ivar shared_memory_size_mi_b: The shared memory buffer for sample transfers, in mebibytes. It can only be used with the 'SharedMemory' transfer mode. - :type shared_memory_size_mi_b: str - :param mode: Required. Data transfer mode: embedded or sharedMemory. Possible values include: + :vartype shared_memory_size_mi_b: str + :ivar mode: Required. Data transfer mode: embedded or sharedMemory. Possible values include: "embedded", "sharedMemory". - :type mode: str or ~azure.media.videoanalyzer.edge.models.GrpcExtensionDataTransferMode + :vartype mode: str or ~azure.media.videoanalyzer.edge.models.GrpcExtensionDataTransferMode """ _validation = { @@ -447,31 +615,70 @@ def __init__( self, **kwargs ): + """ + :keyword shared_memory_size_mi_b: The shared memory buffer for sample transfers, in mebibytes. + It can only be used with the 'SharedMemory' transfer mode. + :paramtype shared_memory_size_mi_b: str + :keyword mode: Required. Data transfer mode: embedded or sharedMemory. Possible values include: + "embedded", "sharedMemory". + :paramtype mode: str or ~azure.media.videoanalyzer.edge.models.GrpcExtensionDataTransferMode + """ super(GrpcExtensionDataTransfer, self).__init__(**kwargs) self.shared_memory_size_mi_b = kwargs.get('shared_memory_size_mi_b', None) self.mode = kwargs['mode'] +class H264Configuration(msrest.serialization.Model): + """Class representing the H264 Configuration. + + :ivar gov_length: Group of Video frames length. + :vartype gov_length: float + :ivar profile: The H264 Profile. 
Possible values include: "Baseline", "Main", "Extended", + "High". + :vartype profile: str or ~azure.media.videoanalyzer.edge.models.H264Profile + """ + + _attribute_map = { + 'gov_length': {'key': 'govLength', 'type': 'float'}, + 'profile': {'key': 'profile', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword gov_length: Group of Video frames length. + :paramtype gov_length: float + :keyword profile: The H264 Profile. Possible values include: "Baseline", "Main", "Extended", + "High". + :paramtype profile: str or ~azure.media.videoanalyzer.edge.models.H264Profile + """ + super(H264Configuration, self).__init__(**kwargs) + self.gov_length = kwargs.get('gov_length', None) + self.profile = kwargs.get('profile', None) + + class HttpExtension(ExtensionProcessorBase): """HTTP extension processor allows pipeline extension plugins to be connected to the pipeline through over the HTTP protocol. Extension plugins must act as an HTTP server. Please see https://aka.ms/ava-extension-http for details. All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str - :param inputs: Required. An array of upstream node references within the topology to be used as + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. - :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] - :param endpoint: Required. Endpoint details of the pipeline extension plugin. - :type endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase - :param image: Required. Image transformations and formatting options to be applied to the video + :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :ivar endpoint: Required. Endpoint details of the pipeline extension plugin. + :vartype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase + :ivar image: Required. Image transformations and formatting options to be applied to the video frame(s) prior submission to the pipeline extension plugin. - :type image: ~azure.media.videoanalyzer.edge.models.ImageProperties - :param sampling_options: Media sampling parameters that define how often media is submitted to + :vartype image: ~azure.media.videoanalyzer.edge.models.ImageProperties + :ivar sampling_options: Media sampling parameters that define how often media is submitted to the extension plugin. - :type sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions + :vartype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions """ _validation = { @@ -495,6 +702,21 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + :keyword inputs: Required. An array of upstream node references within the topology to be used + as inputs for this node. + :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :keyword endpoint: Required. Endpoint details of the pipeline extension plugin. + :paramtype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase + :keyword image: Required. 
Image transformations and formatting options to be applied to the + video frame(s) prior to submission to the pipeline extension plugin. + :paramtype image: ~azure.media.videoanalyzer.edge.models.ImageProperties + :keyword sampling_options: Media sampling parameters that define how often media is submitted + to the extension plugin. + :paramtype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions + """ super(HttpExtension, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.HttpExtension' # type: str @@ -504,14 +726,14 @@ class HttpHeaderCredentials(CredentialsBase): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param header_name: Required. HTTP header name. - :type header_name: str - :param header_value: Required. HTTP header value. It is recommended that this value is + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar header_name: Required. HTTP header name. + :vartype header_name: str + :ivar header_value: Required. HTTP header value. It is recommended that this value is parameterized as a secret string in order to prevent this value from being returned as part of the resource on API requests. - :type header_value: str + :vartype header_value: str """ _validation = { @@ -530,6 +752,14 @@ def __init__( self, **kwargs ): + """ + :keyword header_name: Required. HTTP header name. + :paramtype header_name: str + :keyword header_value: Required. HTTP header value. It is recommended that this value is + parameterized as a secret string in order to prevent this value from being returned as part of the + resource on API requests. + :paramtype header_value: str + """ super(HttpHeaderCredentials, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.HttpHeaderCredentials' # type: str self.header_name = kwargs['header_name'] @@ -544,8 +774,8 @@ class ImageFormatProperties(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str """ _validation = { @@ -564,6 +794,8 @@ def __init__( self, **kwargs ): + """ + """ super(ImageFormatProperties, self).__init__(**kwargs) self.type = None # type: Optional[str] @@ -573,8 +805,8 @@ class ImageFormatBmp(ImageFormatProperties): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str """ _validation = { @@ -589,6 +821,8 @@ def __init__( self, **kwargs ): + """ + """ super(ImageFormatBmp, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.ImageFormatBmp' # type: str @@ -598,10 +832,10 @@ class ImageFormatJpeg(ImageFormatProperties): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param quality: Image quality value between 0 to 100 (best quality). - :type quality: str + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. 
+ :vartype type: str + :ivar quality: Image quality value between 0 to 100 (best quality). + :vartype quality: str """ _validation = { @@ -617,6 +851,10 @@ def __init__( self, **kwargs ): + """ + :keyword quality: Image quality value between 0 to 100 (best quality). + :paramtype quality: str + """ super(ImageFormatJpeg, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.ImageFormatJpeg' # type: str self.quality = kwargs.get('quality', None) @@ -627,8 +865,8 @@ class ImageFormatPng(ImageFormatProperties): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str """ _validation = { @@ -643,6 +881,8 @@ def __init__( self, **kwargs ): + """ + """ super(ImageFormatPng, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.ImageFormatPng' # type: str @@ -652,12 +892,12 @@ class ImageFormatRaw(ImageFormatProperties): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param pixel_format: Required. Pixel format to be applied to the raw image. Possible values + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar pixel_format: Required. Pixel format to be applied to the raw image. Possible values include: "yuv420p", "rgb565be", "rgb565le", "rgb555be", "rgb555le", "rgb24", "bgr24", "argb", "rgba", "abgr", "bgra". - :type pixel_format: str or ~azure.media.videoanalyzer.edge.models.ImageFormatRawPixelFormat + :vartype pixel_format: str or ~azure.media.videoanalyzer.edge.models.ImageFormatRawPixelFormat """ _validation = { @@ -674,6 +914,13 @@ def __init__( self, **kwargs ): + """ + :keyword pixel_format: Required. Pixel format to be applied to the raw image. Possible values + include: "yuv420p", "rgb565be", "rgb565le", "rgb555be", "rgb555le", "rgb24", "bgr24", "argb", + "rgba", "abgr", "bgra". + :paramtype pixel_format: str or + ~azure.media.videoanalyzer.edge.models.ImageFormatRawPixelFormat + """ super(ImageFormatRaw, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.ImageFormatRaw' # type: str self.pixel_format = kwargs['pixel_format'] @@ -682,10 +929,10 @@ def __init__( class ImageProperties(msrest.serialization.Model): """Image transformations and formatting options to be applied to the video frame(s). - :param scale: Image scaling mode. - :type scale: ~azure.media.videoanalyzer.edge.models.ImageScale - :param format: Base class for image formatting properties. - :type format: ~azure.media.videoanalyzer.edge.models.ImageFormatProperties + :ivar scale: Image scaling mode. + :vartype scale: ~azure.media.videoanalyzer.edge.models.ImageScale + :ivar format: Base class for image formatting properties. + :vartype format: ~azure.media.videoanalyzer.edge.models.ImageFormatProperties """ _attribute_map = { @@ -697,6 +944,12 @@ def __init__( self, **kwargs ): + """ + :keyword scale: Image scaling mode. + :paramtype scale: ~azure.media.videoanalyzer.edge.models.ImageScale + :keyword format: Base class for image formatting properties. 
+ :paramtype format: ~azure.media.videoanalyzer.edge.models.ImageFormatProperties
+ """
 super(ImageProperties, self).__init__(**kwargs)
 self.scale = kwargs.get('scale', None)
 self.format = kwargs.get('format', None)
@@ -705,13 +958,13 @@ def __init__(
 class ImageScale(msrest.serialization.Model):
 """Image scaling mode.

- :param mode: Describes the image scaling mode to be applied. Default mode is 'pad'. Possible
+ :ivar mode: Describes the image scaling mode to be applied. Default mode is 'pad'. Possible
 values include: "preserveAspectRatio", "pad", "stretch".
- :type mode: str or ~azure.media.videoanalyzer.edge.models.ImageScaleMode
- :param width: The desired output image width.
- :type width: str
- :param height: The desired output image height.
- :type height: str
+ :vartype mode: str or ~azure.media.videoanalyzer.edge.models.ImageScaleMode
+ :ivar width: The desired output image width.
+ :vartype width: str
+ :ivar height: The desired output image height.
+ :vartype height: str
 """

 _attribute_map = {
@@ -724,27 +977,75 @@ def __init__(
 self,
 **kwargs
 ):
+ """
+ :keyword mode: Describes the image scaling mode to be applied. Default mode is 'pad'. Possible
+ values include: "preserveAspectRatio", "pad", "stretch".
+ :paramtype mode: str or ~azure.media.videoanalyzer.edge.models.ImageScaleMode
+ :keyword width: The desired output image width.
+ :paramtype width: str
+ :keyword height: The desired output image height.
+ :paramtype height: str
+ """
 super(ImageScale, self).__init__(**kwargs)
 self.mode = kwargs.get('mode', None)
 self.width = kwargs.get('width', None)
 self.height = kwargs.get('height', None)


+class IotHubDeviceConnection(msrest.serialization.Model):
+ """Information that enables communication between the IoT Hub and the IoT device - allowing this edge module to act as a transparent gateway between the two.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar device_id: Required. The name of the IoT device configured and managed in IoT Hub.
+ (case-sensitive).
+ :vartype device_id: str
+ :ivar credentials: IoT device connection credentials. Currently IoT device symmetric key
+ credentials are supported.
+ :vartype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase
+ """
+
+ _validation = {
+ 'device_id': {'required': True},
+ }
+
+ _attribute_map = {
+ 'device_id': {'key': 'deviceId', 'type': 'str'},
+ 'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ """
+ :keyword device_id: Required. The name of the IoT device configured and managed in IoT Hub.
+ (case-sensitive).
+ :paramtype device_id: str
+ :keyword credentials: IoT device connection credentials. Currently IoT device symmetric key
+ credentials are supported.
+ :paramtype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase
+ """
+ super(IotHubDeviceConnection, self).__init__(**kwargs)
+ self.device_id = kwargs['device_id']
+ self.credentials = kwargs.get('credentials', None)
+
+
 class IotHubMessageSink(SinkNodeBase):
 """IoT Hub Message sink allows for pipeline messages to be published into the IoT Edge Hub. Published messages can then be delivered to the cloud and other modules via routes declared in the IoT Edge deployment manifest.

 All required parameters must be populated in order to send to Azure.

- :param type: Required. Type discriminator for the derived types.Constant filled by server.
- :type type: str
- :param name: Required. Node name. Must be unique within the topology.
- :type name: str - :param inputs: Required. An array of upstream node references within the topology to be used as + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. - :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] - :param hub_output_name: Required. Name of the Iot Edge Hub output to which the messages will be + :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :ivar hub_output_name: Required. Name of the Iot Edge Hub output to which the messages will be published. - :type hub_output_name: str + :vartype hub_output_name: str """ _validation = { @@ -765,6 +1066,16 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + :keyword inputs: Required. An array of upstream node references within the topology to be used + as inputs for this node. + :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :keyword hub_output_name: Required. Name of the Iot Edge Hub output to which the messages will + be published. + :paramtype hub_output_name: str + """ super(IotHubMessageSink, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.IotHubMessageSink' # type: str self.hub_output_name = kwargs['hub_output_name'] @@ -778,10 +1089,10 @@ class SourceNodeBase(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str """ _validation = { @@ -802,6 +1113,10 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + """ super(SourceNodeBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.name = kwargs['name'] @@ -812,12 +1127,12 @@ class IotHubMessageSource(SourceNodeBase): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str - :param hub_input_name: Name of the IoT Edge Hub input from which messages will be consumed. - :type hub_input_name: str + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar hub_input_name: Name of the IoT Edge Hub input from which messages will be consumed. + :vartype hub_input_name: str """ _validation = { @@ -835,6 +1150,12 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + :keyword hub_input_name: Name of the IoT Edge Hub input from which messages will be consumed. 
+ :paramtype hub_input_name: str + """ super(IotHubMessageSource, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.IotHubMessageSource' # type: str self.hub_input_name = kwargs.get('hub_input_name', None) @@ -845,15 +1166,15 @@ class LineCrossingProcessor(ProcessorNodeBase): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str - :param inputs: Required. An array of upstream node references within the topology to be used as + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. - :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] - :param lines: Required. An array of lines used to compute line crossing events. - :type lines: list[~azure.media.videoanalyzer.edge.models.NamedLineBase] + :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :ivar lines: Required. An array of lines used to compute line crossing events. + :vartype lines: list[~azure.media.videoanalyzer.edge.models.NamedLineBase] """ _validation = { @@ -874,6 +1195,15 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + :keyword inputs: Required. An array of upstream node references within the topology to be used + as inputs for this node. + :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :keyword lines: Required. An array of lines used to compute line crossing events. + :paramtype lines: list[~azure.media.videoanalyzer.edge.models.NamedLineBase] + """ super(LineCrossingProcessor, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.LineCrossingProcessor' # type: str self.lines = kwargs['lines'] @@ -884,12 +1214,12 @@ class LivePipeline(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param name: Required. Live pipeline unique identifier. - :type name: str - :param system_data: Read-only system metadata associated with this object. - :type system_data: ~azure.media.videoanalyzer.edge.models.SystemData - :param properties: Live pipeline properties. - :type properties: ~azure.media.videoanalyzer.edge.models.LivePipelineProperties + :ivar name: Required. Live pipeline unique identifier. + :vartype name: str + :ivar system_data: Read-only system metadata associated with this object. + :vartype system_data: ~azure.media.videoanalyzer.edge.models.SystemData + :ivar properties: Live pipeline properties. + :vartype properties: ~azure.media.videoanalyzer.edge.models.LivePipelineProperties """ _validation = { @@ -906,6 +1236,14 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. Live pipeline unique identifier. + :paramtype name: str + :keyword system_data: Read-only system metadata associated with this object. + :paramtype system_data: ~azure.media.videoanalyzer.edge.models.SystemData + :keyword properties: Live pipeline properties. 
+ :paramtype properties: ~azure.media.videoanalyzer.edge.models.LivePipelineProperties + """ super(LivePipeline, self).__init__(**kwargs) self.name = kwargs['name'] self.system_data = kwargs.get('system_data', None) @@ -916,7 +1254,7 @@ class MethodRequest(msrest.serialization.Model): """Base class for direct method calls. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: LivePipelineSetRequestBody, MethodRequestEmptyBodyBase, PipelineTopologySetRequestBody, LivePipelineListRequest, LivePipelineSetRequest, PipelineTopologyListRequest, PipelineTopologySetRequest. + sub-classes are: LivePipelineSetRequestBody, MethodRequestEmptyBodyBase, PipelineTopologySetRequestBody, RemoteDeviceAdapterSetRequestBody, LivePipelineListRequest, LivePipelineSetRequest, OnvifDeviceDiscoverRequest, OnvifDeviceGetRequest, PipelineTopologyListRequest, PipelineTopologySetRequest, RemoteDeviceAdapterListRequest, RemoteDeviceAdapterSetRequest. Variables are only populated by the server, and will be ignored when sending a request. @@ -924,13 +1262,13 @@ class MethodRequest(msrest.serialization.Model): :ivar method_name: Required. Direct method method name.Constant filled by server. :vartype method_name: str - :ivar api_version: Video Analyzer API version. Default value: "1.0". + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". :vartype api_version: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, - 'api_version': {'constant': True}, } _attribute_map = { @@ -939,24 +1277,28 @@ class MethodRequest(msrest.serialization.Model): } _subtype_map = { - 'method_name': {'LivePipelineSetRequestBody': 'LivePipelineSetRequestBody', 'MethodRequestEmptyBodyBase': 'MethodRequestEmptyBodyBase', 'PipelineTopologySetRequestBody': 'PipelineTopologySetRequestBody', 'livePipelineList': 'LivePipelineListRequest', 'livePipelineSet': 'LivePipelineSetRequest', 'pipelineTopologyList': 'PipelineTopologyListRequest', 'pipelineTopologySet': 'PipelineTopologySetRequest'} + 'method_name': {'LivePipelineSetRequestBody': 'LivePipelineSetRequestBody', 'MethodRequestEmptyBodyBase': 'MethodRequestEmptyBodyBase', 'PipelineTopologySetRequestBody': 'PipelineTopologySetRequestBody', 'RemoteDeviceAdapterSetRequestBody': 'RemoteDeviceAdapterSetRequestBody', 'livePipelineList': 'LivePipelineListRequest', 'livePipelineSet': 'LivePipelineSetRequest', 'onvifDeviceDiscover': 'OnvifDeviceDiscoverRequest', 'onvifDeviceGet': 'OnvifDeviceGetRequest', 'pipelineTopologyList': 'PipelineTopologyListRequest', 'pipelineTopologySet': 'PipelineTopologySetRequest', 'remoteDeviceAdapterList': 'RemoteDeviceAdapterListRequest', 'remoteDeviceAdapterSet': 'RemoteDeviceAdapterSetRequest'} } - api_version = "1.0" - def __init__( self, **kwargs ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + """ super(MethodRequest, self).__init__(**kwargs) self.method_name = None # type: Optional[str] + self.api_version = kwargs.get('api_version', "1.1") class MethodRequestEmptyBodyBase(MethodRequest): """MethodRequestEmptyBodyBase. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: LivePipelineActivateRequest, LivePipelineDeactivateRequest, LivePipelineDeleteRequest, LivePipelineGetRequest, PipelineTopologyDeleteRequest, PipelineTopologyGetRequest. 
+ sub-classes are: LivePipelineActivateRequest, LivePipelineDeactivateRequest, LivePipelineDeleteRequest, LivePipelineGetRequest, PipelineTopologyDeleteRequest, PipelineTopologyGetRequest, RemoteDeviceAdapterDeleteRequest, RemoteDeviceAdapterGetRequest. Variables are only populated by the server, and will be ignored when sending a request. @@ -964,15 +1306,15 @@ class MethodRequestEmptyBodyBase(MethodRequest): :ivar method_name: Required. Direct method method name.Constant filled by server. :vartype method_name: str - :ivar api_version: Video Analyzer API version. Default value: "1.0". + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". :vartype api_version: str - :param name: Required. Resource name. - :type name: str + :ivar name: Required. Resource name. + :vartype name: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, - 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -983,15 +1325,20 @@ class MethodRequestEmptyBodyBase(MethodRequest): } _subtype_map = { - 'method_name': {'livePipelineActivate': 'LivePipelineActivateRequest', 'livePipelineDeactivate': 'LivePipelineDeactivateRequest', 'livePipelineDelete': 'LivePipelineDeleteRequest', 'livePipelineGet': 'LivePipelineGetRequest', 'pipelineTopologyDelete': 'PipelineTopologyDeleteRequest', 'pipelineTopologyGet': 'PipelineTopologyGetRequest'} + 'method_name': {'livePipelineActivate': 'LivePipelineActivateRequest', 'livePipelineDeactivate': 'LivePipelineDeactivateRequest', 'livePipelineDelete': 'LivePipelineDeleteRequest', 'livePipelineGet': 'LivePipelineGetRequest', 'pipelineTopologyDelete': 'PipelineTopologyDeleteRequest', 'pipelineTopologyGet': 'PipelineTopologyGetRequest', 'remoteDeviceAdapterDelete': 'RemoteDeviceAdapterDeleteRequest', 'remoteDeviceAdapterGet': 'RemoteDeviceAdapterGetRequest'} } - api_version = "1.0" - def __init__( self, **kwargs ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword name: Required. Resource name. + :paramtype name: str + """ super(MethodRequestEmptyBodyBase, self).__init__(**kwargs) self.method_name = 'MethodRequestEmptyBodyBase' # type: str self.name = kwargs['name'] @@ -1006,15 +1353,15 @@ class LivePipelineActivateRequest(MethodRequestEmptyBodyBase): :ivar method_name: Required. Direct method method name.Constant filled by server. :vartype method_name: str - :ivar api_version: Video Analyzer API version. Default value: "1.0". + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". :vartype api_version: str - :param name: Required. Resource name. - :type name: str + :ivar name: Required. Resource name. + :vartype name: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, - 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1024,12 +1371,17 @@ class LivePipelineActivateRequest(MethodRequestEmptyBodyBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" - def __init__( self, **kwargs ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword name: Required. Resource name. 
+ :paramtype name: str + """ super(LivePipelineActivateRequest, self).__init__(**kwargs) self.method_name = 'livePipelineActivate' # type: str @@ -1037,12 +1389,12 @@ def __init__( class LivePipelineCollection(msrest.serialization.Model): """A collection of live pipelines. - :param value: List of live pipelines. - :type value: list[~azure.media.videoanalyzer.edge.models.LivePipeline] - :param continuation_token: A continuation token to be used in subsequent calls when enumerating + :ivar value: List of live pipelines. + :vartype value: list[~azure.media.videoanalyzer.edge.models.LivePipeline] + :ivar continuation_token: A continuation token to be used in subsequent calls when enumerating through the collection. This is returned when the collection results won't fit in a single response. - :type continuation_token: str + :vartype continuation_token: str """ _attribute_map = { @@ -1054,6 +1406,14 @@ def __init__( self, **kwargs ): + """ + :keyword value: List of live pipelines. + :paramtype value: list[~azure.media.videoanalyzer.edge.models.LivePipeline] + :keyword continuation_token: A continuation token to be used in subsequent calls when + enumerating through the collection. This is returned when the collection results won't fit in a + single response. + :paramtype continuation_token: str + """ super(LivePipelineCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.continuation_token = kwargs.get('continuation_token', None) @@ -1068,15 +1428,15 @@ class LivePipelineDeactivateRequest(MethodRequestEmptyBodyBase): :ivar method_name: Required. Direct method method name.Constant filled by server. :vartype method_name: str - :ivar api_version: Video Analyzer API version. Default value: "1.0". + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". :vartype api_version: str - :param name: Required. Resource name. - :type name: str + :ivar name: Required. Resource name. + :vartype name: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, - 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1086,12 +1446,17 @@ class LivePipelineDeactivateRequest(MethodRequestEmptyBodyBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" - def __init__( self, **kwargs ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword name: Required. Resource name. + :paramtype name: str + """ super(LivePipelineDeactivateRequest, self).__init__(**kwargs) self.method_name = 'livePipelineDeactivate' # type: str @@ -1105,15 +1470,15 @@ class LivePipelineDeleteRequest(MethodRequestEmptyBodyBase): :ivar method_name: Required. Direct method method name.Constant filled by server. :vartype method_name: str - :ivar api_version: Video Analyzer API version. Default value: "1.0". + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". :vartype api_version: str - :param name: Required. Resource name. - :type name: str + :ivar name: Required. Resource name. 
+ :vartype name: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, - 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1123,12 +1488,17 @@ class LivePipelineDeleteRequest(MethodRequestEmptyBodyBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" - def __init__( self, **kwargs ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword name: Required. Resource name. + :paramtype name: str + """ super(LivePipelineDeleteRequest, self).__init__(**kwargs) self.method_name = 'livePipelineDelete' # type: str @@ -1142,15 +1512,15 @@ class LivePipelineGetRequest(MethodRequestEmptyBodyBase): :ivar method_name: Required. Direct method method name.Constant filled by server. :vartype method_name: str - :ivar api_version: Video Analyzer API version. Default value: "1.0". + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". :vartype api_version: str - :param name: Required. Resource name. - :type name: str + :ivar name: Required. Resource name. + :vartype name: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, - 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1160,12 +1530,17 @@ class LivePipelineGetRequest(MethodRequestEmptyBodyBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" - def __init__( self, **kwargs ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword name: Required. Resource name. + :paramtype name: str + """ super(LivePipelineGetRequest, self).__init__(**kwargs) self.method_name = 'livePipelineGet' # type: str @@ -1179,13 +1554,13 @@ class LivePipelineListRequest(MethodRequest): :ivar method_name: Required. Direct method method name.Constant filled by server. :vartype method_name: str - :ivar api_version: Video Analyzer API version. Default value: "1.0". + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". :vartype api_version: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, - 'api_version': {'constant': True}, } _attribute_map = { @@ -1193,12 +1568,15 @@ class LivePipelineListRequest(MethodRequest): 'api_version': {'key': '@apiVersion', 'type': 'str'}, } - api_version = "1.0" - def __init__( self, **kwargs ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + """ super(LivePipelineListRequest, self).__init__(**kwargs) self.method_name = 'livePipelineList' # type: str @@ -1206,20 +1584,20 @@ def __init__( class LivePipelineProperties(msrest.serialization.Model): """Live pipeline properties. - :param description: An optional description of the live pipeline. - :type description: str - :param topology_name: The reference to an existing pipeline topology defined for real-time + :ivar description: An optional description of the live pipeline. + :vartype description: str + :ivar topology_name: The reference to an existing pipeline topology defined for real-time content processing. When activated, this live pipeline will process content according to the pipeline topology definition. 
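
# --- Editorial aside (illustrative example; not part of the diff) -----------
# A minimal sketch of how the generated direct-method request classes above
# are typically used: the model serializes into the direct-method payload,
# with "@apiVersion" now defaulting to "1.1". The pipeline name "pipeline1"
# is a hypothetical placeholder.
from azure.media.videoanalyzeredge import LivePipelineGetRequest

request = LivePipelineGetRequest(name="pipeline1")
payload = request.serialize()
# payload is roughly {'@apiVersion': '1.1', 'name': 'pipeline1'}; the method
# name travels separately as request.method_name ('livePipelineGet').
# -----------------------------------------------------------------------------
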
- :type topology_name: str
- :param parameters: List of the instance level parameter values for the user-defined topology
+ :vartype topology_name: str
+ :ivar parameters: List of the instance level parameter values for the user-defined topology
 parameters. A pipeline can only define or override parameter values for parameters which have
 been declared in the referenced topology. Topology parameters without a default value must be
 defined. Topology parameters with a default value can optionally be overridden.
- :type parameters: list[~azure.media.videoanalyzer.edge.models.ParameterDefinition]
- :param state: Current pipeline state (read-only). Possible values include: "inactive",
+ :vartype parameters: list[~azure.media.videoanalyzer.edge.models.ParameterDefinition]
+ :ivar state: Current pipeline state (read-only). Possible values include: "inactive",
 "activating", "active", "deactivating".
- :type state: str or ~azure.media.videoanalyzer.edge.models.LivePipelineState
+ :vartype state: str or ~azure.media.videoanalyzer.edge.models.LivePipelineState
 """

 _attribute_map = {
@@ -1233,6 +1611,22 @@ def __init__(
 self,
 **kwargs
 ):
+ """
+ :keyword description: An optional description of the live pipeline.
+ :paramtype description: str
+ :keyword topology_name: The reference to an existing pipeline topology defined for real-time
+ content processing. When activated, this live pipeline will process content according to the
+ pipeline topology definition.
+ :paramtype topology_name: str
+ :keyword parameters: List of the instance level parameter values for the user-defined topology
+ parameters. A pipeline can only define or override parameter values for parameters which have
+ been declared in the referenced topology. Topology parameters without a default value must be
+ defined. Topology parameters with a default value can optionally be overridden.
+ :paramtype parameters: list[~azure.media.videoanalyzer.edge.models.ParameterDefinition]
+ :keyword state: Current pipeline state (read-only). Possible values include: "inactive",
+ "activating", "active", "deactivating".
+ :paramtype state: str or ~azure.media.videoanalyzer.edge.models.LivePipelineState
+ """
 super(LivePipelineProperties, self).__init__(**kwargs)
 self.description = kwargs.get('description', None)
 self.topology_name = kwargs.get('topology_name', None)
@@ -1249,16 +1643,16 @@ class LivePipelineSetRequest(MethodRequest):

 :ivar method_name: Required. Direct method method name.Constant filled by server.
 :vartype method_name: str
- :ivar api_version: Video Analyzer API version. Default value: "1.0".
+ :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None
+ and "1.1". The default value is "1.1".
 :vartype api_version: str
- :param live_pipeline: Required. Live Pipeline represents a unique instance of a pipeline
+ :ivar live_pipeline: Required. Live Pipeline represents a unique instance of a pipeline
 topology which is used for real-time content ingestion and analysis.
- :type live_pipeline: ~azure.media.videoanalyzer.edge.models.LivePipeline
+ :vartype live_pipeline: ~azure.media.videoanalyzer.edge.models.LivePipeline
 """

 _validation = {
 'method_name': {'required': True, 'readonly': True},
- 'api_version': {'constant': True},
 'live_pipeline': {'required': True},
 }

@@ -1268,19 +1662,25 @@ class LivePipelineSetRequest(MethodRequest):
 'live_pipeline': {'key': 'livePipeline', 'type': 'LivePipeline'},
 }

- api_version = "1.0"
-
 def __init__(
 self,
 **kwargs
 ):
+ """
+ :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are
+ None and "1.1". The default value is "1.1".
+ :paramtype api_version: str
+ :keyword live_pipeline: Required. Live Pipeline represents a unique instance of a pipeline
+ topology which is used for real-time content ingestion and analysis.
+ :paramtype live_pipeline: ~azure.media.videoanalyzer.edge.models.LivePipeline
+ """
 super(LivePipelineSetRequest, self).__init__(**kwargs)
 self.method_name = 'livePipelineSet' # type: str
 self.live_pipeline = kwargs['live_pipeline']


 class LivePipelineSetRequestBody(LivePipeline, MethodRequest):
- """Live pipeline resource representation.
+ """Live Pipeline represents a unique instance of a pipeline topology which is used for real-time content ingestion and analysis.

 Variables are only populated by the server, and will be ignored when sending a request.

 All required parameters must be populated in order to send to Azure.

 :ivar method_name: Required. Direct method method name.Constant filled by server.
 :vartype method_name: str
- :ivar api_version: Video Analyzer API version. Default value: "1.0".
+ :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None
+ and "1.1". The default value is "1.1".
 :vartype api_version: str
- :param name: Required. Live pipeline unique identifier.
- :type name: str
- :param system_data: Read-only system metadata associated with this object.
- :type system_data: ~azure.media.videoanalyzer.edge.models.SystemData
- :param properties: Live pipeline properties.
- :type properties: ~azure.media.videoanalyzer.edge.models.LivePipelineProperties
+ :ivar name: Required. Live pipeline unique identifier.
+ :vartype name: str
+ :ivar system_data: Read-only system metadata associated with this object.
+ :vartype system_data: ~azure.media.videoanalyzer.edge.models.SystemData
+ :ivar properties: Live pipeline properties.
+ :vartype properties: ~azure.media.videoanalyzer.edge.models.LivePipelineProperties
 """

 _validation = {
 'method_name': {'required': True, 'readonly': True},
- 'api_version': {'constant': True},
 'name': {'required': True},
 }

@@ -1312,42 +1712,113 @@ class LivePipelineSetRequestBody(LivePipeline, MethodRequest):
 'properties': {'key': 'properties', 'type': 'LivePipelineProperties'},
 }

- api_version = "1.0"
-
 def __init__(
 self,
 **kwargs
 ):
+ """
+ :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are
+ None and "1.1". The default value is "1.1".
+ :paramtype api_version: str
+ :keyword name: Required. Live pipeline unique identifier.
+ :paramtype name: str
+ :keyword system_data: Read-only system metadata associated with this object.
+ :paramtype system_data: ~azure.media.videoanalyzer.edge.models.SystemData
+ :keyword properties: Live pipeline properties.
+ :paramtype properties: ~azure.media.videoanalyzer.edge.models.LivePipelineProperties + """ super(LivePipelineSetRequestBody, self).__init__(**kwargs) self.method_name = 'LivePipelineSetRequestBody' # type: str + self.api_version = kwargs.get('api_version', "1.1") self.method_name = 'LivePipelineSetRequestBody' # type: str self.name = kwargs['name'] self.system_data = kwargs.get('system_data', None) self.properties = kwargs.get('properties', None) +class MediaProfile(msrest.serialization.Model): + """Class representing the ONVIF MediaProfiles. + + :ivar name: The name of the Media Profile. + :vartype name: str + :ivar media_uri: Object representing the URI that will be used to request for media streaming. + :vartype media_uri: any + :ivar video_encoder_configuration: The Video encoder configuration. + :vartype video_encoder_configuration: + ~azure.media.videoanalyzer.edge.models.VideoEncoderConfiguration + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'media_uri': {'key': 'mediaUri', 'type': 'object'}, + 'video_encoder_configuration': {'key': 'videoEncoderConfiguration', 'type': 'VideoEncoderConfiguration'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword name: The name of the Media Profile. + :paramtype name: str + :keyword media_uri: Object representing the URI that will be used to request for media + streaming. + :paramtype media_uri: any + :keyword video_encoder_configuration: The Video encoder configuration. + :paramtype video_encoder_configuration: + ~azure.media.videoanalyzer.edge.models.VideoEncoderConfiguration + """ + super(MediaProfile, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.media_uri = kwargs.get('media_uri', None) + self.video_encoder_configuration = kwargs.get('video_encoder_configuration', None) + + +class MediaUri(msrest.serialization.Model): + """Object representing the URI that will be used to request for media streaming. + + :ivar uri: URI that can be used for media streaming. + :vartype uri: str + """ + + _attribute_map = { + 'uri': {'key': 'uri', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword uri: URI that can be used for media streaming. + :paramtype uri: str + """ + super(MediaUri, self).__init__(**kwargs) + self.uri = kwargs.get('uri', None) + + class MotionDetectionProcessor(ProcessorNodeBase): """Motion detection processor allows for motion detection on the video stream. It generates motion events whenever motion is present on the video. All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str - :param inputs: Required. An array of upstream node references within the topology to be used as + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. - :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] - :param sensitivity: Motion detection sensitivity: low, medium, high. Possible values include: + :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :ivar sensitivity: Motion detection sensitivity: low, medium, high. 
Possible values include: "low", "medium", "high". - :type sensitivity: str or ~azure.media.videoanalyzer.edge.models.MotionDetectionSensitivity - :param output_motion_region: Indicates whether the processor should detect and output the + :vartype sensitivity: str or ~azure.media.videoanalyzer.edge.models.MotionDetectionSensitivity + :ivar output_motion_region: Indicates whether the processor should detect and output the regions within the video frame where motion was detected. Default is true. - :type output_motion_region: bool - :param event_aggregation_window: Time window duration on which events are aggregated before + :vartype output_motion_region: bool + :ivar event_aggregation_window: Time window duration on which events are aggregated before being emitted. Value must be specified in ISO8601 duration format (i.e. "PT2S" equals 2 seconds). Use 0 seconds for no aggregation. Default is 1 second. - :type event_aggregation_window: str + :vartype event_aggregation_window: str """ _validation = { @@ -1369,6 +1840,24 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + :keyword inputs: Required. An array of upstream node references within the topology to be used + as inputs for this node. + :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :keyword sensitivity: Motion detection sensitivity: low, medium, high. Possible values include: + "low", "medium", "high". + :paramtype sensitivity: str or + ~azure.media.videoanalyzer.edge.models.MotionDetectionSensitivity + :keyword output_motion_region: Indicates whether the processor should detect and output the + regions within the video frame where motion was detected. Default is true. + :paramtype output_motion_region: bool + :keyword event_aggregation_window: Time window duration on which events are aggregated before + being emitted. Value must be specified in ISO8601 duration format (i.e. "PT2S" equals 2 + seconds). Use 0 seconds for no aggregation. Default is 1 second. + :paramtype event_aggregation_window: str + """ super(MotionDetectionProcessor, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.MotionDetectionProcessor' # type: str self.sensitivity = kwargs.get('sensitivity', None) @@ -1376,6 +1865,35 @@ def __init__( self.event_aggregation_window = kwargs.get('event_aggregation_window', None) +class MPEG4Configuration(msrest.serialization.Model): + """Class representing the MPEG4 Configuration. + + :ivar gov_length: Group of Video frames length. + :vartype gov_length: float + :ivar profile: The MPEG4 Profile. Possible values include: "SP", "ASP". + :vartype profile: str or ~azure.media.videoanalyzer.edge.models.MPEG4Profile + """ + + _attribute_map = { + 'gov_length': {'key': 'govLength', 'type': 'float'}, + 'profile': {'key': 'profile', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword gov_length: Group of Video frames length. + :paramtype gov_length: float + :keyword profile: The MPEG4 Profile. Possible values include: "SP", "ASP". + :paramtype profile: str or ~azure.media.videoanalyzer.edge.models.MPEG4Profile + """ + super(MPEG4Configuration, self).__init__(**kwargs) + self.gov_length = kwargs.get('gov_length', None) + self.profile = kwargs.get('profile', None) + + class NamedLineBase(msrest.serialization.Model): """Base class for named lines. @@ -1384,10 +1902,10 @@ class NamedLineBase(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. 
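
# --- Editorial aside (illustrative example; not part of the diff) -----------
# A minimal sketch of constructing the MotionDetectionProcessor node defined
# above; the node name and upstream source name ("rtspSource") are
# hypothetical placeholders.
from azure.media.videoanalyzeredge import MotionDetectionProcessor, NodeInput

motion_node = MotionDetectionProcessor(
    name="motionDetection",
    inputs=[NodeInput(node_name="rtspSource")],
    sensitivity="medium",             # "low" | "medium" | "high"
    event_aggregation_window="PT1S",  # ISO8601 duration; default is 1 second
)
# -----------------------------------------------------------------------------
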
- :param type: Required. The Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Line name. Must be unique within the node. - :type name: str + :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Line name. Must be unique within the node. + :vartype name: str """ _validation = { @@ -1408,6 +1926,10 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. Line name. Must be unique within the node. + :paramtype name: str + """ super(NamedLineBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.name = kwargs['name'] @@ -1418,15 +1940,15 @@ class NamedLineString(NamedLineBase): All required parameters must be populated in order to send to Azure. - :param type: Required. The Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Line name. Must be unique within the node. - :type name: str - :param line: Required. Point coordinates for the line start and end, respectively. Example: + :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Line name. Must be unique within the node. + :vartype name: str + :ivar line: Required. Point coordinates for the line start and end, respectively. Example: '[[0.3, 0.2],[0.9, 0.8]]'. Each point is expressed as [LEFT, TOP] coordinate ratios ranging from 0.0 to 1.0, where [0,0] is the upper-left frame corner and [1, 1] is the bottom-right frame corner. - :type line: str + :vartype line: str """ _validation = { @@ -1445,6 +1967,15 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. Line name. Must be unique within the node. + :paramtype name: str + :keyword line: Required. Point coordinates for the line start and end, respectively. Example: + '[[0.3, 0.2],[0.9, 0.8]]'. Each point is expressed as [LEFT, TOP] coordinate ratios ranging + from 0.0 to 1.0, where [0,0] is the upper-left frame corner and [1, 1] is the bottom-right + frame corner. + :paramtype line: str + """ super(NamedLineString, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.NamedLineString' # type: str self.line = kwargs['line'] @@ -1458,10 +1989,10 @@ class NamedPolygonBase(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param type: Required. The Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Polygon name. Must be unique within the node. - :type name: str + :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Polygon name. Must be unique within the node. + :vartype name: str """ _validation = { @@ -1482,6 +2013,10 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. Polygon name. Must be unique within the node. + :paramtype name: str + """ super(NamedPolygonBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.name = kwargs['name'] @@ -1492,14 +2027,14 @@ class NamedPolygonString(NamedPolygonBase): All required parameters must be populated in order to send to Azure. - :param type: Required. The Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Polygon name. Must be unique within the node. - :type name: str - :param polygon: Required. 
Point coordinates for the polygon. Example: '[[0.3, 0.2],[0.9, + :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Polygon name. Must be unique within the node. + :vartype name: str + :ivar polygon: Required. Point coordinates for the polygon. Example: '[[0.3, 0.2],[0.9, 0.8],[0.7, 0.6]]'. Each point is expressed as [LEFT, TOP] coordinate ratios ranging from 0.0 to 1.0, where [0,0] is the upper-left frame corner and [1, 1] is the bottom-right frame corner. - :type polygon: str + :vartype polygon: str """ _validation = { @@ -1518,6 +2053,14 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. Polygon name. Must be unique within the node. + :paramtype name: str + :keyword polygon: Required. Point coordinates for the polygon. Example: '[[0.3, 0.2],[0.9, + 0.8],[0.7, 0.6]]'. Each point is expressed as [LEFT, TOP] coordinate ratios ranging from 0.0 to + 1.0, where [0,0] is the upper-left frame corner and [1, 1] is the bottom-right frame corner. + :paramtype polygon: str + """ super(NamedPolygonString, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.NamedPolygonString' # type: str self.polygon = kwargs['polygon'] @@ -1528,12 +2071,12 @@ class NodeInput(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param node_name: Required. The name of the upstream node in the pipeline which output is used + :ivar node_name: Required. The name of the upstream node in the pipeline which output is used as input of the current node. - :type node_name: str - :param output_selectors: Allows for the selection of specific data streams (eg. video only) - from another node. - :type output_selectors: list[~azure.media.videoanalyzer.edge.models.OutputSelector] + :vartype node_name: str + :ivar output_selectors: Allows for the selection of specific data streams (eg. video only) from + another node. + :vartype output_selectors: list[~azure.media.videoanalyzer.edge.models.OutputSelector] """ _validation = { @@ -1549,6 +2092,14 @@ def __init__( self, **kwargs ): + """ + :keyword node_name: Required. The name of the upstream node in the pipeline which output is + used as input of the current node. + :paramtype node_name: str + :keyword output_selectors: Allows for the selection of specific data streams (eg. video only) + from another node. + :paramtype output_selectors: list[~azure.media.videoanalyzer.edge.models.OutputSelector] + """ super(NodeInput, self).__init__(**kwargs) self.node_name = kwargs['node_name'] self.output_selectors = kwargs.get('output_selectors', None) @@ -1559,16 +2110,16 @@ class ObjectTrackingProcessor(ProcessorNodeBase): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str - :param inputs: Required. An array of upstream node references within the topology to be used as + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. - :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] - :param accuracy: Object tracker accuracy: low, medium, high. 
Higher accuracy leads to higher
- CPU consumption on average. Possible values include: "low", "medium", "high".
- :type accuracy: str or ~azure.media.videoanalyzer.edge.models.ObjectTrackingAccuracy
+ :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
+ :ivar accuracy: Object tracker accuracy: low, medium, high. Higher accuracy leads to higher CPU
+ consumption on average. Possible values include: "low", "medium", "high".
+ :vartype accuracy: str or ~azure.media.videoanalyzer.edge.models.ObjectTrackingAccuracy
 """

 _validation = {
@@ -1588,21 +2139,260 @@ def __init__(
 self,
 **kwargs
 ):
+ """
+ :keyword name: Required. Node name. Must be unique within the topology.
+ :paramtype name: str
+ :keyword inputs: Required. An array of upstream node references within the topology to be used
+ as inputs for this node.
+ :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
+ :keyword accuracy: Object tracker accuracy: low, medium, high. Higher accuracy leads to higher
+ CPU consumption on average. Possible values include: "low", "medium", "high".
+ :paramtype accuracy: str or ~azure.media.videoanalyzer.edge.models.ObjectTrackingAccuracy
+ """
 super(ObjectTrackingProcessor, self).__init__(**kwargs)
 self.type = '#Microsoft.VideoAnalyzer.ObjectTrackingProcessor' # type: str
 self.accuracy = kwargs.get('accuracy', None)


+class OnvifDevice(msrest.serialization.Model):
+ """The ONVIF device properties.
+
+ :ivar hostname: The hostname of the ONVIF device.
+ :vartype hostname: ~azure.media.videoanalyzer.edge.models.OnvifHostName
+ :ivar system_date_time: The system date and time of the ONVIF device.
+ :vartype system_date_time: ~azure.media.videoanalyzer.edge.models.OnvifSystemDateTime
+ :ivar dns: The ONVIF device DNS properties.
+ :vartype dns: ~azure.media.videoanalyzer.edge.models.OnvifDns
+ :ivar media_profiles: An array of ONVIF media profiles supported by the ONVIF device.
+ :vartype media_profiles: list[~azure.media.videoanalyzer.edge.models.MediaProfile]
+ """
+
+ _attribute_map = {
+ 'hostname': {'key': 'hostname', 'type': 'OnvifHostName'},
+ 'system_date_time': {'key': 'systemDateTime', 'type': 'OnvifSystemDateTime'},
+ 'dns': {'key': 'dns', 'type': 'OnvifDns'},
+ 'media_profiles': {'key': 'mediaProfiles', 'type': '[MediaProfile]'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ """
+ :keyword hostname: The hostname of the ONVIF device.
+ :paramtype hostname: ~azure.media.videoanalyzer.edge.models.OnvifHostName
+ :keyword system_date_time: The system date and time of the ONVIF device.
+ :paramtype system_date_time: ~azure.media.videoanalyzer.edge.models.OnvifSystemDateTime
+ :keyword dns: The ONVIF device DNS properties.
+ :paramtype dns: ~azure.media.videoanalyzer.edge.models.OnvifDns
+ :keyword media_profiles: An array of ONVIF media profiles supported by the ONVIF device.
+ :paramtype media_profiles: list[~azure.media.videoanalyzer.edge.models.MediaProfile]
+ """
+ super(OnvifDevice, self).__init__(**kwargs)
+ self.hostname = kwargs.get('hostname', None)
+ self.system_date_time = kwargs.get('system_date_time', None)
+ self.dns = kwargs.get('dns', None)
+ self.media_profiles = kwargs.get('media_profiles', None)
+
+
+class OnvifDeviceDiscoverRequest(MethodRequest):
+ """Lists all the discoverable ONVIF devices on the same subnet as the Edge Module.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar method_name: Required.
Direct method method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". + :vartype api_version: str + :ivar discovery_duration: The amount of time that the ONVIF device discovery will wait for + supported device responses. + :vartype discovery_duration: str + """ + + _validation = { + 'method_name': {'required': True, 'readonly': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'discovery_duration': {'key': 'discoveryDuration', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword discovery_duration: The amount of time that the ONVIF device discovery will wait for + supported device responses. + :paramtype discovery_duration: str + """ + super(OnvifDeviceDiscoverRequest, self).__init__(**kwargs) + self.method_name = 'onvifDeviceDiscover' # type: str + self.discovery_duration = kwargs.get('discovery_duration', None) + + +class OnvifDeviceGetRequest(MethodRequest): + """Retrieves properties and media profiles of an ONVIF device. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. Direct method method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". + :vartype api_version: str + :ivar endpoint: Required. Base class for endpoints. + :vartype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase + """ + + _validation = { + 'method_name': {'required': True, 'readonly': True}, + 'endpoint': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword endpoint: Required. Base class for endpoints. + :paramtype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase + """ + super(OnvifDeviceGetRequest, self).__init__(**kwargs) + self.method_name = 'onvifDeviceGet' # type: str + self.endpoint = kwargs['endpoint'] + + +class OnvifDns(msrest.serialization.Model): + """The ONVIF device DNS properties. + + :ivar from_dhcp: Result value showing if the ONVIF device is configured to use DHCP. + :vartype from_dhcp: bool + :ivar ipv4_address: An array of IPv4 address for the discovered ONVIF device. + :vartype ipv4_address: list[str] + :ivar ipv6_address: An array of IPv6 address for the discovered ONVIF device. + :vartype ipv6_address: list[str] + """ + + _attribute_map = { + 'from_dhcp': {'key': 'fromDhcp', 'type': 'bool'}, + 'ipv4_address': {'key': 'ipv4Address', 'type': '[str]'}, + 'ipv6_address': {'key': 'ipv6Address', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword from_dhcp: Result value showing if the ONVIF device is configured to use DHCP. 
+ :paramtype from_dhcp: bool
+ :keyword ipv4_address: An array of IPv4 address for the discovered ONVIF device.
+ :paramtype ipv4_address: list[str]
+ :keyword ipv6_address: An array of IPv6 address for the discovered ONVIF device.
+ :paramtype ipv6_address: list[str]
+ """
+ super(OnvifDns, self).__init__(**kwargs)
+ self.from_dhcp = kwargs.get('from_dhcp', None)
+ self.ipv4_address = kwargs.get('ipv4_address', None)
+ self.ipv6_address = kwargs.get('ipv6_address', None)
+
+
+class OnvifHostName(msrest.serialization.Model):
+ """The hostname properties of the ONVIF device.
+
+ :ivar from_dhcp: Result value showing if the ONVIF device is configured to use DHCP.
+ :vartype from_dhcp: bool
+ :ivar hostname: The hostname of the ONVIF device.
+ :vartype hostname: str
+ """
+
+ _attribute_map = {
+ 'from_dhcp': {'key': 'fromDhcp', 'type': 'bool'},
+ 'hostname': {'key': 'hostname', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ """
+ :keyword from_dhcp: Result value showing if the ONVIF device is configured to use DHCP.
+ :paramtype from_dhcp: bool
+ :keyword hostname: The hostname of the ONVIF device.
+ :paramtype hostname: str
+ """
+ super(OnvifHostName, self).__init__(**kwargs)
+ self.from_dhcp = kwargs.get('from_dhcp', None)
+ self.hostname = kwargs.get('hostname', None)
+
+
+class OnvifSystemDateTime(msrest.serialization.Model):
+ """The system date and time properties of the ONVIF device.
+
+ :ivar type: An enum value determining whether the date time was configured using NTP or manual.
+ Possible values include: "Ntp", "Manual".
+ :vartype type: str or ~azure.media.videoanalyzer.edge.models.OnvifSystemDateTimeType
+ :ivar time: The device datetime returned when calling the request.
+ :vartype time: str
+ :ivar time_zone: The timezone of the ONVIF device datetime.
+ :vartype time_zone: str
+ """
+
+ _attribute_map = {
+ 'type': {'key': 'type', 'type': 'str'},
+ 'time': {'key': 'time', 'type': 'str'},
+ 'time_zone': {'key': 'timeZone', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ """
+ :keyword type: An enum value determining whether the date time was configured using NTP or
+ manual. Possible values include: "Ntp", "Manual".
+ :paramtype type: str or ~azure.media.videoanalyzer.edge.models.OnvifSystemDateTimeType
+ :keyword time: The device datetime returned when calling the request.
+ :paramtype time: str
+ :keyword time_zone: The timezone of the ONVIF device datetime.
+ :paramtype time_zone: str
+ """
+ super(OnvifSystemDateTime, self).__init__(**kwargs)
+ self.type = kwargs.get('type', None)
+ self.time = kwargs.get('time', None)
+ self.time_zone = kwargs.get('time_zone', None)
+
+
 class OutputSelector(msrest.serialization.Model):
 """Allows for the selection of particular streams from another node.

- :param property: The property of the data stream to be used as the selection criteria. Possible
+ :ivar property: The property of the data stream to be used as the selection criteria. Possible
 values include: "mediaType".
- :type property: str or ~azure.media.videoanalyzer.edge.models.OutputSelectorProperty
- :param operator: The operator to compare properties by. Possible values include: "is", "isNot".
- :type operator: str or ~azure.media.videoanalyzer.edge.models.OutputSelectorOperator
- :param value: Value to compare against.
- :type value: str
+ :vartype property: str or ~azure.media.videoanalyzer.edge.models.OutputSelectorProperty
+ :ivar operator: The operator to compare properties by. Possible values include: "is", "isNot".
+ :vartype operator: str or ~azure.media.videoanalyzer.edge.models.OutputSelectorOperator + :ivar value: Value to compare against. + :vartype value: str """ _attribute_map = { @@ -1615,6 +2405,16 @@ def __init__( self, **kwargs ): + """ + :keyword property: The property of the data stream to be used as the selection criteria. + Possible values include: "mediaType". + :paramtype property: str or ~azure.media.videoanalyzer.edge.models.OutputSelectorProperty + :keyword operator: The operator to compare properties by. Possible values include: "is", + "isNot". + :paramtype operator: str or ~azure.media.videoanalyzer.edge.models.OutputSelectorOperator + :keyword value: Value to compare against. + :paramtype value: str + """ super(OutputSelector, self).__init__(**kwargs) self.property = kwargs.get('property', None) self.operator = kwargs.get('operator', None) @@ -1626,16 +2426,16 @@ class ParameterDeclaration(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param name: Required. Name of the parameter. - :type name: str - :param type: Required. Type of the parameter. Possible values include: "string", - "secretString", "int", "double", "bool". - :type type: str or ~azure.media.videoanalyzer.edge.models.ParameterType - :param description: Description of the parameter. - :type description: str - :param default: The default value for the parameter to be used if the live pipeline does not + :ivar name: Required. Name of the parameter. + :vartype name: str + :ivar type: Required. Type of the parameter. Possible values include: "string", "secretString", + "int", "double", "bool". + :vartype type: str or ~azure.media.videoanalyzer.edge.models.ParameterType + :ivar description: Description of the parameter. + :vartype description: str + :ivar default: The default value for the parameter to be used if the live pipeline does not specify a value. - :type default: str + :vartype default: str """ _validation = { @@ -1654,6 +2454,18 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. Name of the parameter. + :paramtype name: str + :keyword type: Required. Type of the parameter. Possible values include: "string", + "secretString", "int", "double", "bool". + :paramtype type: str or ~azure.media.videoanalyzer.edge.models.ParameterType + :keyword description: Description of the parameter. + :paramtype description: str + :keyword default: The default value for the parameter to be used if the live pipeline does not + specify a value. + :paramtype default: str + """ super(ParameterDeclaration, self).__init__(**kwargs) self.name = kwargs['name'] self.type = kwargs['type'] @@ -1666,10 +2478,10 @@ class ParameterDefinition(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param name: Required. Name of the parameter declared in the pipeline topology. - :type name: str - :param value: Parameter value to be applied on this specific live pipeline. - :type value: str + :ivar name: Required. Name of the parameter declared in the pipeline topology. + :vartype name: str + :ivar value: Parameter value to be applied on this specific live pipeline. + :vartype value: str """ _validation = { @@ -1685,6 +2497,12 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. Name of the parameter declared in the pipeline topology. + :paramtype name: str + :keyword value: Parameter value to be applied on this specific live pipeline. 
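An `OutputSelector` is typically attached to a `NodeInput` to narrow which upstream streams reach a node, for example to pass only video into a processor. A small sketch (the upstream node name is illustrative):

```python
from azure.media.videoanalyzeredge import NodeInput, OutputSelector

# Route only the video stream of an upstream node named "rtspSource"
# into the node that declares this input.
video_only_input = NodeInput(
    node_name="rtspSource",
    output_selectors=[
        OutputSelector(property="mediaType", operator="is", value="video"),
    ],
)
```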
+ :paramtype value: str + """ super(ParameterDefinition, self).__init__(**kwargs) self.name = kwargs['name'] self.value = kwargs.get('value', None) @@ -1695,10 +2513,10 @@ class PemCertificateList(CertificateSource): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param certificates: Required. PEM formatted public certificates. One certificate per entry. - :type certificates: list[str] + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar certificates: Required. PEM formatted public certificates. One certificate per entry. + :vartype certificates: list[str] """ _validation = { @@ -1715,6 +2533,10 @@ def __init__( self, **kwargs ): + """ + :keyword certificates: Required. PEM formatted public certificates. One certificate per entry. + :paramtype certificates: list[str] + """ super(PemCertificateList, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.PemCertificateList' # type: str self.certificates = kwargs['certificates'] @@ -1731,12 +2553,12 @@ class PipelineTopology(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param name: Required. Pipeline topology unique identifier. - :type name: str - :param system_data: Read-only system metadata associated with this object. - :type system_data: ~azure.media.videoanalyzer.edge.models.SystemData - :param properties: Pipeline topology properties. - :type properties: ~azure.media.videoanalyzer.edge.models.PipelineTopologyProperties + :ivar name: Required. Pipeline topology unique identifier. + :vartype name: str + :ivar system_data: Read-only system metadata associated with this object. + :vartype system_data: ~azure.media.videoanalyzer.edge.models.SystemData + :ivar properties: Pipeline topology properties. + :vartype properties: ~azure.media.videoanalyzer.edge.models.PipelineTopologyProperties """ _validation = { @@ -1753,6 +2575,14 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. Pipeline topology unique identifier. + :paramtype name: str + :keyword system_data: Read-only system metadata associated with this object. + :paramtype system_data: ~azure.media.videoanalyzer.edge.models.SystemData + :keyword properties: Pipeline topology properties. + :paramtype properties: ~azure.media.videoanalyzer.edge.models.PipelineTopologyProperties + """ super(PipelineTopology, self).__init__(**kwargs) self.name = kwargs['name'] self.system_data = kwargs.get('system_data', None) @@ -1762,12 +2592,12 @@ def __init__( class PipelineTopologyCollection(msrest.serialization.Model): """A collection of pipeline topologies. - :param value: List of pipeline topologies. - :type value: list[~azure.media.videoanalyzer.edge.models.PipelineTopology] - :param continuation_token: A continuation token to be used in subsequent calls when enumerating + :ivar value: List of pipeline topologies. + :vartype value: list[~azure.media.videoanalyzer.edge.models.PipelineTopology] + :ivar continuation_token: A continuation token to be used in subsequent calls when enumerating through the collection. This is returned when the collection results won't fit in a single response. - :type continuation_token: str + :vartype continuation_token: str """ _attribute_map = { @@ -1779,6 +2609,14 @@ def __init__( self, **kwargs ): + """ + :keyword value: List of pipeline topologies. 
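`ParameterDeclaration` (on the topology) and `ParameterDefinition` (on a live pipeline) work as a pair: the topology declares a placeholder, and each live pipeline supplies its value. A sketch with illustrative topology and parameter names:

```python
from azure.media.videoanalyzeredge import (
    LivePipeline,
    LivePipelineProperties,
    ParameterDefinition,
)

# Supply per-camera values for parameters declared on the topology.
live_pipeline = LivePipeline(
    name="camera1-pipeline",
    properties=LivePipelineProperties(
        topology_name="cvrTopology",
        parameters=[
            ParameterDefinition(name="rtspUrl", value="rtsp://camera1.internal:554/stream1"),
            ParameterDefinition(name="rtspPassword", value="<camera-password>"),
        ],
    ),
)
```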
+ :paramtype value: list[~azure.media.videoanalyzer.edge.models.PipelineTopology] + :keyword continuation_token: A continuation token to be used in subsequent calls when + enumerating through the collection. This is returned when the collection results won't fit in a + single response. + :paramtype continuation_token: str + """ super(PipelineTopologyCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.continuation_token = kwargs.get('continuation_token', None) @@ -1793,15 +2631,15 @@ class PipelineTopologyDeleteRequest(MethodRequestEmptyBodyBase): :ivar method_name: Required. Direct method method name.Constant filled by server. :vartype method_name: str - :ivar api_version: Video Analyzer API version. Default value: "1.0". + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". :vartype api_version: str - :param name: Required. Resource name. - :type name: str + :ivar name: Required. Resource name. + :vartype name: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, - 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1811,12 +2649,17 @@ class PipelineTopologyDeleteRequest(MethodRequestEmptyBodyBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" - def __init__( self, **kwargs ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword name: Required. Resource name. + :paramtype name: str + """ super(PipelineTopologyDeleteRequest, self).__init__(**kwargs) self.method_name = 'pipelineTopologyDelete' # type: str @@ -1830,15 +2673,15 @@ class PipelineTopologyGetRequest(MethodRequestEmptyBodyBase): :ivar method_name: Required. Direct method method name.Constant filled by server. :vartype method_name: str - :ivar api_version: Video Analyzer API version. Default value: "1.0". + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". :vartype api_version: str - :param name: Required. Resource name. - :type name: str + :ivar name: Required. Resource name. + :vartype name: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, - 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1848,12 +2691,17 @@ class PipelineTopologyGetRequest(MethodRequestEmptyBodyBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" - def __init__( self, **kwargs ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword name: Required. Resource name. + :paramtype name: str + """ super(PipelineTopologyGetRequest, self).__init__(**kwargs) self.method_name = 'pipelineTopologyGet' # type: str @@ -1867,13 +2715,13 @@ class PipelineTopologyListRequest(MethodRequest): :ivar method_name: Required. Direct method method name.Constant filled by server. :vartype method_name: str - :ivar api_version: Video Analyzer API version. Default value: "1.0". + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". 
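The get, delete, and list requests above are sent the same way as the other direct-method requests; `api_version` can simply be omitted since it now defaults to "1.1". A sketch (the topology name is a placeholder):

```python
from azure.iot.hub.models import CloudToDeviceMethod
from azure.media.videoanalyzeredge import (
    PipelineTopologyGetRequest,
    PipelineTopologyListRequest,
)

# api_version is left unset and defaults to "1.1" in this SDK version.
list_request = PipelineTopologyListRequest()
get_request = PipelineTopologyGetRequest(name="cvrTopology")

list_method = CloudToDeviceMethod(
    method_name=list_request.method_name,
    payload=list_request.serialize(),
)
```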
:vartype api_version: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, - 'api_version': {'constant': True}, } _attribute_map = { @@ -1881,12 +2729,15 @@ class PipelineTopologyListRequest(MethodRequest): 'api_version': {'key': '@apiVersion', 'type': 'str'}, } - api_version = "1.0" - def __init__( self, **kwargs ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + """ super(PipelineTopologyListRequest, self).__init__(**kwargs) self.method_name = 'pipelineTopologyList' # type: str @@ -1894,23 +2745,23 @@ def __init__( class PipelineTopologyProperties(msrest.serialization.Model): """Pipeline topology properties. - :param description: An optional description of the pipeline topology. It is recommended that - the expected use of the topology to be described here. - :type description: str - :param parameters: List of the topology parameter declarations. Parameters declared here can be + :ivar description: An optional description of the pipeline topology. It is recommended that the + expected use of the topology to be described here. + :vartype description: str + :ivar parameters: List of the topology parameter declarations. Parameters declared here can be referenced throughout the topology nodes through the use of "${PARAMETER_NAME}" string pattern. Parameters can have optional default values and can later be defined in individual instances of the pipeline. - :type parameters: list[~azure.media.videoanalyzer.edge.models.ParameterDeclaration] - :param sources: List of the topology source nodes. Source nodes enable external data to be + :vartype parameters: list[~azure.media.videoanalyzer.edge.models.ParameterDeclaration] + :ivar sources: List of the topology source nodes. Source nodes enable external data to be ingested by the pipeline. - :type sources: list[~azure.media.videoanalyzer.edge.models.SourceNodeBase] - :param processors: List of the topology processor nodes. Processor nodes enable pipeline data - to be analyzed, processed or transformed. - :type processors: list[~azure.media.videoanalyzer.edge.models.ProcessorNodeBase] - :param sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored or + :vartype sources: list[~azure.media.videoanalyzer.edge.models.SourceNodeBase] + :ivar processors: List of the topology processor nodes. Processor nodes enable pipeline data to + be analyzed, processed or transformed. + :vartype processors: list[~azure.media.videoanalyzer.edge.models.ProcessorNodeBase] + :ivar sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored or exported. - :type sinks: list[~azure.media.videoanalyzer.edge.models.SinkNodeBase] + :vartype sinks: list[~azure.media.videoanalyzer.edge.models.SinkNodeBase] """ _attribute_map = { @@ -1925,6 +2776,25 @@ def __init__( self, **kwargs ): + """ + :keyword description: An optional description of the pipeline topology. It is recommended that + the expected use of the topology to be described here. + :paramtype description: str + :keyword parameters: List of the topology parameter declarations. Parameters declared here can + be referenced throughout the topology nodes through the use of "${PARAMETER_NAME}" string + pattern. Parameters can have optional default values and can later be defined in individual + instances of the pipeline. 
+        :paramtype parameters: list[~azure.media.videoanalyzer.edge.models.ParameterDeclaration]
+        :keyword sources: List of the topology source nodes. Source nodes enable external data to be
+         ingested by the pipeline.
+        :paramtype sources: list[~azure.media.videoanalyzer.edge.models.SourceNodeBase]
+        :keyword processors: List of the topology processor nodes. Processor nodes enable pipeline
+         data to be analyzed, processed or transformed.
+        :paramtype processors: list[~azure.media.videoanalyzer.edge.models.ProcessorNodeBase]
+        :keyword sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored
+         or exported.
+        :paramtype sinks: list[~azure.media.videoanalyzer.edge.models.SinkNodeBase]
+        """
         super(PipelineTopologyProperties, self).__init__(**kwargs)
         self.description = kwargs.get('description', None)
         self.parameters = kwargs.get('parameters', None)
@@ -1942,9 +2812,10 @@ class PipelineTopologySetRequest(MethodRequest):
 
     :ivar method_name: Required. Direct method method name.Constant filled by server.
     :vartype method_name: str
-    :ivar api_version: Video Analyzer API version. Default value: "1.0".
+    :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None
+     and "1.1". The default value is "1.1".
     :vartype api_version: str
-    :param pipeline_topology: Required. Pipeline topology describes the processing steps to be
+    :ivar pipeline_topology: Required. Pipeline topology describes the processing steps to be
     applied when processing media for a particular outcome. The topology should be defined
     according to the scenario to be achieved and can be reused across many pipeline instances which
     share the same processing characteristics. For instance, a pipeline topology which acquires
@@ -1963,12 +2834,11 @@
 
     * Processors: list of nodes which perform data analysis or transformations.
      -Sinks: list of one or more data sinks which allow for data to be stored or exported to other
     destinations.
-    :type pipeline_topology: ~azure.media.videoanalyzer.edge.models.PipelineTopology
+    :vartype pipeline_topology: ~azure.media.videoanalyzer.edge.models.PipelineTopology
     """
 
     _validation = {
         'method_name': {'required': True, 'readonly': True},
-        'api_version': {'constant': True},
         'pipeline_topology': {'required': True},
     }
 
@@ -1978,19 +2848,48 @@ class PipelineTopologySetRequest(MethodRequest):
         'pipeline_topology': {'key': 'pipelineTopology', 'type': 'PipelineTopology'},
     }
 
-    api_version = "1.0"
-
     def __init__(
        self,
        **kwargs
    ):
+        """
+        :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are
+         None and "1.1". The default value is "1.1".
+        :paramtype api_version: str
+        :keyword pipeline_topology: Required. Pipeline topology describes the processing steps to be
+         applied when processing media for a particular outcome. The topology should be defined
+         according to the scenario to be achieved and can be reused across many pipeline instances
+         which share the same processing characteristics. For instance, a pipeline topology which
+         acquires data from an RTSP camera, processes it with a specific AI model and stores the data
+         in the cloud can be reused across many different cameras, as long as the same processing is
+         to be applied across all the cameras. Individual instance properties can be defined through
+         the use of user-defined parameters, which allow for a topology to be parameterized, thus
+         allowing individual pipelines to refer to different values, such as individual cameras' RTSP
+         endpoints and credentials. Overall a topology is composed of the following:
+
+
+        * Parameters: list of user-defined parameters that can be referenced across the topology
+          nodes.
+        * Sources: list of one or more data source nodes such as an RTSP source which allows for
+          media to be ingested from cameras.
+        * Processors: list of nodes which perform data analysis or transformations.
+        * Sinks: list of one or more data sinks which allow for data to be stored or exported to
+          other destinations.
+        :paramtype pipeline_topology: ~azure.media.videoanalyzer.edge.models.PipelineTopology
+        """
         super(PipelineTopologySetRequest, self).__init__(**kwargs)
         self.method_name = 'pipelineTopologySet'  # type: str
         self.pipeline_topology = kwargs['pipeline_topology']
 
 
 class PipelineTopologySetRequestBody(PipelineTopology, MethodRequest):
-    """Pipeline topology resource representation.
+    """Pipeline topology describes the processing steps to be applied when processing media for a particular outcome. The topology should be defined according to the scenario to be achieved and can be reused across many pipeline instances which share the same processing characteristics. For instance, a pipeline topology which acquires data from an RTSP camera, processes it with a specific AI model and stores the data in the cloud can be reused across many different cameras, as long as the same processing is to be applied across all the cameras. Individual instance properties can be defined through the use of user-defined parameters, which allow for a topology to be parameterized, thus allowing individual pipelines to refer to different values, such as individual cameras' RTSP endpoints and credentials. Overall a topology is composed of the following:
+
+
+* Parameters: list of user-defined parameters that can be referenced across the topology nodes.
+* Sources: list of one or more data source nodes such as an RTSP source which allows for media to be ingested from cameras.
+* Processors: list of nodes which perform data analysis or transformations.
+* Sinks: list of one or more data sinks which allow for data to be stored or exported to other destinations.
 
     Variables are only populated by the server, and will be ignored when sending a request.
 
     All required parameters must be populated in order to send to Azure.
 
     :ivar method_name: Required. Direct method method name.Constant filled by server.
     :vartype method_name: str
-    :ivar api_version: Video Analyzer API version. Default value: "1.0".
+    :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None
+     and "1.1". The default value is "1.1".
     :vartype api_version: str
-    :param name: Required. Pipeline topology unique identifier.
-    :type name: str
-    :param system_data: Read-only system metadata associated with this object.
-    :type system_data: ~azure.media.videoanalyzer.edge.models.SystemData
-    :param properties: Pipeline topology properties.
-    :type properties: ~azure.media.videoanalyzer.edge.models.PipelineTopologyProperties
+    :ivar name: Required. Pipeline topology unique identifier.
+    :vartype name: str
+    :ivar system_data: Read-only system metadata associated with this object.
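Putting the pieces together, a `pipelineTopologySet` call wraps a complete `PipelineTopology`. A condensed sketch of a record-from-RTSP topology in the spirit of the package README (the names, cache path, and sizes are illustrative):

```python
from azure.media.videoanalyzeredge import (
    NodeInput,
    ParameterDeclaration,
    PipelineTopology,
    PipelineTopologyProperties,
    PipelineTopologySetRequest,
    RtspSource,
    UnsecuredEndpoint,
    UsernamePasswordCredentials,
    VideoSink,
)

properties = PipelineTopologyProperties(
    description="Continuous video recording from an RTSP camera",
    # Declared here; resolved per live pipeline via ParameterDefinition.
    parameters=[
        ParameterDeclaration(name="rtspUrl", type="string"),
        ParameterDeclaration(name="rtspUserName", type="string", default="admin"),
        ParameterDeclaration(name="rtspPassword", type="secretString"),
    ],
    sources=[
        RtspSource(
            name="rtspSource",
            endpoint=UnsecuredEndpoint(
                url="${rtspUrl}",
                credentials=UsernamePasswordCredentials(
                    username="${rtspUserName}",
                    password="${rtspPassword}",
                ),
            ),
        ),
    ],
    sinks=[
        VideoSink(
            name="videoSink",
            inputs=[NodeInput(node_name="rtspSource")],
            video_name="sample-cvr-video",
            local_media_cache_path="/var/lib/videoanalyzer/tmp/",
            local_media_cache_maximum_size_mi_b="1024",
        ),
    ],
)
topology = PipelineTopology(name="cvrTopology", properties=properties)
set_request = PipelineTopologySetRequest(pipeline_topology=topology)
payload = set_request.serialize()  # body for the pipelineTopologySet direct method
```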
+ :vartype system_data: ~azure.media.videoanalyzer.edge.models.SystemData + :ivar properties: Pipeline topology properties. + :vartype properties: ~azure.media.videoanalyzer.edge.models.PipelineTopologyProperties """ _validation = { 'method_name': {'required': True, 'readonly': True}, - 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -2022,39 +2921,471 @@ class PipelineTopologySetRequestBody(PipelineTopology, MethodRequest): 'properties': {'key': 'properties', 'type': 'PipelineTopologyProperties'}, } - api_version = "1.0" - def __init__( self, **kwargs ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword name: Required. Pipeline topology unique identifier. + :paramtype name: str + :keyword system_data: Read-only system metadata associated with this object. + :paramtype system_data: ~azure.media.videoanalyzer.edge.models.SystemData + :keyword properties: Pipeline topology properties. + :paramtype properties: ~azure.media.videoanalyzer.edge.models.PipelineTopologyProperties + """ super(PipelineTopologySetRequestBody, self).__init__(**kwargs) self.method_name = 'PipelineTopologySetRequestBody' # type: str + self.api_version = kwargs.get('api_version', "1.1") self.method_name = 'PipelineTopologySetRequestBody' # type: str self.name = kwargs['name'] self.system_data = kwargs.get('system_data', None) self.properties = kwargs.get('properties', None) +class RateControl(msrest.serialization.Model): + """Class representing the video's rate control. + + :ivar bit_rate_limit: the maximum output bitrate in kbps. + :vartype bit_rate_limit: float + :ivar encoding_interval: Interval at which images are encoded and transmitted. + :vartype encoding_interval: float + :ivar frame_rate_limit: Maximum output framerate in fps. + :vartype frame_rate_limit: float + :ivar guaranteed_frame_rate: A value of true indicates that frame rate is a fixed value rather + than an upper limit, and that the video encoder shall prioritize frame rate over all other + adaptable configuration values such as bitrate. + :vartype guaranteed_frame_rate: bool + """ + + _attribute_map = { + 'bit_rate_limit': {'key': 'bitRateLimit', 'type': 'float'}, + 'encoding_interval': {'key': 'encodingInterval', 'type': 'float'}, + 'frame_rate_limit': {'key': 'frameRateLimit', 'type': 'float'}, + 'guaranteed_frame_rate': {'key': 'guaranteedFrameRate', 'type': 'bool'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword bit_rate_limit: the maximum output bitrate in kbps. + :paramtype bit_rate_limit: float + :keyword encoding_interval: Interval at which images are encoded and transmitted. + :paramtype encoding_interval: float + :keyword frame_rate_limit: Maximum output framerate in fps. + :paramtype frame_rate_limit: float + :keyword guaranteed_frame_rate: A value of true indicates that frame rate is a fixed value + rather than an upper limit, and that the video encoder shall prioritize frame rate over all + other adaptable configuration values such as bitrate. 
+ :paramtype guaranteed_frame_rate: bool + """ + super(RateControl, self).__init__(**kwargs) + self.bit_rate_limit = kwargs.get('bit_rate_limit', None) + self.encoding_interval = kwargs.get('encoding_interval', None) + self.frame_rate_limit = kwargs.get('frame_rate_limit', None) + self.guaranteed_frame_rate = kwargs.get('guaranteed_frame_rate', None) + + +class RemoteDeviceAdapter(msrest.serialization.Model): + """The Video Analyzer edge module can act as a transparent gateway for video, enabling IoT devices to send video to the cloud from behind a firewall. A remote device adapter should be created for each such IoT device. Communication between the cloud and IoT device would then flow via the Video Analyzer edge module. + + All required parameters must be populated in order to send to Azure. + + :ivar name: Required. The unique identifier for the remote device adapter. + :vartype name: str + :ivar system_data: Read-only system metadata associated with this object. + :vartype system_data: ~azure.media.videoanalyzer.edge.models.SystemData + :ivar properties: Properties of the remote device adapter. + :vartype properties: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapterProperties + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, + 'properties': {'key': 'properties', 'type': 'RemoteDeviceAdapterProperties'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword name: Required. The unique identifier for the remote device adapter. + :paramtype name: str + :keyword system_data: Read-only system metadata associated with this object. + :paramtype system_data: ~azure.media.videoanalyzer.edge.models.SystemData + :keyword properties: Properties of the remote device adapter. + :paramtype properties: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapterProperties + """ + super(RemoteDeviceAdapter, self).__init__(**kwargs) + self.name = kwargs['name'] + self.system_data = kwargs.get('system_data', None) + self.properties = kwargs.get('properties', None) + + +class RemoteDeviceAdapterCollection(msrest.serialization.Model): + """A list of remote device adapters. + + :ivar value: An array of remote device adapters. + :vartype value: list[~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapter] + :ivar continuation_token: A continuation token to use in subsequent calls to enumerate through + the remote device adapter collection. This is used when the collection contains too many + results to return in one response. + :vartype continuation_token: str + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[RemoteDeviceAdapter]'}, + 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword value: An array of remote device adapters. + :paramtype value: list[~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapter] + :keyword continuation_token: A continuation token to use in subsequent calls to enumerate + through the remote device adapter collection. This is used when the collection contains too + many results to return in one response. 
+ :paramtype continuation_token: str + """ + super(RemoteDeviceAdapterCollection, self).__init__(**kwargs) + self.value = kwargs.get('value', None) + self.continuation_token = kwargs.get('continuation_token', None) + + +class RemoteDeviceAdapterDeleteRequest(MethodRequestEmptyBodyBase): + """Deletes an existing remote device adapter. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. Direct method method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". + :vartype api_version: str + :ivar name: Required. Resource name. + :vartype name: str + """ + + _validation = { + 'method_name': {'required': True, 'readonly': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword name: Required. Resource name. + :paramtype name: str + """ + super(RemoteDeviceAdapterDeleteRequest, self).__init__(**kwargs) + self.method_name = 'remoteDeviceAdapterDelete' # type: str + + +class RemoteDeviceAdapterGetRequest(MethodRequestEmptyBodyBase): + """Retrieves an existing remote device adapter. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. Direct method method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". + :vartype api_version: str + :ivar name: Required. Resource name. + :vartype name: str + """ + + _validation = { + 'method_name': {'required': True, 'readonly': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword name: Required. Resource name. + :paramtype name: str + """ + super(RemoteDeviceAdapterGetRequest, self).__init__(**kwargs) + self.method_name = 'remoteDeviceAdapterGet' # type: str + + +class RemoteDeviceAdapterListRequest(MethodRequest): + """List all existing remote device adapters. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. Direct method method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". 
+ :vartype api_version: str + """ + + _validation = { + 'method_name': {'required': True, 'readonly': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + """ + super(RemoteDeviceAdapterListRequest, self).__init__(**kwargs) + self.method_name = 'remoteDeviceAdapterList' # type: str + + +class RemoteDeviceAdapterProperties(msrest.serialization.Model): + """Remote device adapter properties. + + All required parameters must be populated in order to send to Azure. + + :ivar description: An optional description for the remote device adapter. + :vartype description: str + :ivar target: Required. The IoT device to which this remote device will connect. + :vartype target: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapterTarget + :ivar iot_hub_device_connection: Required. Information that enables communication between the + IoT Hub and the IoT device - allowing this edge module to act as a transparent gateway between + the two. + :vartype iot_hub_device_connection: + ~azure.media.videoanalyzer.edge.models.IotHubDeviceConnection + """ + + _validation = { + 'target': {'required': True}, + 'iot_hub_device_connection': {'required': True}, + } + + _attribute_map = { + 'description': {'key': 'description', 'type': 'str'}, + 'target': {'key': 'target', 'type': 'RemoteDeviceAdapterTarget'}, + 'iot_hub_device_connection': {'key': 'iotHubDeviceConnection', 'type': 'IotHubDeviceConnection'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword description: An optional description for the remote device adapter. + :paramtype description: str + :keyword target: Required. The IoT device to which this remote device will connect. + :paramtype target: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapterTarget + :keyword iot_hub_device_connection: Required. Information that enables communication between + the IoT Hub and the IoT device - allowing this edge module to act as a transparent gateway + between the two. + :paramtype iot_hub_device_connection: + ~azure.media.videoanalyzer.edge.models.IotHubDeviceConnection + """ + super(RemoteDeviceAdapterProperties, self).__init__(**kwargs) + self.description = kwargs.get('description', None) + self.target = kwargs['target'] + self.iot_hub_device_connection = kwargs['iot_hub_device_connection'] + + +class RemoteDeviceAdapterSetRequest(MethodRequest): + """Creates a new remote device adapter or updates an existing one. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. Direct method method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". + :vartype api_version: str + :ivar remote_device_adapter: Required. The Video Analyzer edge module can act as a transparent + gateway for video, enabling IoT devices to send video to the cloud from behind a firewall. A + remote device adapter should be created for each such IoT device. Communication between the + cloud and IoT device would then flow via the Video Analyzer edge module. 
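A sketch of registering a remote device adapter for a camera behind a firewall. The host, device id, and key below are placeholders, and `SymmetricKeyCredentials` is assumed here to be the credential model carrying the IoT device's symmetric key:

```python
from azure.media.videoanalyzeredge import (
    IotHubDeviceConnection,
    RemoteDeviceAdapter,
    RemoteDeviceAdapterProperties,
    RemoteDeviceAdapterSetRequest,
    RemoteDeviceAdapterTarget,
    SymmetricKeyCredentials,
)

adapter = RemoteDeviceAdapter(
    name="camera1-adapter",
    properties=RemoteDeviceAdapterProperties(
        # The camera reachable on the private network.
        target=RemoteDeviceAdapterTarget(host="camera1.internal"),
        # The IoT Hub identity that the edge module proxies traffic for.
        iot_hub_device_connection=IotHubDeviceConnection(
            device_id="camera1",
            credentials=SymmetricKeyCredentials(key="<device-symmetric-key>"),
        ),
    ),
)
set_request = RemoteDeviceAdapterSetRequest(remote_device_adapter=adapter)
payload = set_request.serialize()  # body for the remoteDeviceAdapterSet direct method
```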
+ :vartype remote_device_adapter: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapter + """ + + _validation = { + 'method_name': {'required': True, 'readonly': True}, + 'remote_device_adapter': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'remote_device_adapter': {'key': 'remoteDeviceAdapter', 'type': 'RemoteDeviceAdapter'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword remote_device_adapter: Required. The Video Analyzer edge module can act as a + transparent gateway for video, enabling IoT devices to send video to the cloud from behind a + firewall. A remote device adapter should be created for each such IoT device. Communication + between the cloud and IoT device would then flow via the Video Analyzer edge module. + :paramtype remote_device_adapter: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapter + """ + super(RemoteDeviceAdapterSetRequest, self).__init__(**kwargs) + self.method_name = 'remoteDeviceAdapterSet' # type: str + self.remote_device_adapter = kwargs['remote_device_adapter'] + + +class RemoteDeviceAdapterSetRequestBody(RemoteDeviceAdapter, MethodRequest): + """The Video Analyzer edge module can act as a transparent gateway for video, enabling IoT devices to send video to the cloud from behind a firewall. A remote device adapter should be created for each such IoT device. Communication between the cloud and IoT device would then flow via the Video Analyzer edge module. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. Direct method method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". + :vartype api_version: str + :ivar name: Required. The unique identifier for the remote device adapter. + :vartype name: str + :ivar system_data: Read-only system metadata associated with this object. + :vartype system_data: ~azure.media.videoanalyzer.edge.models.SystemData + :ivar properties: Properties of the remote device adapter. + :vartype properties: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapterProperties + """ + + _validation = { + 'method_name': {'required': True, 'readonly': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, + 'properties': {'key': 'properties', 'type': 'RemoteDeviceAdapterProperties'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword name: Required. The unique identifier for the remote device adapter. + :paramtype name: str + :keyword system_data: Read-only system metadata associated with this object. + :paramtype system_data: ~azure.media.videoanalyzer.edge.models.SystemData + :keyword properties: Properties of the remote device adapter. 
+ :paramtype properties: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapterProperties + """ + super(RemoteDeviceAdapterSetRequestBody, self).__init__(**kwargs) + self.method_name = 'RemoteDeviceAdapterSetRequestBody' # type: str + self.api_version = kwargs.get('api_version', "1.1") + self.method_name = 'RemoteDeviceAdapterSetRequestBody' # type: str + self.name = kwargs['name'] + self.system_data = kwargs.get('system_data', None) + self.properties = kwargs.get('properties', None) + + +class RemoteDeviceAdapterTarget(msrest.serialization.Model): + """Properties of the remote device adapter target. + + All required parameters must be populated in order to send to Azure. + + :ivar host: Required. Hostname or IP address of the remote device. + :vartype host: str + """ + + _validation = { + 'host': {'required': True}, + } + + _attribute_map = { + 'host': {'key': 'host', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword host: Required. Hostname or IP address of the remote device. + :paramtype host: str + """ + super(RemoteDeviceAdapterTarget, self).__init__(**kwargs) + self.host = kwargs['host'] + + class RtspSource(SourceNodeBase): """RTSP source allows for media from an RTSP camera or generic RTSP server to be ingested into a live pipeline. All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str - :param transport: Network transport utilized by the RTSP and RTP exchange: TCP or HTTP. When + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar transport: Network transport utilized by the RTSP and RTP exchange: TCP or HTTP. When using TCP, the RTP packets are interleaved on the TCP RTSP connection. When using HTTP, the RTSP messages are exchanged through long lived HTTP connections, and the RTP packages are interleaved in the HTTP connections alongside the RTSP messages. Possible values include: "http", "tcp". - :type transport: str or ~azure.media.videoanalyzer.edge.models.RtspTransport - :param endpoint: Required. RTSP endpoint information for Video Analyzer to connect to. This + :vartype transport: str or ~azure.media.videoanalyzer.edge.models.RtspTransport + :ivar endpoint: Required. RTSP endpoint information for Video Analyzer to connect to. This contains the required information for Video Analyzer to connect to RTSP cameras and/or generic RTSP servers. - :type endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase + :vartype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase """ _validation = { @@ -2074,6 +3405,20 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + :keyword transport: Network transport utilized by the RTSP and RTP exchange: TCP or HTTP. When + using TCP, the RTP packets are interleaved on the TCP RTSP connection. When using HTTP, the + RTSP messages are exchanged through long lived HTTP connections, and the RTP packages are + interleaved in the HTTP connections alongside the RTSP messages. Possible values include: + "http", "tcp". + :paramtype transport: str or ~azure.media.videoanalyzer.edge.models.RtspTransport + :keyword endpoint: Required. 
RTSP endpoint information for Video Analyzer to connect to. This
+         contains the required information for Video Analyzer to connect to RTSP cameras and/or generic
+         RTSP servers.
+        :paramtype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase
+        """
         super(RtspSource, self).__init__(**kwargs)
         self.type = '#Microsoft.VideoAnalyzer.RtspSource'  # type: str
         self.transport = kwargs.get('transport', None)
@@ -2083,15 +3428,15 @@ def __init__(
 class SamplingOptions(msrest.serialization.Model):
     """Defines how often media is submitted to the extension plugin.
 
-    :param skip_samples_without_annotation: When set to 'true', prevents frames without upstream
+    :ivar skip_samples_without_annotation: When set to 'true', prevents frames without upstream
      inference data to be sent to the extension plugin. This is useful to limit the frames sent to
     the extension to pre-analyzed frames only. For example, when used downstream from a motion
     detector, this can enable for only frames in which motion has been detected to be further
     analyzed.
-    :type skip_samples_without_annotation: str
-    :param maximum_samples_per_second: Maximum rate of samples submitted to the extension. This
+    :vartype skip_samples_without_annotation: str
+    :ivar maximum_samples_per_second: Maximum rate of samples submitted to the extension. This
     prevents an extension plugin to be overloaded with data.
-    :type maximum_samples_per_second: str
+    :vartype maximum_samples_per_second: str
     """
 
     _attribute_map = {
@@ -2103,6 +3448,17 @@ def __init__(
         self,
         **kwargs
     ):
+        """
+        :keyword skip_samples_without_annotation: When set to 'true', prevents frames without
+         upstream inference data from being sent to the extension plugin. This is useful to limit the
+         frames sent to the extension to pre-analyzed frames only. For example, when used downstream
+         from a motion detector, this ensures that only frames in which motion has been detected are
+         analyzed further.
+        :paramtype skip_samples_without_annotation: str
+        :keyword maximum_samples_per_second: Maximum rate of samples submitted to the extension. This
+         prevents an extension plugin from being overloaded with data.
+        :paramtype maximum_samples_per_second: str
+        """
         super(SamplingOptions, self).__init__(**kwargs)
         self.skip_samples_without_annotation = kwargs.get('skip_samples_without_annotation', None)
         self.maximum_samples_per_second = kwargs.get('maximum_samples_per_second', None)
@@ -2113,31 +3469,31 @@ class SignalGateProcessor(ProcessorNodeBase):
 
     All required parameters must be populated in order to send to Azure.
 
-    :param type: Required. Type discriminator for the derived types.Constant filled by server.
-    :type type: str
-    :param name: Required. Node name. Must be unique within the topology.
-    :type name: str
-    :param inputs: Required. An array of upstream node references within the topology to be used as
+    :ivar type: Required. Type discriminator for the derived types.Constant filled by server.
+    :vartype type: str
+    :ivar name: Required. Node name. Must be unique within the topology.
+    :vartype name: str
+    :ivar inputs: Required. An array of upstream node references within the topology to be used as
      inputs for this node.
-    :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
-    :param activation_evaluation_window: The period of time over which the gate gathers input
-     events before evaluating them.
-    :type activation_evaluation_window: str
-    :param activation_signal_offset: Signal offset once the gate is activated (can be negative).
It
+    :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
+    :ivar activation_evaluation_window: The period of time over which the gate gathers input events
+     before evaluating them.
+    :vartype activation_evaluation_window: str
+    :ivar activation_signal_offset: Signal offset once the gate is activated (can be negative). It
     determines the how much farther behind of after the signal will be let through based on the
     activation time. A negative offset indicates that data prior the activation time must be
     included on the signal that is let through, once the gate is activated. When used upstream of a
     file or video sink, this allows for scenarios such as recording buffered media prior an event,
     such as: record video 5 seconds prior motions is detected.
-    :type activation_signal_offset: str
-    :param minimum_activation_time: The minimum period for which the gate remains open in the
+    :vartype activation_signal_offset: str
+    :ivar minimum_activation_time: The minimum period for which the gate remains open in the
     absence of subsequent triggers (events). When used upstream of a file or video sink, it
     determines the minimum length of the recorded video clip.
-    :type minimum_activation_time: str
-    :param maximum_activation_time: The maximum period for which the gate remains open in the
+    :vartype minimum_activation_time: str
+    :ivar maximum_activation_time: The maximum period for which the gate remains open in the
     presence of subsequent triggers (events). When used upstream of a file or video sink, it
     determines the maximum length of the recorded video clip.
-    :type maximum_activation_time: str
+    :vartype maximum_activation_time: str
     """
 
     _validation = {
@@ -2160,6 +3516,31 @@ def __init__(
         self,
         **kwargs
    ):
+        """
+        :keyword name: Required. Node name. Must be unique within the topology.
+        :paramtype name: str
+        :keyword inputs: Required. An array of upstream node references within the topology to be
+         used as inputs for this node.
+        :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
+        :keyword activation_evaluation_window: The period of time over which the gate gathers input
+         events before evaluating them.
+        :paramtype activation_evaluation_window: str
+        :keyword activation_signal_offset: Signal offset once the gate is activated (can be
+         negative). It determines how far behind or ahead of the activation time the signal will be
+         let through. A negative offset indicates that data prior to the activation time must be
+         included on the signal that is let through, once the gate is activated. When used upstream
+         of a file or video sink, this allows for scenarios such as recording buffered media prior to
+         an event, for example: recording video from 5 seconds before motion is detected.
+        :paramtype activation_signal_offset: str
+        :keyword minimum_activation_time: The minimum period for which the gate remains open in the
+         absence of subsequent triggers (events). When used upstream of a file or video sink, it
+         determines the minimum length of the recorded video clip.
+        :paramtype minimum_activation_time: str
+        :keyword maximum_activation_time: The maximum period for which the gate remains open in the
+         presence of subsequent triggers (events). When used upstream of a file or video sink, it
+         determines the maximum length of the recorded video clip.
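A sketch of an event-triggered recording gate built from this model (node names and durations are illustrative): the gate evaluates triggers over a one-second window, lets media through starting 5 seconds before the trigger, and keeps clips between 30 seconds and 5 minutes long.

```python
from azure.media.videoanalyzeredge import NodeInput, SignalGateProcessor

gate = SignalGateProcessor(
    name="signalGate",
    inputs=[
        NodeInput(node_name="motionDetection"),  # trigger signal
        NodeInput(node_name="rtspSource"),       # media to gate
    ],
    activation_evaluation_window="PT1S",
    activation_signal_offset="-PT5S",  # include 5s of buffered media before the event
    minimum_activation_time="PT30S",
    maximum_activation_time="PT5M",
)
```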
+ :paramtype maximum_activation_time: str + """ super(SignalGateProcessor, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SignalGateProcessor' # type: str self.activation_evaluation_window = kwargs.get('activation_evaluation_window', None) @@ -2176,8 +3557,8 @@ class SpatialAnalysisOperationBase(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param type: Required. The Type discriminator for the derived types.Constant filled by server. - :type type: str + :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. + :vartype type: str """ _validation = { @@ -2196,6 +3577,8 @@ def __init__( self, **kwargs ): + """ + """ super(SpatialAnalysisOperationBase, self).__init__(**kwargs) self.type = None # type: Optional[str] @@ -2205,11 +3588,11 @@ class SpatialAnalysisCustomOperation(SpatialAnalysisOperationBase): All required parameters must be populated in order to send to Azure. - :param type: Required. The Type discriminator for the derived types.Constant filled by server. - :type type: str - :param extension_configuration: Required. Custom configuration to pass to the Azure Cognitive + :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar extension_configuration: Required. Custom configuration to pass to the Azure Cognitive Services Spatial Analysis module. - :type extension_configuration: str + :vartype extension_configuration: str """ _validation = { @@ -2226,6 +3609,11 @@ def __init__( self, **kwargs ): + """ + :keyword extension_configuration: Required. Custom configuration to pass to the Azure Cognitive + Services Spatial Analysis module. + :paramtype extension_configuration: str + """ super(SpatialAnalysisCustomOperation, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SpatialAnalysisCustomOperation' # type: str self.extension_configuration = kwargs['extension_configuration'] @@ -2234,11 +3622,11 @@ def __init__( class SpatialAnalysisOperationEventBase(msrest.serialization.Model): """Defines the Azure Cognitive Services Spatial Analysis operation eventing configuration. - :param threshold: The event threshold. - :type threshold: str - :param focus: The operation focus type. Possible values include: "center", "bottomCenter", + :ivar threshold: The event threshold. + :vartype threshold: str + :ivar focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". - :type focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus + :vartype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus """ _attribute_map = { @@ -2250,6 +3638,13 @@ def __init__( self, **kwargs ): + """ + :keyword threshold: The event threshold. + :paramtype threshold: str + :keyword focus: The operation focus type. Possible values include: "center", "bottomCenter", + "footprint". + :paramtype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus + """ super(SpatialAnalysisOperationEventBase, self).__init__(**kwargs) self.threshold = kwargs.get('threshold', None) self.focus = kwargs.get('focus', None) @@ -2258,16 +3653,16 @@ def __init__( class SpatialAnalysisPersonCountEvent(SpatialAnalysisOperationEventBase): """Defines a Spatial Analysis person count operation eventing configuration. - :param threshold: The event threshold. - :type threshold: str - :param focus: The operation focus type. 
Possible values include: "center", "bottomCenter", + :ivar threshold: The event threshold. + :vartype threshold: str + :ivar focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". - :type focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus - :param trigger: The event trigger type. Possible values include: "event", "interval". - :type trigger: str or + :vartype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus + :ivar trigger: The event trigger type. Possible values include: "event", "interval". + :vartype trigger: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountEventTrigger - :param output_frequency: The event or interval output frequency. - :type output_frequency: str + :ivar output_frequency: The event or interval output frequency. + :vartype output_frequency: str """ _attribute_map = { @@ -2281,6 +3676,18 @@ def __init__( self, **kwargs ): + """ + :keyword threshold: The event threshold. + :paramtype threshold: str + :keyword focus: The operation focus type. Possible values include: "center", "bottomCenter", + "footprint". + :paramtype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus + :keyword trigger: The event trigger type. Possible values include: "event", "interval". + :paramtype trigger: str or + ~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountEventTrigger + :keyword output_frequency: The event or interval output frequency. + :paramtype output_frequency: str + """ super(SpatialAnalysisPersonCountEvent, self).__init__(**kwargs) self.trigger = kwargs.get('trigger', None) self.output_frequency = kwargs.get('output_frequency', None) @@ -2294,17 +3701,23 @@ class SpatialAnalysisTypedOperationBase(SpatialAnalysisOperationBase): All required parameters must be populated in order to send to Azure. - :param type: Required. The Type discriminator for the derived types.Constant filled by server. - :type type: str - :param debug: If set to 'true', enables debugging mode for this operation. - :type debug: str - :param camera_configuration: Advanced camera configuration. - :type camera_configuration: str - :param detector_node_configuration: Advanced detector node configuration. - :type detector_node_configuration: str - :param enable_face_mask_classifier: If set to 'true', enables face mask detection for this + :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar debug: If set to 'true', enables debugging mode for this operation. + :vartype debug: str + :ivar calibration_configuration: Advanced calibration configuration. + :vartype calibration_configuration: str + :ivar camera_configuration: Advanced camera configuration. + :vartype camera_configuration: str + :ivar camera_calibrator_node_configuration: Advanced camera calibrator configuration. + :vartype camera_calibrator_node_configuration: str + :ivar detector_node_configuration: Advanced detector node configuration. + :vartype detector_node_configuration: str + :ivar tracker_node_configuration: Advanced tracker node configuration. + :vartype tracker_node_configuration: str + :ivar enable_face_mask_classifier: If set to 'true', enables face mask detection for this operation. 
- :type enable_face_mask_classifier: str + :vartype enable_face_mask_classifier: str """ _validation = { @@ -2314,8 +3727,11 @@ class SpatialAnalysisTypedOperationBase(SpatialAnalysisOperationBase): _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'debug': {'key': 'debug', 'type': 'str'}, + 'calibration_configuration': {'key': 'calibrationConfiguration', 'type': 'str'}, 'camera_configuration': {'key': 'cameraConfiguration', 'type': 'str'}, + 'camera_calibrator_node_configuration': {'key': 'cameraCalibratorNodeConfiguration', 'type': 'str'}, 'detector_node_configuration': {'key': 'detectorNodeConfiguration', 'type': 'str'}, + 'tracker_node_configuration': {'key': 'trackerNodeConfiguration', 'type': 'str'}, 'enable_face_mask_classifier': {'key': 'enableFaceMaskClassifier', 'type': 'str'}, } @@ -2327,11 +3743,31 @@ def __init__( self, **kwargs ): + """ + :keyword debug: If set to 'true', enables debugging mode for this operation. + :paramtype debug: str + :keyword calibration_configuration: Advanced calibration configuration. + :paramtype calibration_configuration: str + :keyword camera_configuration: Advanced camera configuration. + :paramtype camera_configuration: str + :keyword camera_calibrator_node_configuration: Advanced camera calibrator configuration. + :paramtype camera_calibrator_node_configuration: str + :keyword detector_node_configuration: Advanced detector node configuration. + :paramtype detector_node_configuration: str + :keyword tracker_node_configuration: Advanced tracker node configuration. + :paramtype tracker_node_configuration: str + :keyword enable_face_mask_classifier: If set to 'true', enables face mask detection for this + operation. + :paramtype enable_face_mask_classifier: str + """ super(SpatialAnalysisTypedOperationBase, self).__init__(**kwargs) self.type = 'SpatialAnalysisTypedOperationBase' # type: str self.debug = kwargs.get('debug', None) + self.calibration_configuration = kwargs.get('calibration_configuration', None) self.camera_configuration = kwargs.get('camera_configuration', None) + self.camera_calibrator_node_configuration = kwargs.get('camera_calibrator_node_configuration', None) self.detector_node_configuration = kwargs.get('detector_node_configuration', None) + self.tracker_node_configuration = kwargs.get('tracker_node_configuration', None) self.enable_face_mask_classifier = kwargs.get('enable_face_mask_classifier', None) @@ -2340,19 +3776,26 @@ class SpatialAnalysisPersonCountOperation(SpatialAnalysisTypedOperationBase): All required parameters must be populated in order to send to Azure. - :param type: Required. The Type discriminator for the derived types.Constant filled by server. - :type type: str - :param debug: If set to 'true', enables debugging mode for this operation. - :type debug: str - :param camera_configuration: Advanced camera configuration. - :type camera_configuration: str - :param detector_node_configuration: Advanced detector node configuration. - :type detector_node_configuration: str - :param enable_face_mask_classifier: If set to 'true', enables face mask detection for this + :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar debug: If set to 'true', enables debugging mode for this operation. + :vartype debug: str + :ivar calibration_configuration: Advanced calibration configuration. + :vartype calibration_configuration: str + :ivar camera_configuration: Advanced camera configuration. 
+ :vartype camera_configuration: str + :ivar camera_calibrator_node_configuration: Advanced camera calibrator configuration. + :vartype camera_calibrator_node_configuration: str + :ivar detector_node_configuration: Advanced detector node configuration. + :vartype detector_node_configuration: str + :ivar tracker_node_configuration: Advanced tracker node configuration. + :vartype tracker_node_configuration: str + :ivar enable_face_mask_classifier: If set to 'true', enables face mask detection for this operation. - :type enable_face_mask_classifier: str - :param zones: Required. The list of zones and optional events. - :type zones: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountZoneEvents] + :vartype enable_face_mask_classifier: str + :ivar zones: Required. The list of zones and optional events. + :vartype zones: + list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountZoneEvents] """ _validation = { @@ -2363,8 +3806,11 @@ class SpatialAnalysisPersonCountOperation(SpatialAnalysisTypedOperationBase): _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'debug': {'key': 'debug', 'type': 'str'}, + 'calibration_configuration': {'key': 'calibrationConfiguration', 'type': 'str'}, 'camera_configuration': {'key': 'cameraConfiguration', 'type': 'str'}, + 'camera_calibrator_node_configuration': {'key': 'cameraCalibratorNodeConfiguration', 'type': 'str'}, 'detector_node_configuration': {'key': 'detectorNodeConfiguration', 'type': 'str'}, + 'tracker_node_configuration': {'key': 'trackerNodeConfiguration', 'type': 'str'}, 'enable_face_mask_classifier': {'key': 'enableFaceMaskClassifier', 'type': 'str'}, 'zones': {'key': 'zones', 'type': '[SpatialAnalysisPersonCountZoneEvents]'}, } @@ -2373,6 +3819,26 @@ def __init__( self, **kwargs ): + """ + :keyword debug: If set to 'true', enables debugging mode for this operation. + :paramtype debug: str + :keyword calibration_configuration: Advanced calibration configuration. + :paramtype calibration_configuration: str + :keyword camera_configuration: Advanced camera configuration. + :paramtype camera_configuration: str + :keyword camera_calibrator_node_configuration: Advanced camera calibrator configuration. + :paramtype camera_calibrator_node_configuration: str + :keyword detector_node_configuration: Advanced detector node configuration. + :paramtype detector_node_configuration: str + :keyword tracker_node_configuration: Advanced tracker node configuration. + :paramtype tracker_node_configuration: str + :keyword enable_face_mask_classifier: If set to 'true', enables face mask detection for this + operation. + :paramtype enable_face_mask_classifier: str + :keyword zones: Required. The list of zones and optional events. + :paramtype zones: + list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountZoneEvents] + """ super(SpatialAnalysisPersonCountOperation, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SpatialAnalysisPersonCountOperation' # type: str self.zones = kwargs['zones'] @@ -2383,10 +3849,10 @@ class SpatialAnalysisPersonCountZoneEvents(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param zone: Required. The named zone. - :type zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase - :param events: The event configuration. - :type events: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountEvent] + :ivar zone: Required. The named zone. 
+ :vartype zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase + :ivar events: The event configuration. + :vartype events: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountEvent] """ _validation = { @@ -2402,6 +3868,12 @@ def __init__( self, **kwargs ): + """ + :keyword zone: Required. The named zone. + :paramtype zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase + :keyword events: The event configuration. + :paramtype events: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountEvent] + """ super(SpatialAnalysisPersonCountZoneEvents, self).__init__(**kwargs) self.zone = kwargs['zone'] self.events = kwargs.get('events', None) @@ -2410,20 +3882,20 @@ def __init__( class SpatialAnalysisPersonDistanceEvent(SpatialAnalysisOperationEventBase): """Defines a Spatial Analysis person distance operation eventing configuration. - :param threshold: The event threshold. - :type threshold: str - :param focus: The operation focus type. Possible values include: "center", "bottomCenter", + :ivar threshold: The event threshold. + :vartype threshold: str + :ivar focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". - :type focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus - :param trigger: The event trigger type. Possible values include: "event", "interval". - :type trigger: str or + :vartype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus + :ivar trigger: The event trigger type. Possible values include: "event", "interval". + :vartype trigger: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonDistanceEventTrigger - :param output_frequency: The event or interval output frequency. - :type output_frequency: str - :param minimum_distance_threshold: The minimum distance threshold. - :type minimum_distance_threshold: str - :param maximum_distance_threshold: The maximum distance threshold. - :type maximum_distance_threshold: str + :ivar output_frequency: The event or interval output frequency. + :vartype output_frequency: str + :ivar minimum_distance_threshold: The minimum distance threshold. + :vartype minimum_distance_threshold: str + :ivar maximum_distance_threshold: The maximum distance threshold. + :vartype maximum_distance_threshold: str """ _attribute_map = { @@ -2439,6 +3911,22 @@ def __init__( self, **kwargs ): + """ + :keyword threshold: The event threshold. + :paramtype threshold: str + :keyword focus: The operation focus type. Possible values include: "center", "bottomCenter", + "footprint". + :paramtype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus + :keyword trigger: The event trigger type. Possible values include: "event", "interval". + :paramtype trigger: str or + ~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonDistanceEventTrigger + :keyword output_frequency: The event or interval output frequency. + :paramtype output_frequency: str + :keyword minimum_distance_threshold: The minimum distance threshold. + :paramtype minimum_distance_threshold: str + :keyword maximum_distance_threshold: The maximum distance threshold. 
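# A minimal sketch (with placeholder values) of wiring the person-count models
# above together. Names are imported from the package root; NamedPolygonString
# is the string-based zone shape that ships with this SDK.
from azure.media.videoanalyzeredge import (
    NamedPolygonString,
    SpatialAnalysisPersonCountEvent,
    SpatialAnalysisPersonCountOperation,
    SpatialAnalysisPersonCountZoneEvents,
)

count_event = SpatialAnalysisPersonCountEvent(
    trigger="interval",    # emit results on a timer rather than per event
    output_frequency="1",  # one result per second
    focus="footprint",
)
person_count = SpatialAnalysisPersonCountOperation(
    zones=[
        SpatialAnalysisPersonCountZoneEvents(
            zone=NamedPolygonString(
                name="lobby",
                polygon="[[0.3,0.5],[0.6,0.5],[0.6,0.8],[0.3,0.8]]",
            ),
            events=[count_event],
        )
    ],
)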
+ :paramtype maximum_distance_threshold: str + """ super(SpatialAnalysisPersonDistanceEvent, self).__init__(**kwargs) self.trigger = kwargs.get('trigger', None) self.output_frequency = kwargs.get('output_frequency', None) @@ -2451,19 +3939,25 @@ class SpatialAnalysisPersonDistanceOperation(SpatialAnalysisTypedOperationBase): All required parameters must be populated in order to send to Azure. - :param type: Required. The Type discriminator for the derived types.Constant filled by server. - :type type: str - :param debug: If set to 'true', enables debugging mode for this operation. - :type debug: str - :param camera_configuration: Advanced camera configuration. - :type camera_configuration: str - :param detector_node_configuration: Advanced detector node configuration. - :type detector_node_configuration: str - :param enable_face_mask_classifier: If set to 'true', enables face mask detection for this + :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar debug: If set to 'true', enables debugging mode for this operation. + :vartype debug: str + :ivar calibration_configuration: Advanced calibration configuration. + :vartype calibration_configuration: str + :ivar camera_configuration: Advanced camera configuration. + :vartype camera_configuration: str + :ivar camera_calibrator_node_configuration: Advanced camera calibrator configuration. + :vartype camera_calibrator_node_configuration: str + :ivar detector_node_configuration: Advanced detector node configuration. + :vartype detector_node_configuration: str + :ivar tracker_node_configuration: Advanced tracker node configuration. + :vartype tracker_node_configuration: str + :ivar enable_face_mask_classifier: If set to 'true', enables face mask detection for this operation. - :type enable_face_mask_classifier: str - :param zones: Required. The list of zones with optional events. - :type zones: + :vartype enable_face_mask_classifier: str + :ivar zones: Required. The list of zones with optional events. + :vartype zones: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonDistanceZoneEvents] """ @@ -2475,8 +3969,11 @@ class SpatialAnalysisPersonDistanceOperation(SpatialAnalysisTypedOperationBase): _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'debug': {'key': 'debug', 'type': 'str'}, + 'calibration_configuration': {'key': 'calibrationConfiguration', 'type': 'str'}, 'camera_configuration': {'key': 'cameraConfiguration', 'type': 'str'}, + 'camera_calibrator_node_configuration': {'key': 'cameraCalibratorNodeConfiguration', 'type': 'str'}, 'detector_node_configuration': {'key': 'detectorNodeConfiguration', 'type': 'str'}, + 'tracker_node_configuration': {'key': 'trackerNodeConfiguration', 'type': 'str'}, 'enable_face_mask_classifier': {'key': 'enableFaceMaskClassifier', 'type': 'str'}, 'zones': {'key': 'zones', 'type': '[SpatialAnalysisPersonDistanceZoneEvents]'}, } @@ -2485,6 +3982,26 @@ def __init__( self, **kwargs ): + """ + :keyword debug: If set to 'true', enables debugging mode for this operation. + :paramtype debug: str + :keyword calibration_configuration: Advanced calibration configuration. + :paramtype calibration_configuration: str + :keyword camera_configuration: Advanced camera configuration. + :paramtype camera_configuration: str + :keyword camera_calibrator_node_configuration: Advanced camera calibrator configuration. 
+ :paramtype camera_calibrator_node_configuration: str + :keyword detector_node_configuration: Advanced detector node configuration. + :paramtype detector_node_configuration: str + :keyword tracker_node_configuration: Advanced tracker node configuration. + :paramtype tracker_node_configuration: str + :keyword enable_face_mask_classifier: If set to 'true', enables face mask detection for this + operation. + :paramtype enable_face_mask_classifier: str + :keyword zones: Required. The list of zones with optional events. + :paramtype zones: + list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonDistanceZoneEvents] + """ super(SpatialAnalysisPersonDistanceOperation, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SpatialAnalysisPersonDistanceOperation' # type: str self.zones = kwargs['zones'] @@ -2495,10 +4012,11 @@ class SpatialAnalysisPersonDistanceZoneEvents(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param zone: Required. The named zone. - :type zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase - :param events: The event configuration. - :type events: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonDistanceEvent] + :ivar zone: Required. The named zone. + :vartype zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase + :ivar events: The event configuration. + :vartype events: + list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonDistanceEvent] """ _validation = { @@ -2514,6 +4032,13 @@ def __init__( self, **kwargs ): + """ + :keyword zone: Required. The named zone. + :paramtype zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase + :keyword events: The event configuration. + :paramtype events: + list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonDistanceEvent] + """ super(SpatialAnalysisPersonDistanceZoneEvents, self).__init__(**kwargs) self.zone = kwargs['zone'] self.events = kwargs.get('events', None) @@ -2522,11 +4047,11 @@ def __init__( class SpatialAnalysisPersonLineCrossingEvent(SpatialAnalysisOperationEventBase): """Defines a Spatial Analysis person line crossing operation eventing configuration. - :param threshold: The event threshold. - :type threshold: str - :param focus: The operation focus type. Possible values include: "center", "bottomCenter", + :ivar threshold: The event threshold. + :vartype threshold: str + :ivar focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". - :type focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus + :vartype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus """ _attribute_map = { @@ -2538,6 +4063,13 @@ def __init__( self, **kwargs ): + """ + :keyword threshold: The event threshold. + :paramtype threshold: str + :keyword focus: The operation focus type. Possible values include: "center", "bottomCenter", + "footprint". + :paramtype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus + """ super(SpatialAnalysisPersonLineCrossingEvent, self).__init__(**kwargs) @@ -2546,10 +4078,10 @@ class SpatialAnalysisPersonLineCrossingLineEvents(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param line: Required. The named line. - :type line: ~azure.media.videoanalyzer.edge.models.NamedLineBase - :param events: The event configuration. - :type events: + :ivar line: Required. The named line. 
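# A similar sketch for the person-distance operation: the event carries the
# distance thresholds (placeholder values below), and zones reuse the same
# named-polygon shape as the person-count sketch above.
from azure.media.videoanalyzeredge import (
    NamedPolygonString,
    SpatialAnalysisPersonDistanceEvent,
    SpatialAnalysisPersonDistanceOperation,
    SpatialAnalysisPersonDistanceZoneEvents,
)

distance_event = SpatialAnalysisPersonDistanceEvent(
    trigger="event",
    minimum_distance_threshold="1.0",
    maximum_distance_threshold="4.0",
)
person_distance = SpatialAnalysisPersonDistanceOperation(
    zones=[
        SpatialAnalysisPersonDistanceZoneEvents(
            zone=NamedPolygonString(
                name="queue",
                polygon="[[0.1,0.1],[0.9,0.1],[0.9,0.9],[0.1,0.9]]",
            ),
            events=[distance_event],
        )
    ],
)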
+ :vartype line: ~azure.media.videoanalyzer.edge.models.NamedLineBase + :ivar events: The event configuration. + :vartype events: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonLineCrossingEvent] """ @@ -2566,6 +4098,13 @@ def __init__( self, **kwargs ): + """ + :keyword line: Required. The named line. + :paramtype line: ~azure.media.videoanalyzer.edge.models.NamedLineBase + :keyword events: The event configuration. + :paramtype events: + list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonLineCrossingEvent] + """ super(SpatialAnalysisPersonLineCrossingLineEvents, self).__init__(**kwargs) self.line = kwargs['line'] self.events = kwargs.get('events', None) @@ -2576,19 +4115,25 @@ class SpatialAnalysisPersonLineCrossingOperation(SpatialAnalysisTypedOperationBa All required parameters must be populated in order to send to Azure. - :param type: Required. The Type discriminator for the derived types.Constant filled by server. - :type type: str - :param debug: If set to 'true', enables debugging mode for this operation. - :type debug: str - :param camera_configuration: Advanced camera configuration. - :type camera_configuration: str - :param detector_node_configuration: Advanced detector node configuration. - :type detector_node_configuration: str - :param enable_face_mask_classifier: If set to 'true', enables face mask detection for this + :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar debug: If set to 'true', enables debugging mode for this operation. + :vartype debug: str + :ivar calibration_configuration: Advanced calibration configuration. + :vartype calibration_configuration: str + :ivar camera_configuration: Advanced camera configuration. + :vartype camera_configuration: str + :ivar camera_calibrator_node_configuration: Advanced camera calibrator configuration. + :vartype camera_calibrator_node_configuration: str + :ivar detector_node_configuration: Advanced detector node configuration. + :vartype detector_node_configuration: str + :ivar tracker_node_configuration: Advanced tracker node configuration. + :vartype tracker_node_configuration: str + :ivar enable_face_mask_classifier: If set to 'true', enables face mask detection for this operation. - :type enable_face_mask_classifier: str - :param lines: Required. The list of lines with optional events. - :type lines: + :vartype enable_face_mask_classifier: str + :ivar lines: Required. The list of lines with optional events. 
+ :vartype lines: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonLineCrossingLineEvents] """ @@ -2600,8 +4145,11 @@ class SpatialAnalysisPersonLineCrossingOperation(SpatialAnalysisTypedOperationBa _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'debug': {'key': 'debug', 'type': 'str'}, + 'calibration_configuration': {'key': 'calibrationConfiguration', 'type': 'str'}, 'camera_configuration': {'key': 'cameraConfiguration', 'type': 'str'}, + 'camera_calibrator_node_configuration': {'key': 'cameraCalibratorNodeConfiguration', 'type': 'str'}, 'detector_node_configuration': {'key': 'detectorNodeConfiguration', 'type': 'str'}, + 'tracker_node_configuration': {'key': 'trackerNodeConfiguration', 'type': 'str'}, 'enable_face_mask_classifier': {'key': 'enableFaceMaskClassifier', 'type': 'str'}, 'lines': {'key': 'lines', 'type': '[SpatialAnalysisPersonLineCrossingLineEvents]'}, } @@ -2610,6 +4158,26 @@ def __init__( self, **kwargs ): + """ + :keyword debug: If set to 'true', enables debugging mode for this operation. + :paramtype debug: str + :keyword calibration_configuration: Advanced calibration configuration. + :paramtype calibration_configuration: str + :keyword camera_configuration: Advanced camera configuration. + :paramtype camera_configuration: str + :keyword camera_calibrator_node_configuration: Advanced camera calibrator configuration. + :paramtype camera_calibrator_node_configuration: str + :keyword detector_node_configuration: Advanced detector node configuration. + :paramtype detector_node_configuration: str + :keyword tracker_node_configuration: Advanced tracker node configuration. + :paramtype tracker_node_configuration: str + :keyword enable_face_mask_classifier: If set to 'true', enables face mask detection for this + operation. + :paramtype enable_face_mask_classifier: str + :keyword lines: Required. The list of lines with optional events. + :paramtype lines: + list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonLineCrossingLineEvents] + """ super(SpatialAnalysisPersonLineCrossingOperation, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SpatialAnalysisPersonLineCrossingOperation' # type: str self.lines = kwargs['lines'] @@ -2618,13 +4186,13 @@ def __init__( class SpatialAnalysisPersonZoneCrossingEvent(SpatialAnalysisOperationEventBase): """Defines a Spatial Analysis person crossing zone operation eventing configuration. - :param threshold: The event threshold. - :type threshold: str - :param focus: The operation focus type. Possible values include: "center", "bottomCenter", + :ivar threshold: The event threshold. + :vartype threshold: str + :ivar focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". - :type focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus - :param event_type: The event type. Possible values include: "zoneCrossing", "zoneDwellTime". - :type event_type: str or + :vartype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus + :ivar event_type: The event type. Possible values include: "zoneCrossing", "zoneDwellTime". + :vartype event_type: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonZoneCrossingEventType """ @@ -2638,6 +4206,16 @@ def __init__( self, **kwargs ): + """ + :keyword threshold: The event threshold. + :paramtype threshold: str + :keyword focus: The operation focus type. Possible values include: "center", "bottomCenter", + "footprint". 
+ :paramtype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus + :keyword event_type: The event type. Possible values include: "zoneCrossing", "zoneDwellTime". + :paramtype event_type: str or + ~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonZoneCrossingEventType + """ super(SpatialAnalysisPersonZoneCrossingEvent, self).__init__(**kwargs) self.event_type = kwargs.get('event_type', None) @@ -2647,19 +4225,25 @@ class SpatialAnalysisPersonZoneCrossingOperation(SpatialAnalysisTypedOperationBa All required parameters must be populated in order to send to Azure. - :param type: Required. The Type discriminator for the derived types.Constant filled by server. - :type type: str - :param debug: If set to 'true', enables debugging mode for this operation. - :type debug: str - :param camera_configuration: Advanced camera configuration. - :type camera_configuration: str - :param detector_node_configuration: Advanced detector node configuration. - :type detector_node_configuration: str - :param enable_face_mask_classifier: If set to 'true', enables face mask detection for this + :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar debug: If set to 'true', enables debugging mode for this operation. + :vartype debug: str + :ivar calibration_configuration: Advanced calibration configuration. + :vartype calibration_configuration: str + :ivar camera_configuration: Advanced camera configuration. + :vartype camera_configuration: str + :ivar camera_calibrator_node_configuration: Advanced camera calibrator configuration. + :vartype camera_calibrator_node_configuration: str + :ivar detector_node_configuration: Advanced detector node configuration. + :vartype detector_node_configuration: str + :ivar tracker_node_configuration: Advanced tracker node configuration. + :vartype tracker_node_configuration: str + :ivar enable_face_mask_classifier: If set to 'true', enables face mask detection for this operation. - :type enable_face_mask_classifier: str - :param zones: Required. The list of zones with optional events. - :type zones: + :vartype enable_face_mask_classifier: str + :ivar zones: Required. The list of zones with optional events. + :vartype zones: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonZoneCrossingZoneEvents] """ @@ -2671,8 +4255,11 @@ class SpatialAnalysisPersonZoneCrossingOperation(SpatialAnalysisTypedOperationBa _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'debug': {'key': 'debug', 'type': 'str'}, + 'calibration_configuration': {'key': 'calibrationConfiguration', 'type': 'str'}, 'camera_configuration': {'key': 'cameraConfiguration', 'type': 'str'}, + 'camera_calibrator_node_configuration': {'key': 'cameraCalibratorNodeConfiguration', 'type': 'str'}, 'detector_node_configuration': {'key': 'detectorNodeConfiguration', 'type': 'str'}, + 'tracker_node_configuration': {'key': 'trackerNodeConfiguration', 'type': 'str'}, 'enable_face_mask_classifier': {'key': 'enableFaceMaskClassifier', 'type': 'str'}, 'zones': {'key': 'zones', 'type': '[SpatialAnalysisPersonZoneCrossingZoneEvents]'}, } @@ -2681,6 +4268,26 @@ def __init__( self, **kwargs ): + """ + :keyword debug: If set to 'true', enables debugging mode for this operation. + :paramtype debug: str + :keyword calibration_configuration: Advanced calibration configuration. + :paramtype calibration_configuration: str + :keyword camera_configuration: Advanced camera configuration. 
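# Zone-crossing events add an event_type on top of the shared threshold and
# focus settings; a short dwell-time sketch with placeholder values (the
# operation is assembled the same way as the other spatial analysis
# operations above):
from azure.media.videoanalyzeredge import SpatialAnalysisPersonZoneCrossingEvent

dwell_event = SpatialAnalysisPersonZoneCrossingEvent(
    event_type="zoneDwellTime",  # or "zoneCrossing"
    focus="footprint",
)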
+ :paramtype camera_configuration: str + :keyword camera_calibrator_node_configuration: Advanced camera calibrator configuration. + :paramtype camera_calibrator_node_configuration: str + :keyword detector_node_configuration: Advanced detector node configuration. + :paramtype detector_node_configuration: str + :keyword tracker_node_configuration: Advanced tracker node configuration. + :paramtype tracker_node_configuration: str + :keyword enable_face_mask_classifier: If set to 'true', enables face mask detection for this + operation. + :paramtype enable_face_mask_classifier: str + :keyword zones: Required. The list of zones with optional events. + :paramtype zones: + list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonZoneCrossingZoneEvents] + """ super(SpatialAnalysisPersonZoneCrossingOperation, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SpatialAnalysisPersonZoneCrossingOperation' # type: str self.zones = kwargs['zones'] @@ -2691,10 +4298,10 @@ class SpatialAnalysisPersonZoneCrossingZoneEvents(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param zone: Required. The named zone. - :type zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase - :param events: The event configuration. - :type events: + :ivar zone: Required. The named zone. + :vartype zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase + :ivar events: The event configuration. + :vartype events: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonZoneCrossingEvent] """ @@ -2711,20 +4318,61 @@ def __init__( self, **kwargs ): + """ + :keyword zone: Required. The named zone. + :paramtype zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase + :keyword events: The event configuration. + :paramtype events: + list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonZoneCrossingEvent] + """ super(SpatialAnalysisPersonZoneCrossingZoneEvents, self).__init__(**kwargs) self.zone = kwargs['zone'] self.events = kwargs.get('events', None) +class SymmetricKeyCredentials(CredentialsBase): + """Symmetric key credential. + + All required parameters must be populated in order to send to Azure. + + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar key: Required. Symmetric key credential. + :vartype key: str + """ + + _validation = { + 'type': {'required': True}, + 'key': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'key': {'key': 'key', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword key: Required. Symmetric key credential. + :paramtype key: str + """ + super(SymmetricKeyCredentials, self).__init__(**kwargs) + self.type = '#Microsoft.VideoAnalyzer.SymmetricKeyCredentials' # type: str + self.key = kwargs['key'] + + class SystemData(msrest.serialization.Model): """Read-only system metadata associated with a resource. - :param created_at: Date and time when this resource was first created. Value is represented in + :ivar created_at: Date and time when this resource was first created. Value is represented in UTC according to the ISO8601 date format. - :type created_at: ~datetime.datetime - :param last_modified_at: Date and time when this resource was last modified. Value is + :vartype created_at: ~datetime.datetime + :ivar last_modified_at: Date and time when this resource was last modified. Value is represented in UTC according to the ISO8601 date format. 
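# The new SymmetricKeyCredentials type pairs with this release's remote device
# adapter support. A sketch assuming the IotHubDeviceConnection model from
# this package; the key is a placeholder that would normally come from a
# secret store.
from azure.media.videoanalyzeredge import IotHubDeviceConnection, SymmetricKeyCredentials

device_connection = IotHubDeviceConnection(
    device_id="onvif-camera-01",
    credentials=SymmetricKeyCredentials(key="<base64-encoded-device-key>"),
)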
- :type last_modified_at: ~datetime.datetime + :vartype last_modified_at: ~datetime.datetime """ _attribute_map = { @@ -2736,6 +4384,14 @@ def __init__( self, **kwargs ): + """ + :keyword created_at: Date and time when this resource was first created. Value is represented + in UTC according to the ISO8601 date format. + :paramtype created_at: ~datetime.datetime + :keyword last_modified_at: Date and time when this resource was last modified. Value is + represented in UTC according to the ISO8601 date format. + :paramtype last_modified_at: ~datetime.datetime + """ super(SystemData, self).__init__(**kwargs) self.created_at = kwargs.get('created_at', None) self.last_modified_at = kwargs.get('last_modified_at', None) @@ -2746,19 +4402,19 @@ class TlsEndpoint(EndpointBase): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param credentials: Credentials to be presented to the endpoint. - :type credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase - :param url: Required. The endpoint URL for Video Analyzer to connect to. - :type url: str - :param trusted_certificates: List of trusted certificate authorities when authenticating a TLS + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar credentials: Credentials to be presented to the endpoint. + :vartype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase + :ivar url: Required. The endpoint URL for Video Analyzer to connect to. + :vartype url: str + :ivar trusted_certificates: List of trusted certificate authorities when authenticating a TLS connection. A null list designates that Azure Video Analyzer's list of trusted authorities should be used. - :type trusted_certificates: ~azure.media.videoanalyzer.edge.models.CertificateSource - :param validation_options: Validation options to use when authenticating a TLS connection. By + :vartype trusted_certificates: ~azure.media.videoanalyzer.edge.models.CertificateSource + :ivar validation_options: Validation options to use when authenticating a TLS connection. By default, strict validation is used. - :type validation_options: ~azure.media.videoanalyzer.edge.models.TlsValidationOptions + :vartype validation_options: ~azure.media.videoanalyzer.edge.models.TlsValidationOptions """ _validation = { @@ -2778,6 +4434,19 @@ def __init__( self, **kwargs ): + """ + :keyword credentials: Credentials to be presented to the endpoint. + :paramtype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase + :keyword url: Required. The endpoint URL for Video Analyzer to connect to. + :paramtype url: str + :keyword trusted_certificates: List of trusted certificate authorities when authenticating a + TLS connection. A null list designates that Azure Video Analyzer's list of trusted authorities + should be used. + :paramtype trusted_certificates: ~azure.media.videoanalyzer.edge.models.CertificateSource + :keyword validation_options: Validation options to use when authenticating a TLS connection. By + default, strict validation is used. 
+ :paramtype validation_options: ~azure.media.videoanalyzer.edge.models.TlsValidationOptions + """ super(TlsEndpoint, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.TlsEndpoint' # type: str self.trusted_certificates = kwargs.get('trusted_certificates', None) @@ -2787,12 +4456,12 @@ def __init__( class TlsValidationOptions(msrest.serialization.Model): """Options for controlling the validation of TLS endpoints. - :param ignore_hostname: When set to 'true' causes the certificate subject name validation to be + :ivar ignore_hostname: When set to 'true' causes the certificate subject name validation to be skipped. Default is 'false'. - :type ignore_hostname: str - :param ignore_signature: When set to 'true' causes the certificate chain trust validation to be + :vartype ignore_hostname: str + :ivar ignore_signature: When set to 'true' causes the certificate chain trust validation to be skipped. Default is 'false'. - :type ignore_signature: str + :vartype ignore_signature: str """ _attribute_map = { @@ -2804,6 +4473,14 @@ def __init__( self, **kwargs ): + """ + :keyword ignore_hostname: When set to 'true' causes the certificate subject name validation to + be skipped. Default is 'false'. + :paramtype ignore_hostname: str + :keyword ignore_signature: When set to 'true' causes the certificate chain trust validation to + be skipped. Default is 'false'. + :paramtype ignore_signature: str + """ super(TlsValidationOptions, self).__init__(**kwargs) self.ignore_hostname = kwargs.get('ignore_hostname', None) self.ignore_signature = kwargs.get('ignore_signature', None) @@ -2814,12 +4491,12 @@ class UnsecuredEndpoint(EndpointBase): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param credentials: Credentials to be presented to the endpoint. - :type credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase - :param url: Required. The endpoint URL for Video Analyzer to connect to. - :type url: str + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar credentials: Credentials to be presented to the endpoint. + :vartype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase + :ivar url: Required. The endpoint URL for Video Analyzer to connect to. + :vartype url: str """ _validation = { @@ -2837,6 +4514,12 @@ def __init__( self, **kwargs ): + """ + :keyword credentials: Credentials to be presented to the endpoint. + :paramtype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase + :keyword url: Required. The endpoint URL for Video Analyzer to connect to. + :paramtype url: str + """ super(UnsecuredEndpoint, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.UnsecuredEndpoint' # type: str @@ -2846,14 +4529,14 @@ class UsernamePasswordCredentials(CredentialsBase): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param username: Required. Username to be presented as part of the credentials. - :type username: str - :param password: Required. Password to be presented as part of the credentials. It is + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar username: Required. Username to be presented as part of the credentials. 
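# A sketch of a TLS endpoint with relaxed hostname validation; the URL is a
# placeholder, and the credential values are parameterized as the password
# documentation below recommends.
from azure.media.videoanalyzeredge import (
    TlsEndpoint,
    TlsValidationOptions,
    UsernamePasswordCredentials,
)

endpoint = TlsEndpoint(
    url="rtsps://camera.example.com:322/stream",
    credentials=UsernamePasswordCredentials(
        username="${rtspUserName}",
        password="${rtspPassword}",
    ),
    validation_options=TlsValidationOptions(
        ignore_hostname="true",   # skip subject-name validation only
        ignore_signature="false",
    ),
)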
+ :vartype username: str + :ivar password: Required. Password to be presented as part of the credentials. It is recommended that this value is parameterized as a secret string in order to prevent this value to be returned as part of the resource on API requests. - :type password: str + :vartype password: str """ _validation = { @@ -2872,6 +4555,14 @@ def __init__( self, **kwargs ): + """ + :keyword username: Required. Username to be presented as part of the credentials. + :paramtype username: str + :keyword password: Required. Password to be presented as part of the credentials. It is + recommended that this value is parameterized as a secret string in order to prevent this value + to be returned as part of the resource on API requests. + :paramtype password: str + """ super(UsernamePasswordCredentials, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials' # type: str self.username = kwargs['username'] @@ -2881,36 +4572,170 @@ def __init__( class VideoCreationProperties(msrest.serialization.Model): """Optional video properties to be used in case a new video resource needs to be created on the service. These will not take effect if the video already exists. - :param title: Optional video title provided by the user. Value can be up to 256 characters - long. - :type title: str - :param description: Optional video description provided by the user. Value can be up to 2048 + :ivar title: Optional video title provided by the user. Value can be up to 256 characters long. + :vartype title: str + :ivar description: Optional video description provided by the user. Value can be up to 2048 characters long. - :type description: str - :param segment_length: Video segment length indicates the length of individual video files + :vartype description: str + :ivar segment_length: Video segment length indicates the length of individual video files (segments) which are persisted to storage. Smaller segments provide lower archive playback latency but generate larger volume of storage transactions. Larger segments reduce the amount of storage transactions while increasing the archive playback latency. Value must be specified in ISO8601 duration format (i.e. "PT30S" equals 30 seconds) and can vary between 30 seconds to 5 minutes, in 30 seconds increments. Changing this value after the video is initially created can lead to errors when uploading media to the archive. Default value is 30 seconds. - :type segment_length: str + :vartype segment_length: str + :ivar retention_period: Video retention period indicates how long the video is kept in storage, + and must be a multiple of 1 day. For example, if this is set to 30 days, then content older + than 30 days will be deleted. + :vartype retention_period: str """ _attribute_map = { 'title': {'key': 'title', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'segment_length': {'key': 'segmentLength', 'type': 'str'}, + 'retention_period': {'key': 'retentionPeriod', 'type': 'str'}, } def __init__( self, **kwargs ): + """ + :keyword title: Optional video title provided by the user. Value can be up to 256 characters + long. + :paramtype title: str + :keyword description: Optional video description provided by the user. Value can be up to 2048 + characters long. + :paramtype description: str + :keyword segment_length: Video segment length indicates the length of individual video files + (segments) which are persisted to storage. 
Smaller segments provide lower archive playback
+ latency but generate larger volume of storage transactions. Larger segments reduce the amount
+ of storage transactions while increasing the archive playback latency. Value must be specified
+ in ISO8601 duration format (i.e. "PT30S" equals 30 seconds) and can vary between 30 seconds to
+ 5 minutes, in 30 seconds increments. Changing this value after the video is initially created
+ can lead to errors when uploading media to the archive. Default value is 30 seconds.
+ :paramtype segment_length: str
+ :keyword retention_period: Video retention period indicates how long the video is kept in
+ storage, and must be a multiple of 1 day. For example, if this is set to 30 days, then content
+ older than 30 days will be deleted.
+ :paramtype retention_period: str
+ """
super(VideoCreationProperties, self).__init__(**kwargs)
self.title = kwargs.get('title', None)
self.description = kwargs.get('description', None)
self.segment_length = kwargs.get('segment_length', None)
+ self.retention_period = kwargs.get('retention_period', None)
+
+
+class VideoEncoderConfiguration(msrest.serialization.Model):
+ """Class representing the video encoder configuration.
+
+ :ivar encoding: The video codec used by the Media Profile. Possible values include: "JPEG",
+ "H264", "MPEG4".
+ :vartype encoding: str or ~azure.media.videoanalyzer.edge.models.VideoEncoding
+ :ivar quality: Relative value representing the quality of the video.
+ :vartype quality: float
+ :ivar resolution: The Video Resolution.
+ :vartype resolution: ~azure.media.videoanalyzer.edge.models.VideoResolution
+ :ivar rate_control: The Video's rate control.
+ :vartype rate_control: ~azure.media.videoanalyzer.edge.models.RateControl
+ :ivar h264: The H264 Configuration.
+ :vartype h264: ~azure.media.videoanalyzer.edge.models.H264Configuration
+ :ivar mpeg4: The MPEG4 Configuration.
+ :vartype mpeg4: ~azure.media.videoanalyzer.edge.models.MPEG4Configuration
+ """
+
+ _attribute_map = {
+ 'encoding': {'key': 'encoding', 'type': 'str'},
+ 'quality': {'key': 'quality', 'type': 'float'},
+ 'resolution': {'key': 'resolution', 'type': 'VideoResolution'},
+ 'rate_control': {'key': 'rateControl', 'type': 'RateControl'},
+ 'h264': {'key': 'h264', 'type': 'H264Configuration'},
+ 'mpeg4': {'key': 'mpeg4', 'type': 'MPEG4Configuration'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ """
+ :keyword encoding: The video codec used by the Media Profile. Possible values include: "JPEG",
+ "H264", "MPEG4".
+ :paramtype encoding: str or ~azure.media.videoanalyzer.edge.models.VideoEncoding
+ :keyword quality: Relative value representing the quality of the video.
+ :paramtype quality: float
+ :keyword resolution: The Video Resolution.
+ :paramtype resolution: ~azure.media.videoanalyzer.edge.models.VideoResolution
+ :keyword rate_control: The Video's rate control.
+ :paramtype rate_control: ~azure.media.videoanalyzer.edge.models.RateControl
+ :keyword h264: The H264 Configuration.
+ :paramtype h264: ~azure.media.videoanalyzer.edge.models.H264Configuration
+ :keyword mpeg4: The MPEG4 Configuration.
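# A sketch of the new retention policy on video creation properties; both
# duration values follow the ISO8601 conventions described above.
from azure.media.videoanalyzeredge import VideoCreationProperties

creation_properties = VideoCreationProperties(
    title="Parking lot camera",
    description="Overnight archive",
    segment_length="PT30S",    # 30-second segments (the default)
    retention_period="P30D",   # delete content older than 30 days
)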
+ :paramtype mpeg4: ~azure.media.videoanalyzer.edge.models.MPEG4Configuration + """ + super(VideoEncoderConfiguration, self).__init__(**kwargs) + self.encoding = kwargs.get('encoding', None) + self.quality = kwargs.get('quality', None) + self.resolution = kwargs.get('resolution', None) + self.rate_control = kwargs.get('rate_control', None) + self.h264 = kwargs.get('h264', None) + self.mpeg4 = kwargs.get('mpeg4', None) + + +class VideoPublishingOptions(msrest.serialization.Model): + """Options for changing video publishing behavior on the video sink and output video. + + :ivar enable_video_preview_image: When set to 'true' the video will publish preview images. + Default is 'false'. + :vartype enable_video_preview_image: str + """ + + _attribute_map = { + 'enable_video_preview_image': {'key': 'enableVideoPreviewImage', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword enable_video_preview_image: When set to 'true' the video will publish preview images. + Default is 'false'. + :paramtype enable_video_preview_image: str + """ + super(VideoPublishingOptions, self).__init__(**kwargs) + self.enable_video_preview_image = kwargs.get('enable_video_preview_image', None) + + +class VideoResolution(msrest.serialization.Model): + """The Video resolution. + + :ivar width: The number of columns of the Video image. + :vartype width: float + :ivar height: The number of lines of the Video image. + :vartype height: float + """ + + _attribute_map = { + 'width': {'key': 'width', 'type': 'float'}, + 'height': {'key': 'height', 'type': 'float'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword width: The number of columns of the Video image. + :paramtype width: float + :keyword height: The number of lines of the Video image. + :paramtype height: float + """ + super(VideoResolution, self).__init__(**kwargs) + self.width = kwargs.get('width', None) + self.height = kwargs.get('height', None) class VideoSink(SinkNodeBase): @@ -2918,28 +4743,33 @@ class VideoSink(SinkNodeBase): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str - :param inputs: Required. An array of upstream node references within the topology to be used as + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. - :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] - :param video_name: Required. Name of a new or existing Video Analyzer video resource used for + :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :ivar video_name: Required. Name of a new or existing Video Analyzer video resource used for the media recording. - :type video_name: str - :param video_creation_properties: Optional video properties to be used in case a new video + :vartype video_name: str + :ivar video_creation_properties: Optional video properties to be used in case a new video resource needs to be created on the service. - :type video_creation_properties: ~azure.media.videoanalyzer.edge.models.VideoCreationProperties - :param local_media_cache_path: Required. 
Path to a local file system directory for caching of + :vartype video_creation_properties: + ~azure.media.videoanalyzer.edge.models.VideoCreationProperties + :ivar video_publishing_options: Optional video publishing options to be used for changing + publishing behavior of the output video. + :vartype video_publishing_options: + ~azure.media.videoanalyzer.edge.models.VideoPublishingOptions + :ivar local_media_cache_path: Required. Path to a local file system directory for caching of temporary media files. This will also be used to store content which cannot be immediately uploaded to Azure due to Internet connectivity issues. - :type local_media_cache_path: str - :param local_media_cache_maximum_size_mi_b: Required. Maximum amount of disk space that can be + :vartype local_media_cache_path: str + :ivar local_media_cache_maximum_size_mi_b: Required. Maximum amount of disk space that can be used for caching of temporary media files. Once this limit is reached, the oldest segments of the media archive will be continuously deleted in order to make space for new media, thus leading to gaps in the cloud recorded content. - :type local_media_cache_maximum_size_mi_b: str + :vartype local_media_cache_maximum_size_mi_b: str """ _validation = { @@ -2957,6 +4787,7 @@ class VideoSink(SinkNodeBase): 'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, 'video_name': {'key': 'videoName', 'type': 'str'}, 'video_creation_properties': {'key': 'videoCreationProperties', 'type': 'VideoCreationProperties'}, + 'video_publishing_options': {'key': 'videoPublishingOptions', 'type': 'VideoPublishingOptions'}, 'local_media_cache_path': {'key': 'localMediaCachePath', 'type': 'str'}, 'local_media_cache_maximum_size_mi_b': {'key': 'localMediaCacheMaximumSizeMiB', 'type': 'str'}, } @@ -2965,9 +4796,37 @@ def __init__( self, **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + :keyword inputs: Required. An array of upstream node references within the topology to be used + as inputs for this node. + :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :keyword video_name: Required. Name of a new or existing Video Analyzer video resource used for + the media recording. + :paramtype video_name: str + :keyword video_creation_properties: Optional video properties to be used in case a new video + resource needs to be created on the service. + :paramtype video_creation_properties: + ~azure.media.videoanalyzer.edge.models.VideoCreationProperties + :keyword video_publishing_options: Optional video publishing options to be used for changing + publishing behavior of the output video. + :paramtype video_publishing_options: + ~azure.media.videoanalyzer.edge.models.VideoPublishingOptions + :keyword local_media_cache_path: Required. Path to a local file system directory for caching of + temporary media files. This will also be used to store content which cannot be immediately + uploaded to Azure due to Internet connectivity issues. + :paramtype local_media_cache_path: str + :keyword local_media_cache_maximum_size_mi_b: Required. Maximum amount of disk space that can + be used for caching of temporary media files. Once this limit is reached, the oldest segments + of the media archive will be continuously deleted in order to make space for new media, thus + leading to gaps in the cloud recorded content. 
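# Putting the sink together: this sketch records to a named video resource,
# enables the new preview images, and applies a retention policy. NodeInput
# ships with this package; the node name and cache path are placeholders.
from azure.media.videoanalyzeredge import (
    NodeInput,
    VideoCreationProperties,
    VideoPublishingOptions,
    VideoSink,
)

video_sink = VideoSink(
    name="videoSink",
    inputs=[NodeInput(node_name="rtspSource")],
    video_name="sample-video-001",
    video_creation_properties=VideoCreationProperties(
        segment_length="PT30S", retention_period="P30D"
    ),
    video_publishing_options=VideoPublishingOptions(enable_video_preview_image="true"),
    local_media_cache_path="/var/lib/videoanalyzer/tmp/",
    local_media_cache_maximum_size_mi_b="2048",
)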
+ :paramtype local_media_cache_maximum_size_mi_b: str + """ super(VideoSink, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSink' # type: str self.video_name = kwargs['video_name'] self.video_creation_properties = kwargs.get('video_creation_properties', None) + self.video_publishing_options = kwargs.get('video_publishing_options', None) self.local_media_cache_path = kwargs['local_media_cache_path'] self.local_media_cache_maximum_size_mi_b = kwargs['local_media_cache_maximum_size_mi_b'] diff --git a/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_generated/models/_models_py3.py b/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_generated/models/_models_py3.py index e606b102579c..a78b99e43b11 100644 --- a/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_generated/models/_models_py3.py +++ b/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_generated/models/_models_py3.py @@ -7,7 +7,7 @@ # -------------------------------------------------------------------------- import datetime -from typing import List, Optional, Union +from typing import Any, List, Optional, Union import msrest.serialization @@ -22,8 +22,8 @@ class CertificateSource(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str """ _validation = { @@ -42,6 +42,8 @@ def __init__( self, **kwargs ): + """ + """ super(CertificateSource, self).__init__(**kwargs) self.type = None # type: Optional[str] @@ -54,13 +56,13 @@ class ProcessorNodeBase(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str - :param inputs: Required. An array of upstream node references within the topology to be used as + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. - :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] """ _validation = { @@ -86,6 +88,13 @@ def __init__( inputs: List["NodeInput"], **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + :keyword inputs: Required. An array of upstream node references within the topology to be used + as inputs for this node. + :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + """ super(ProcessorNodeBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.name = name @@ -97,23 +106,23 @@ class CognitiveServicesVisionProcessor(ProcessorNodeBase): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. 
- :type name: str - :param inputs: Required. An array of upstream node references within the topology to be used as + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. - :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] - :param endpoint: Required. Endpoint to which this processor should connect. - :type endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase - :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.videoanalyzer.edge.models.ImageProperties - :param sampling_options: Describes the sampling options to be applied when forwarding samples - to the extension. - :type sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions - :param operation: Required. Describes the Spatial Analysis operation to be used in the - Cognitive Services Vision processor. - :type operation: ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationBase + :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :ivar endpoint: Required. Endpoint to which this processor should connect. + :vartype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase + :ivar image: Describes the parameters of the image that is sent as input to the endpoint. + :vartype image: ~azure.media.videoanalyzer.edge.models.ImageProperties + :ivar sampling_options: Describes the sampling options to be applied when forwarding samples to + the extension. + :vartype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions + :ivar operation: Required. Describes the Spatial Analysis operation to be used in the Cognitive + Services Vision processor. + :vartype operation: ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationBase """ _validation = { @@ -145,6 +154,23 @@ def __init__( sampling_options: Optional["SamplingOptions"] = None, **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + :keyword inputs: Required. An array of upstream node references within the topology to be used + as inputs for this node. + :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :keyword endpoint: Required. Endpoint to which this processor should connect. + :paramtype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase + :keyword image: Describes the parameters of the image that is sent as input to the endpoint. + :paramtype image: ~azure.media.videoanalyzer.edge.models.ImageProperties + :keyword sampling_options: Describes the sampling options to be applied when forwarding samples + to the extension. + :paramtype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions + :keyword operation: Required. Describes the Spatial Analysis operation to be used in the + Cognitive Services Vision processor. + :paramtype operation: ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationBase + """ super(CognitiveServicesVisionProcessor, self).__init__(name=name, inputs=inputs, **kwargs) self.type = '#Microsoft.VideoAnalyzer.CognitiveServicesVisionProcessor' # type: str self.endpoint = endpoint @@ -157,12 +183,12 @@ class CredentialsBase(msrest.serialization.Model): """Base class for credential objects. 
You probably want to use the sub-classes and not this class directly. Known - sub-classes are: HttpHeaderCredentials, UsernamePasswordCredentials. + sub-classes are: HttpHeaderCredentials, SymmetricKeyCredentials, UsernamePasswordCredentials. All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str """ _validation = { @@ -174,17 +200,99 @@ class CredentialsBase(msrest.serialization.Model): } _subtype_map = { - 'type': {'#Microsoft.VideoAnalyzer.HttpHeaderCredentials': 'HttpHeaderCredentials', '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials': 'UsernamePasswordCredentials'} + 'type': {'#Microsoft.VideoAnalyzer.HttpHeaderCredentials': 'HttpHeaderCredentials', '#Microsoft.VideoAnalyzer.SymmetricKeyCredentials': 'SymmetricKeyCredentials', '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials': 'UsernamePasswordCredentials'} } def __init__( self, **kwargs ): + """ + """ super(CredentialsBase, self).__init__(**kwargs) self.type = None # type: Optional[str] +class DiscoveredOnvifDevice(msrest.serialization.Model): + """The discovered properties of the ONVIF device that are returned during the discovery. + + :ivar service_identifier: The unique identifier of the ONVIF device that was discovered in the + same subnet as the IoT Edge device. + :vartype service_identifier: str + :ivar remote_ip_address: The IP address of the ONVIF device that was discovered in the same + subnet as the IoT Edge device. + :vartype remote_ip_address: str + :ivar scopes: An array of hostnames for the ONVIF discovered devices that are in the same + subnet as the IoT Edge device. + :vartype scopes: list[str] + :ivar endpoints: An array of media profile endpoints that the ONVIF discovered device supports. + :vartype endpoints: list[str] + """ + + _attribute_map = { + 'service_identifier': {'key': 'serviceIdentifier', 'type': 'str'}, + 'remote_ip_address': {'key': 'remoteIPAddress', 'type': 'str'}, + 'scopes': {'key': 'scopes', 'type': '[str]'}, + 'endpoints': {'key': 'endpoints', 'type': '[str]'}, + } + + def __init__( + self, + *, + service_identifier: Optional[str] = None, + remote_ip_address: Optional[str] = None, + scopes: Optional[List[str]] = None, + endpoints: Optional[List[str]] = None, + **kwargs + ): + """ + :keyword service_identifier: The unique identifier of the ONVIF device that was discovered in + the same subnet as the IoT Edge device. + :paramtype service_identifier: str + :keyword remote_ip_address: The IP address of the ONVIF device that was discovered in the same + subnet as the IoT Edge device. + :paramtype remote_ip_address: str + :keyword scopes: An array of hostnames for the ONVIF discovered devices that are in the same + subnet as the IoT Edge device. + :paramtype scopes: list[str] + :keyword endpoints: An array of media profile endpoints that the ONVIF discovered device + supports. + :paramtype endpoints: list[str] + """ + super(DiscoveredOnvifDevice, self).__init__(**kwargs) + self.service_identifier = service_identifier + self.remote_ip_address = remote_ip_address + self.scopes = scopes + self.endpoints = endpoints + + +class DiscoveredOnvifDeviceCollection(msrest.serialization.Model): + """A list of ONVIF devices that were discovered in the same subnet as the IoT Edge device. 
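# A sketch of consuming a discovery result. The payload below stands in for
# the JSON returned by the module's ONVIF device discovery direct method;
# deserialization uses the standard msrest model helper.
from azure.media.videoanalyzeredge import DiscoveredOnvifDeviceCollection

method_response_payload = {"value": []}  # stand-in for the direct-method response
devices = DiscoveredOnvifDeviceCollection.deserialize(method_response_payload)
for device in devices.value or []:
    print(device.service_identifier, device.remote_ip_address, device.endpoints)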
+ + :ivar value: An array of ONVIF devices that have been discovered in the same subnet as the IoT + Edge device. + :vartype value: list[~azure.media.videoanalyzer.edge.models.DiscoveredOnvifDevice] + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[DiscoveredOnvifDevice]'}, + } + + def __init__( + self, + *, + value: Optional[List["DiscoveredOnvifDevice"]] = None, + **kwargs + ): + """ + :keyword value: An array of ONVIF devices that have been discovered in the same subnet as the + IoT Edge device. + :paramtype value: list[~azure.media.videoanalyzer.edge.models.DiscoveredOnvifDevice] + """ + super(DiscoveredOnvifDeviceCollection, self).__init__(**kwargs) + self.value = value + + class EndpointBase(msrest.serialization.Model): """Base class for endpoints. @@ -193,12 +301,12 @@ class EndpointBase(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param credentials: Credentials to be presented to the endpoint. - :type credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase - :param url: Required. The endpoint URL for Video Analyzer to connect to. - :type url: str + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar credentials: Credentials to be presented to the endpoint. + :vartype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase + :ivar url: Required. The endpoint URL for Video Analyzer to connect to. + :vartype url: str """ _validation = { @@ -223,6 +331,12 @@ def __init__( credentials: Optional["CredentialsBase"] = None, **kwargs ): + """ + :keyword credentials: Credentials to be presented to the endpoint. + :paramtype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase + :keyword url: Required. The endpoint URL for Video Analyzer to connect to. + :paramtype url: str + """ super(EndpointBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.credentials = credentials @@ -237,21 +351,21 @@ class ExtensionProcessorBase(ProcessorNodeBase): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str - :param inputs: Required. An array of upstream node references within the topology to be used as + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. - :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] - :param endpoint: Required. Endpoint details of the pipeline extension plugin. - :type endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase - :param image: Required. Image transformations and formatting options to be applied to the video + :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :ivar endpoint: Required. Endpoint details of the pipeline extension plugin. + :vartype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase + :ivar image: Required. 
Image transformations and formatting options to be applied to the video frame(s) prior submission to the pipeline extension plugin. - :type image: ~azure.media.videoanalyzer.edge.models.ImageProperties - :param sampling_options: Media sampling parameters that define how often media is submitted to + :vartype image: ~azure.media.videoanalyzer.edge.models.ImageProperties + :ivar sampling_options: Media sampling parameters that define how often media is submitted to the extension plugin. - :type sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions + :vartype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions """ _validation = { @@ -285,6 +399,21 @@ def __init__( sampling_options: Optional["SamplingOptions"] = None, **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + :keyword inputs: Required. An array of upstream node references within the topology to be used + as inputs for this node. + :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :keyword endpoint: Required. Endpoint details of the pipeline extension plugin. + :paramtype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase + :keyword image: Required. Image transformations and formatting options to be applied to the + video frame(s) prior submission to the pipeline extension plugin. + :paramtype image: ~azure.media.videoanalyzer.edge.models.ImageProperties + :keyword sampling_options: Media sampling parameters that define how often media is submitted + to the extension plugin. + :paramtype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions + """ super(ExtensionProcessorBase, self).__init__(name=name, inputs=inputs, **kwargs) self.type = '#Microsoft.VideoAnalyzer.ExtensionProcessorBase' # type: str self.endpoint = endpoint @@ -300,13 +429,13 @@ class SinkNodeBase(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str - :param inputs: Required. An array of upstream node references within the topology to be used as + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. - :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] """ _validation = { @@ -332,6 +461,13 @@ def __init__( inputs: List["NodeInput"], **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + :keyword inputs: Required. An array of upstream node references within the topology to be used + as inputs for this node. + :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + """ super(SinkNodeBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.name = name @@ -343,22 +479,22 @@ class FileSink(SinkNodeBase): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. 
Must be unique within the topology. - :type name: str - :param inputs: Required. An array of upstream node references within the topology to be used as + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. - :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] - :param base_directory_path: Required. Absolute directory path where media files will be stored. - :type base_directory_path: str - :param file_name_pattern: Required. File name pattern for creating new files when performing + :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :ivar base_directory_path: Required. Absolute directory path where media files will be stored. + :vartype base_directory_path: str + :ivar file_name_pattern: Required. File name pattern for creating new files when performing event based recording. The pattern must include at least one system variable. - :type file_name_pattern: str - :param maximum_size_mi_b: Required. Maximum amount of disk space that can be used for storing + :vartype file_name_pattern: str + :ivar maximum_size_mi_b: Required. Maximum amount of disk space that can be used for storing files from this sink. Once this limit is reached, the oldest files from this sink will be automatically deleted. - :type maximum_size_mi_b: str + :vartype maximum_size_mi_b: str """ _validation = { @@ -389,6 +525,23 @@ def __init__( maximum_size_mi_b: str, **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + :keyword inputs: Required. An array of upstream node references within the topology to be used + as inputs for this node. + :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :keyword base_directory_path: Required. Absolute directory path where media files will be + stored. + :paramtype base_directory_path: str + :keyword file_name_pattern: Required. File name pattern for creating new files when performing + event based recording. The pattern must include at least one system variable. + :paramtype file_name_pattern: str + :keyword maximum_size_mi_b: Required. Maximum amount of disk space that can be used for storing + files from this sink. Once this limit is reached, the oldest files from this sink will be + automatically deleted. + :paramtype maximum_size_mi_b: str + """ super(FileSink, self).__init__(name=name, inputs=inputs, **kwargs) self.type = '#Microsoft.VideoAnalyzer.FileSink' # type: str self.base_directory_path = base_directory_path @@ -401,27 +554,27 @@ class GrpcExtension(ExtensionProcessorBase): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str - :param inputs: Required. An array of upstream node references within the topology to be used as + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. 
- :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] - :param endpoint: Required. Endpoint details of the pipeline extension plugin. - :type endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase - :param image: Required. Image transformations and formatting options to be applied to the video + :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :ivar endpoint: Required. Endpoint details of the pipeline extension plugin. + :vartype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase + :ivar image: Required. Image transformations and formatting options to be applied to the video frame(s) prior submission to the pipeline extension plugin. - :type image: ~azure.media.videoanalyzer.edge.models.ImageProperties - :param sampling_options: Media sampling parameters that define how often media is submitted to + :vartype image: ~azure.media.videoanalyzer.edge.models.ImageProperties + :ivar sampling_options: Media sampling parameters that define how often media is submitted to the extension plugin. - :type sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions - :param data_transfer: Required. Specifies how media is transferred to the extension plugin. - :type data_transfer: ~azure.media.videoanalyzer.edge.models.GrpcExtensionDataTransfer - :param extension_configuration: An optional configuration string that is sent to the extension + :vartype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions + :ivar data_transfer: Required. Specifies how media is transferred to the extension plugin. + :vartype data_transfer: ~azure.media.videoanalyzer.edge.models.GrpcExtensionDataTransfer + :ivar extension_configuration: An optional configuration string that is sent to the extension plugin. The configuration string is specific to each custom extension and is neither understood nor validated by Video Analyzer. Please see https://aka.ms/ava-extension-grpc for details. - :type extension_configuration: str + :vartype extension_configuration: str """ _validation = { @@ -456,6 +609,28 @@ def __init__( extension_configuration: Optional[str] = None, **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + :keyword inputs: Required. An array of upstream node references within the topology to be used + as inputs for this node. + :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :keyword endpoint: Required. Endpoint details of the pipeline extension plugin. + :paramtype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase + :keyword image: Required. Image transformations and formatting options to be applied to the + video frame(s) prior submission to the pipeline extension plugin. + :paramtype image: ~azure.media.videoanalyzer.edge.models.ImageProperties + :keyword sampling_options: Media sampling parameters that define how often media is submitted + to the extension plugin. + :paramtype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions + :keyword data_transfer: Required. Specifies how media is transferred to the extension plugin. + :paramtype data_transfer: ~azure.media.videoanalyzer.edge.models.GrpcExtensionDataTransfer + :keyword extension_configuration: An optional configuration string that is sent to the + extension plugin. The configuration string is specific to each custom extension and is + neither understood nor validated by Video Analyzer. Please see https://aka.ms/ava-extension-grpc + for details.
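To make the gRPC extension concrete, a minimal sketch of constructing the node with shared-memory transfer. It assumes NodeInput and UnsecuredEndpoint from elsewhere in this models file, uses the GrpcExtensionDataTransfer model defined just below, and every name, address, and value is a placeholder:

from azure.media.videoanalyzeredge import (
    GrpcExtension,
    GrpcExtensionDataTransfer,
    ImageFormatRaw,
    ImageProperties,
    ImageScale,
    NodeInput,
    UnsecuredEndpoint,
)

# Sketch: gRPC extension node fed by an upstream RTSP source node.
grpc_node = GrpcExtension(
    name="grpcExtension",
    inputs=[NodeInput(node_name="rtspSource")],
    endpoint=UnsecuredEndpoint(url="tcp://10.1.1.1:5001"),
    image=ImageProperties(
        scale=ImageScale(mode="pad", width="416", height="416"),
        format=ImageFormatRaw(pixel_format="rgb24"),
    ),
    data_transfer=GrpcExtensionDataTransfer(
        mode="sharedMemory",
        shared_memory_size_mi_b="64",  # only valid with the sharedMemory mode
    ),
    extension_configuration='{"confidenceThreshold": 0.5}',  # opaque to Video Analyzer
)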
+ :paramtype extension_configuration: str + """ super(GrpcExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, sampling_options=sampling_options, **kwargs) self.type = '#Microsoft.VideoAnalyzer.GrpcExtension' # type: str self.data_transfer = data_transfer @@ -467,12 +642,12 @@ class GrpcExtensionDataTransfer(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param shared_memory_size_mi_b: The share memory buffer for sample transfers, in mebibytes. It + :ivar shared_memory_size_mi_b: The shared memory buffer for sample transfers, in mebibytes. It can only be used with the 'SharedMemory' transfer mode. - :type shared_memory_size_mi_b: str - :param mode: Required. Data transfer mode: embedded or sharedMemory. Possible values include: + :vartype shared_memory_size_mi_b: str + :ivar mode: Required. Data transfer mode: embedded or sharedMemory. Possible values include: "embedded", "sharedMemory". - :type mode: str or ~azure.media.videoanalyzer.edge.models.GrpcExtensionDataTransferMode + :vartype mode: str or ~azure.media.videoanalyzer.edge.models.GrpcExtensionDataTransferMode """ _validation = { @@ -491,31 +666,73 @@ def __init__( shared_memory_size_mi_b: Optional[str] = None, **kwargs ): + """ + :keyword shared_memory_size_mi_b: The shared memory buffer for sample transfers, in mebibytes. + It can only be used with the 'SharedMemory' transfer mode. + :paramtype shared_memory_size_mi_b: str + :keyword mode: Required. Data transfer mode: embedded or sharedMemory. Possible values include: + "embedded", "sharedMemory". + :paramtype mode: str or ~azure.media.videoanalyzer.edge.models.GrpcExtensionDataTransferMode + """ super(GrpcExtensionDataTransfer, self).__init__(**kwargs) self.shared_memory_size_mi_b = shared_memory_size_mi_b self.mode = mode +class H264Configuration(msrest.serialization.Model): + """Class representing the H264 Configuration. + + :ivar gov_length: Group of Video frames length. + :vartype gov_length: float + :ivar profile: The H264 Profile. Possible values include: "Baseline", "Main", "Extended", + "High". + :vartype profile: str or ~azure.media.videoanalyzer.edge.models.H264Profile + """ + + _attribute_map = { + 'gov_length': {'key': 'govLength', 'type': 'float'}, + 'profile': {'key': 'profile', 'type': 'str'}, + } + + def __init__( + self, + *, + gov_length: Optional[float] = None, + profile: Optional[Union[str, "H264Profile"]] = None, + **kwargs + ): + """ + :keyword gov_length: Group of Video frames length. + :paramtype gov_length: float + :keyword profile: The H264 Profile. Possible values include: "Baseline", "Main", "Extended", + "High". + :paramtype profile: str or ~azure.media.videoanalyzer.edge.models.H264Profile + """ + super(H264Configuration, self).__init__(**kwargs) + self.gov_length = gov_length + self.profile = profile + + class HttpExtension(ExtensionProcessorBase): """HTTP extension processor allows pipeline extension plugins to be connected to the pipeline over the HTTP protocol. Extension plugins must act as an HTTP server. Please see https://aka.ms/ava-extension-http for details. All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str - :param inputs: Required. An array of upstream node references within the topology to be used as + :ivar type: Required.
Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. - :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] - :param endpoint: Required. Endpoint details of the pipeline extension plugin. - :type endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase - :param image: Required. Image transformations and formatting options to be applied to the video + :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :ivar endpoint: Required. Endpoint details of the pipeline extension plugin. + :vartype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase + :ivar image: Required. Image transformations and formatting options to be applied to the video frame(s) prior submission to the pipeline extension plugin. - :type image: ~azure.media.videoanalyzer.edge.models.ImageProperties - :param sampling_options: Media sampling parameters that define how often media is submitted to + :vartype image: ~azure.media.videoanalyzer.edge.models.ImageProperties + :ivar sampling_options: Media sampling parameters that define how often media is submitted to the extension plugin. - :type sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions + :vartype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions """ _validation = { @@ -545,6 +762,21 @@ def __init__( sampling_options: Optional["SamplingOptions"] = None, **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + :keyword inputs: Required. An array of upstream node references within the topology to be used + as inputs for this node. + :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :keyword endpoint: Required. Endpoint details of the pipeline extension plugin. + :paramtype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase + :keyword image: Required. Image transformations and formatting options to be applied to the + video frame(s) prior submission to the pipeline extension plugin. + :paramtype image: ~azure.media.videoanalyzer.edge.models.ImageProperties + :keyword sampling_options: Media sampling parameters that define how often media is submitted + to the extension plugin. + :paramtype sampling_options: ~azure.media.videoanalyzer.edge.models.SamplingOptions + """ super(HttpExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, sampling_options=sampling_options, **kwargs) self.type = '#Microsoft.VideoAnalyzer.HttpExtension' # type: str @@ -554,14 +786,14 @@ class HttpHeaderCredentials(CredentialsBase): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param header_name: Required. HTTP header name. - :type header_name: str - :param header_value: Required. HTTP header value. It is recommended that this value is + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar header_name: Required. HTTP header name. + :vartype header_name: str + :ivar header_value: Required. HTTP header value. 
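A minimal sketch of supplying these header credentials without inlining the secret, where "inferenceApiKey" is a hypothetical SecretString parameter declared on the pipeline topology:

from azure.media.videoanalyzeredge import HttpHeaderCredentials

# Sketch: the ${...} syntax resolves a topology parameter at activation time,
# keeping the raw key out of the stored resource.
credentials = HttpHeaderCredentials(
    header_name="Authorization",
    header_value="${inferenceApiKey}",
)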
It is recommended that this value is parameterized as a secret string in order to prevent this value to be returned as part of the resource on API requests. - :type header_value: str + :vartype header_value: str """ _validation = { @@ -583,6 +815,14 @@ def __init__( header_value: str, **kwargs ): + """ + :keyword header_name: Required. HTTP header name. + :paramtype header_name: str + :keyword header_value: Required. HTTP header value. It is recommended that this value is + parameterized as a secret string in order to prevent this value to be returned as part of the + resource on API requests. + :paramtype header_value: str + """ super(HttpHeaderCredentials, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.HttpHeaderCredentials' # type: str self.header_name = header_name @@ -597,8 +837,8 @@ class ImageFormatProperties(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str """ _validation = { @@ -617,6 +857,8 @@ def __init__( self, **kwargs ): + """ + """ super(ImageFormatProperties, self).__init__(**kwargs) self.type = None # type: Optional[str] @@ -626,8 +868,8 @@ class ImageFormatBmp(ImageFormatProperties): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str """ _validation = { @@ -642,6 +884,8 @@ def __init__( self, **kwargs ): + """ + """ super(ImageFormatBmp, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.ImageFormatBmp' # type: str @@ -651,10 +895,10 @@ class ImageFormatJpeg(ImageFormatProperties): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param quality: Image quality value between 0 to 100 (best quality). - :type quality: str + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar quality: Image quality value between 0 to 100 (best quality). + :vartype quality: str """ _validation = { @@ -672,6 +916,10 @@ def __init__( quality: Optional[str] = None, **kwargs ): + """ + :keyword quality: Image quality value between 0 to 100 (best quality). + :paramtype quality: str + """ super(ImageFormatJpeg, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.ImageFormatJpeg' # type: str self.quality = quality @@ -682,8 +930,8 @@ class ImageFormatPng(ImageFormatProperties): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str """ _validation = { @@ -698,6 +946,8 @@ def __init__( self, **kwargs ): + """ + """ super(ImageFormatPng, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.ImageFormatPng' # type: str @@ -707,12 +957,12 @@ class ImageFormatRaw(ImageFormatProperties): All required parameters must be populated in order to send to Azure. - :param type: Required. 
Type discriminator for the derived types.Constant filled by server. - :type type: str - :param pixel_format: Required. Pixel format to be applied to the raw image. Possible values + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar pixel_format: Required. Pixel format to be applied to the raw image. Possible values include: "yuv420p", "rgb565be", "rgb565le", "rgb555be", "rgb555le", "rgb24", "bgr24", "argb", "rgba", "abgr", "bgra". - :type pixel_format: str or ~azure.media.videoanalyzer.edge.models.ImageFormatRawPixelFormat + :vartype pixel_format: str or ~azure.media.videoanalyzer.edge.models.ImageFormatRawPixelFormat """ _validation = { @@ -731,6 +981,13 @@ def __init__( pixel_format: Union[str, "ImageFormatRawPixelFormat"], **kwargs ): + """ + :keyword pixel_format: Required. Pixel format to be applied to the raw image. Possible values + include: "yuv420p", "rgb565be", "rgb565le", "rgb555be", "rgb555le", "rgb24", "bgr24", "argb", + "rgba", "abgr", "bgra". + :paramtype pixel_format: str or + ~azure.media.videoanalyzer.edge.models.ImageFormatRawPixelFormat + """ super(ImageFormatRaw, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.ImageFormatRaw' # type: str self.pixel_format = pixel_format @@ -739,10 +996,10 @@ def __init__( class ImageProperties(msrest.serialization.Model): """Image transformations and formatting options to be applied to the video frame(s). - :param scale: Image scaling mode. - :type scale: ~azure.media.videoanalyzer.edge.models.ImageScale - :param format: Base class for image formatting properties. - :type format: ~azure.media.videoanalyzer.edge.models.ImageFormatProperties + :ivar scale: Image scaling mode. + :vartype scale: ~azure.media.videoanalyzer.edge.models.ImageScale + :ivar format: Base class for image formatting properties. + :vartype format: ~azure.media.videoanalyzer.edge.models.ImageFormatProperties """ _attribute_map = { @@ -757,6 +1014,12 @@ def __init__( format: Optional["ImageFormatProperties"] = None, **kwargs ): + """ + :keyword scale: Image scaling mode. + :paramtype scale: ~azure.media.videoanalyzer.edge.models.ImageScale + :keyword format: Base class for image formatting properties. + :paramtype format: ~azure.media.videoanalyzer.edge.models.ImageFormatProperties + """ super(ImageProperties, self).__init__(**kwargs) self.scale = scale self.format = format @@ -765,13 +1028,13 @@ def __init__( class ImageScale(msrest.serialization.Model): """Image scaling mode. - :param mode: Describes the image scaling mode to be applied. Default mode is 'pad'. Possible + :ivar mode: Describes the image scaling mode to be applied. Default mode is 'pad'. Possible values include: "preserveAspectRatio", "pad", "stretch". - :type mode: str or ~azure.media.videoanalyzer.edge.models.ImageScaleMode - :param width: The desired output image width. - :type width: str - :param height: The desired output image height. - :type height: str + :vartype mode: str or ~azure.media.videoanalyzer.edge.models.ImageScaleMode + :ivar width: The desired output image width. + :vartype width: str + :ivar height: The desired output image height. + :vartype height: str """ _attribute_map = { @@ -788,27 +1051,78 @@ def __init__( height: Optional[str] = None, **kwargs ): + """ + :keyword mode: Describes the image scaling mode to be applied. Default mode is 'pad'. Possible + values include: "preserveAspectRatio", "pad", "stretch". 
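For completeness, a small sketch combining the two image models above, with illustrative dimensions and quality:

from azure.media.videoanalyzeredge import ImageFormatJpeg, ImageProperties, ImageScale

# Sketch: scale frames to fit within 640x360 while preserving aspect ratio,
# then JPEG-encode them before they reach an extension node.
image = ImageProperties(
    scale=ImageScale(mode="preserveAspectRatio", width="640", height="360"),
    format=ImageFormatJpeg(quality="90"),
)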
+ :paramtype mode: str or ~azure.media.videoanalyzer.edge.models.ImageScaleMode + :keyword width: The desired output image width. + :paramtype width: str + :keyword height: The desired output image height. + :paramtype height: str + """ super(ImageScale, self).__init__(**kwargs) self.mode = mode self.width = width self.height = height +class IotHubDeviceConnection(msrest.serialization.Model): + """Information that enables communication between the IoT Hub and the IoT device - allowing this edge module to act as a transparent gateway between the two. + + All required parameters must be populated in order to send to Azure. + + :ivar device_id: Required. The name of the IoT device configured and managed in IoT Hub. + (case-sensitive). + :vartype device_id: str + :ivar credentials: IoT device connection credentials. Currently IoT device symmetric key + credentials are supported. + :vartype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase + """ + + _validation = { + 'device_id': {'required': True}, + } + + _attribute_map = { + 'device_id': {'key': 'deviceId', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'CredentialsBase'}, + } + + def __init__( + self, + *, + device_id: str, + credentials: Optional["CredentialsBase"] = None, + **kwargs + ): + """ + :keyword device_id: Required. The name of the IoT device configured and managed in IoT Hub. + (case-sensitive). + :paramtype device_id: str + :keyword credentials: IoT device connection credentials. Currently IoT device symmetric key + credentials are supported. + :paramtype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase + """ + super(IotHubDeviceConnection, self).__init__(**kwargs) + self.device_id = device_id + self.credentials = credentials + + class IotHubMessageSink(SinkNodeBase): """IoT Hub Message sink allows for pipeline messages to published into the IoT Edge Hub. Published messages can then be delivered to the cloud and other modules via routes declared in the IoT Edge deployment manifest. All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str - :param inputs: Required. An array of upstream node references within the topology to be used as + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. - :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] - :param hub_output_name: Required. Name of the Iot Edge Hub output to which the messages will be + :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :ivar hub_output_name: Required. Name of the Iot Edge Hub output to which the messages will be published. - :type hub_output_name: str + :vartype hub_output_name: str """ _validation = { @@ -833,6 +1147,16 @@ def __init__( hub_output_name: str, **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + :keyword inputs: Required. An array of upstream node references within the topology to be used + as inputs for this node. 
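A hedged sketch of wiring the device connection model above to the SymmetricKeyCredentials type registered earlier in this file; that class is defined elsewhere, so its key argument is an assumption here:

from azure.media.videoanalyzeredge import IotHubDeviceConnection, SymmetricKeyCredentials

# Sketch: map an IoT Hub device identity to a camera behind this module.
# Assumes SymmetricKeyCredentials takes the device's symmetric key as `key`.
device_connection = IotHubDeviceConnection(
    device_id="camera-001",  # case-sensitive IoT Hub device name
    credentials=SymmetricKeyCredentials(key="<base64-device-key>"),
)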
+ :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :keyword hub_output_name: Required. Name of the Iot Edge Hub output to which the messages will + be published. + :paramtype hub_output_name: str + """ super(IotHubMessageSink, self).__init__(name=name, inputs=inputs, **kwargs) self.type = '#Microsoft.VideoAnalyzer.IotHubMessageSink' # type: str self.hub_output_name = hub_output_name @@ -846,10 +1170,10 @@ class SourceNodeBase(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str """ _validation = { @@ -872,6 +1196,10 @@ def __init__( name: str, **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + """ super(SourceNodeBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.name = name @@ -882,12 +1210,12 @@ class IotHubMessageSource(SourceNodeBase): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str - :param hub_input_name: Name of the IoT Edge Hub input from which messages will be consumed. - :type hub_input_name: str + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar hub_input_name: Name of the IoT Edge Hub input from which messages will be consumed. + :vartype hub_input_name: str """ _validation = { @@ -908,6 +1236,12 @@ def __init__( hub_input_name: Optional[str] = None, **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + :keyword hub_input_name: Name of the IoT Edge Hub input from which messages will be consumed. + :paramtype hub_input_name: str + """ super(IotHubMessageSource, self).__init__(name=name, **kwargs) self.type = '#Microsoft.VideoAnalyzer.IotHubMessageSource' # type: str self.hub_input_name = hub_input_name @@ -918,15 +1252,15 @@ class LineCrossingProcessor(ProcessorNodeBase): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str - :param inputs: Required. An array of upstream node references within the topology to be used as + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. - :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] - :param lines: Required. An array of lines used to compute line crossing events. 
- :type lines: list[~azure.media.videoanalyzer.edge.models.NamedLineBase] + :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :ivar lines: Required. An array of lines used to compute line crossing events. + :vartype lines: list[~azure.media.videoanalyzer.edge.models.NamedLineBase] """ _validation = { @@ -951,6 +1285,15 @@ def __init__( lines: List["NamedLineBase"], **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + :keyword inputs: Required. An array of upstream node references within the topology to be used + as inputs for this node. + :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :keyword lines: Required. An array of lines used to compute line crossing events. + :paramtype lines: list[~azure.media.videoanalyzer.edge.models.NamedLineBase] + """ super(LineCrossingProcessor, self).__init__(name=name, inputs=inputs, **kwargs) self.type = '#Microsoft.VideoAnalyzer.LineCrossingProcessor' # type: str self.lines = lines @@ -961,12 +1304,12 @@ class LivePipeline(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param name: Required. Live pipeline unique identifier. - :type name: str - :param system_data: Read-only system metadata associated with this object. - :type system_data: ~azure.media.videoanalyzer.edge.models.SystemData - :param properties: Live pipeline properties. - :type properties: ~azure.media.videoanalyzer.edge.models.LivePipelineProperties + :ivar name: Required. Live pipeline unique identifier. + :vartype name: str + :ivar system_data: Read-only system metadata associated with this object. + :vartype system_data: ~azure.media.videoanalyzer.edge.models.SystemData + :ivar properties: Live pipeline properties. + :vartype properties: ~azure.media.videoanalyzer.edge.models.LivePipelineProperties """ _validation = { @@ -987,6 +1330,14 @@ def __init__( properties: Optional["LivePipelineProperties"] = None, **kwargs ): + """ + :keyword name: Required. Live pipeline unique identifier. + :paramtype name: str + :keyword system_data: Read-only system metadata associated with this object. + :paramtype system_data: ~azure.media.videoanalyzer.edge.models.SystemData + :keyword properties: Live pipeline properties. + :paramtype properties: ~azure.media.videoanalyzer.edge.models.LivePipelineProperties + """ super(LivePipeline, self).__init__(**kwargs) self.name = name self.system_data = system_data @@ -997,7 +1348,7 @@ class MethodRequest(msrest.serialization.Model): """Base class for direct method calls. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: LivePipelineSetRequestBody, MethodRequestEmptyBodyBase, PipelineTopologySetRequestBody, LivePipelineListRequest, LivePipelineSetRequest, PipelineTopologyListRequest, PipelineTopologySetRequest. + sub-classes are: LivePipelineSetRequestBody, MethodRequestEmptyBodyBase, PipelineTopologySetRequestBody, RemoteDeviceAdapterSetRequestBody, LivePipelineListRequest, LivePipelineSetRequest, OnvifDeviceDiscoverRequest, OnvifDeviceGetRequest, PipelineTopologyListRequest, PipelineTopologySetRequest, RemoteDeviceAdapterListRequest, RemoteDeviceAdapterSetRequest. Variables are only populated by the server, and will be ignored when sending a request. @@ -1005,13 +1356,13 @@ class MethodRequest(msrest.serialization.Model): :ivar method_name: Required. Direct method method name.Constant filled by server. 
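Since the processor above takes named lines, a minimal sketch of one; NamedLineString is assumed from elsewhere in this models file, and the normalized coordinates are illustrative:

from azure.media.videoanalyzeredge import LineCrossingProcessor, NodeInput
from azure.media.videoanalyzeredge import NamedLineString  # assumed string-based named line

# Sketch: emit an event whenever a tracked object crosses the "doorway" line.
line_crossing = LineCrossingProcessor(
    name="lineCrossing",
    inputs=[NodeInput(node_name="objectTracker")],
    lines=[NamedLineString(name="doorway", line='[[0.3,0.8],[0.7,0.8]]')],
)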
:vartype method_name: str - :ivar api_version: Video Analyzer API version. Default value: "1.0". + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". :vartype api_version: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, - 'api_version': {'constant': True}, } _attribute_map = { @@ -1020,24 +1371,30 @@ class MethodRequest(msrest.serialization.Model): } _subtype_map = { - 'method_name': {'LivePipelineSetRequestBody': 'LivePipelineSetRequestBody', 'MethodRequestEmptyBodyBase': 'MethodRequestEmptyBodyBase', 'PipelineTopologySetRequestBody': 'PipelineTopologySetRequestBody', 'livePipelineList': 'LivePipelineListRequest', 'livePipelineSet': 'LivePipelineSetRequest', 'pipelineTopologyList': 'PipelineTopologyListRequest', 'pipelineTopologySet': 'PipelineTopologySetRequest'} + 'method_name': {'LivePipelineSetRequestBody': 'LivePipelineSetRequestBody', 'MethodRequestEmptyBodyBase': 'MethodRequestEmptyBodyBase', 'PipelineTopologySetRequestBody': 'PipelineTopologySetRequestBody', 'RemoteDeviceAdapterSetRequestBody': 'RemoteDeviceAdapterSetRequestBody', 'livePipelineList': 'LivePipelineListRequest', 'livePipelineSet': 'LivePipelineSetRequest', 'onvifDeviceDiscover': 'OnvifDeviceDiscoverRequest', 'onvifDeviceGet': 'OnvifDeviceGetRequest', 'pipelineTopologyList': 'PipelineTopologyListRequest', 'pipelineTopologySet': 'PipelineTopologySetRequest', 'remoteDeviceAdapterList': 'RemoteDeviceAdapterListRequest', 'remoteDeviceAdapterSet': 'RemoteDeviceAdapterSetRequest'} } - api_version = "1.0" - def __init__( self, + *, + api_version: Optional[str] = "1.1", **kwargs ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + """ super(MethodRequest, self).__init__(**kwargs) self.method_name = None # type: Optional[str] + self.api_version = api_version class MethodRequestEmptyBodyBase(MethodRequest): """MethodRequestEmptyBodyBase. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: LivePipelineActivateRequest, LivePipelineDeactivateRequest, LivePipelineDeleteRequest, LivePipelineGetRequest, PipelineTopologyDeleteRequest, PipelineTopologyGetRequest. + sub-classes are: LivePipelineActivateRequest, LivePipelineDeactivateRequest, LivePipelineDeleteRequest, LivePipelineGetRequest, PipelineTopologyDeleteRequest, PipelineTopologyGetRequest, RemoteDeviceAdapterDeleteRequest, RemoteDeviceAdapterGetRequest. Variables are only populated by the server, and will be ignored when sending a request. @@ -1045,15 +1402,15 @@ class MethodRequestEmptyBodyBase(MethodRequest): :ivar method_name: Required. Direct method method name.Constant filled by server. :vartype method_name: str - :ivar api_version: Video Analyzer API version. Default value: "1.0". + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". :vartype api_version: str - :param name: Required. Resource name. - :type name: str + :ivar name: Required. Resource name. 
+ :vartype name: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, - 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1064,18 +1421,24 @@ class MethodRequestEmptyBodyBase(MethodRequest): } _subtype_map = { - 'method_name': {'livePipelineActivate': 'LivePipelineActivateRequest', 'livePipelineDeactivate': 'LivePipelineDeactivateRequest', 'livePipelineDelete': 'LivePipelineDeleteRequest', 'livePipelineGet': 'LivePipelineGetRequest', 'pipelineTopologyDelete': 'PipelineTopologyDeleteRequest', 'pipelineTopologyGet': 'PipelineTopologyGetRequest'} + 'method_name': {'livePipelineActivate': 'LivePipelineActivateRequest', 'livePipelineDeactivate': 'LivePipelineDeactivateRequest', 'livePipelineDelete': 'LivePipelineDeleteRequest', 'livePipelineGet': 'LivePipelineGetRequest', 'pipelineTopologyDelete': 'PipelineTopologyDeleteRequest', 'pipelineTopologyGet': 'PipelineTopologyGetRequest', 'remoteDeviceAdapterDelete': 'RemoteDeviceAdapterDeleteRequest', 'remoteDeviceAdapterGet': 'RemoteDeviceAdapterGetRequest'} } - api_version = "1.0" - def __init__( self, *, name: str, + api_version: Optional[str] = "1.1", **kwargs ): - super(MethodRequestEmptyBodyBase, self).__init__(**kwargs) + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword name: Required. Resource name. + :paramtype name: str + """ + super(MethodRequestEmptyBodyBase, self).__init__(api_version=api_version, **kwargs) self.method_name = 'MethodRequestEmptyBodyBase' # type: str self.name = name @@ -1089,15 +1452,15 @@ class LivePipelineActivateRequest(MethodRequestEmptyBodyBase): :ivar method_name: Required. Direct method method name.Constant filled by server. :vartype method_name: str - :ivar api_version: Video Analyzer API version. Default value: "1.0". + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". :vartype api_version: str - :param name: Required. Resource name. - :type name: str + :ivar name: Required. Resource name. + :vartype name: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, - 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1107,27 +1470,33 @@ class LivePipelineActivateRequest(MethodRequestEmptyBodyBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" - def __init__( self, *, name: str, + api_version: Optional[str] = "1.1", **kwargs ): - super(LivePipelineActivateRequest, self).__init__(name=name, **kwargs) + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword name: Required. Resource name. + :paramtype name: str + """ + super(LivePipelineActivateRequest, self).__init__(api_version=api_version, name=name, **kwargs) self.method_name = 'livePipelineActivate' # type: str class LivePipelineCollection(msrest.serialization.Model): """A collection of live pipelines. - :param value: List of live pipelines. - :type value: list[~azure.media.videoanalyzer.edge.models.LivePipeline] - :param continuation_token: A continuation token to be used in subsequent calls when enumerating + :ivar value: List of live pipelines. 
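Request classes like the one above are delivered to the edge module as IoT Hub direct methods. A minimal sketch of that round trip with the azure-iot-hub package; the connection string, device id, and module id ("avaedge") are placeholders for your deployment, and api_version can be left at its "1.1" default:

from azure.iot.hub import IoTHubRegistryManager
from azure.iot.hub.models import CloudToDeviceMethod
from azure.media.videoanalyzeredge import LivePipelineActivateRequest

# Sketch: activate a previously set live pipeline via a direct method call.
registry_manager = IoTHubRegistryManager("<iothub-connection-string>")
activate_request = LivePipelineActivateRequest(name="pipeline1")
direct_method = CloudToDeviceMethod(
    method_name=activate_request.method_name,  # "livePipelineActivate"
    payload=activate_request.serialize(),      # includes "@apiVersion": "1.1"
)
registry_manager.invoke_device_module_method("<device-id>", "avaedge", direct_method)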
+ :vartype value: list[~azure.media.videoanalyzer.edge.models.LivePipeline] + :ivar continuation_token: A continuation token to be used in subsequent calls when enumerating through the collection. This is returned when the collection results won't fit in a single response. - :type continuation_token: str + :vartype continuation_token: str """ _attribute_map = { @@ -1142,6 +1511,14 @@ def __init__( continuation_token: Optional[str] = None, **kwargs ): + """ + :keyword value: List of live pipelines. + :paramtype value: list[~azure.media.videoanalyzer.edge.models.LivePipeline] + :keyword continuation_token: A continuation token to be used in subsequent calls when + enumerating through the collection. This is returned when the collection results won't fit in a + single response. + :paramtype continuation_token: str + """ super(LivePipelineCollection, self).__init__(**kwargs) self.value = value self.continuation_token = continuation_token @@ -1156,15 +1533,15 @@ class LivePipelineDeactivateRequest(MethodRequestEmptyBodyBase): :ivar method_name: Required. Direct method method name.Constant filled by server. :vartype method_name: str - :ivar api_version: Video Analyzer API version. Default value: "1.0". + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". :vartype api_version: str - :param name: Required. Resource name. - :type name: str + :ivar name: Required. Resource name. + :vartype name: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, - 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1174,15 +1551,21 @@ class LivePipelineDeactivateRequest(MethodRequestEmptyBodyBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" - def __init__( self, *, name: str, + api_version: Optional[str] = "1.1", **kwargs ): - super(LivePipelineDeactivateRequest, self).__init__(name=name, **kwargs) + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword name: Required. Resource name. + :paramtype name: str + """ + super(LivePipelineDeactivateRequest, self).__init__(api_version=api_version, name=name, **kwargs) self.method_name = 'livePipelineDeactivate' # type: str @@ -1195,15 +1578,15 @@ class LivePipelineDeleteRequest(MethodRequestEmptyBodyBase): :ivar method_name: Required. Direct method method name.Constant filled by server. :vartype method_name: str - :ivar api_version: Video Analyzer API version. Default value: "1.0". + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". :vartype api_version: str - :param name: Required. Resource name. - :type name: str + :ivar name: Required. Resource name. + :vartype name: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, - 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1213,15 +1596,21 @@ class LivePipelineDeleteRequest(MethodRequestEmptyBodyBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" - def __init__( self, *, name: str, + api_version: Optional[str] = "1.1", **kwargs ): - super(LivePipelineDeleteRequest, self).__init__(name=name, **kwargs) + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword name: Required. 
Resource name. + :paramtype name: str + """ + super(LivePipelineDeleteRequest, self).__init__(api_version=api_version, name=name, **kwargs) self.method_name = 'livePipelineDelete' # type: str @@ -1234,15 +1623,15 @@ class LivePipelineGetRequest(MethodRequestEmptyBodyBase): :ivar method_name: Required. Direct method method name.Constant filled by server. :vartype method_name: str - :ivar api_version: Video Analyzer API version. Default value: "1.0". + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". :vartype api_version: str - :param name: Required. Resource name. - :type name: str + :ivar name: Required. Resource name. + :vartype name: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, - 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1252,15 +1641,21 @@ class LivePipelineGetRequest(MethodRequestEmptyBodyBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" - def __init__( self, *, name: str, + api_version: Optional[str] = "1.1", **kwargs ): - super(LivePipelineGetRequest, self).__init__(name=name, **kwargs) + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword name: Required. Resource name. + :paramtype name: str + """ + super(LivePipelineGetRequest, self).__init__(api_version=api_version, name=name, **kwargs) self.method_name = 'livePipelineGet' # type: str @@ -1273,13 +1668,13 @@ class LivePipelineListRequest(MethodRequest): :ivar method_name: Required. Direct method method name.Constant filled by server. :vartype method_name: str - :ivar api_version: Video Analyzer API version. Default value: "1.0". + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". :vartype api_version: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, - 'api_version': {'constant': True}, } _attribute_map = { @@ -1287,33 +1682,38 @@ class LivePipelineListRequest(MethodRequest): 'api_version': {'key': '@apiVersion', 'type': 'str'}, } - api_version = "1.0" - def __init__( self, + *, + api_version: Optional[str] = "1.1", **kwargs ): - super(LivePipelineListRequest, self).__init__(**kwargs) + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + """ + super(LivePipelineListRequest, self).__init__(api_version=api_version, **kwargs) self.method_name = 'livePipelineList' # type: str class LivePipelineProperties(msrest.serialization.Model): """Live pipeline properties. - :param description: An optional description of the live pipeline. - :type description: str - :param topology_name: The reference to an existing pipeline topology defined for real-time + :ivar description: An optional description of the live pipeline. + :vartype description: str + :ivar topology_name: The reference to an existing pipeline topology defined for real-time content processing. When activated, this live pipeline will process content according to the pipeline topology definition. - :type topology_name: str - :param parameters: List of the instance level parameter values for the user-defined topology + :vartype topology_name: str + :ivar parameters: List of the instance level parameter values for the user-defined topology parameters. 
A pipeline can only define or override parameters values for parameters which have been declared in the referenced topology. Topology parameters without a default value must be defined. Topology parameters with a default value can be optionally be overridden. - :type parameters: list[~azure.media.videoanalyzer.edge.models.ParameterDefinition] - :param state: Current pipeline state (read-only). Possible values include: "inactive", + :vartype parameters: list[~azure.media.videoanalyzer.edge.models.ParameterDefinition] + :ivar state: Current pipeline state (read-only). Possible values include: "inactive", "activating", "active", "deactivating". - :type state: str or ~azure.media.videoanalyzer.edge.models.LivePipelineState + :vartype state: str or ~azure.media.videoanalyzer.edge.models.LivePipelineState """ _attribute_map = { @@ -1332,6 +1732,22 @@ def __init__( state: Optional[Union[str, "LivePipelineState"]] = None, **kwargs ): + """ + :keyword description: An optional description of the live pipeline. + :paramtype description: str + :keyword topology_name: The reference to an existing pipeline topology defined for real-time + content processing. When activated, this live pipeline will process content according to the + pipeline topology definition. + :paramtype topology_name: str + :keyword parameters: List of the instance level parameter values for the user-defined topology + parameters. A pipeline can only define or override parameters values for parameters which have + been declared in the referenced topology. Topology parameters without a default value must be + defined. Topology parameters with a default value can be optionally be overridden. + :paramtype parameters: list[~azure.media.videoanalyzer.edge.models.ParameterDefinition] + :keyword state: Current pipeline state (read-only). Possible values include: "inactive", + "activating", "active", "deactivating". + :paramtype state: str or ~azure.media.videoanalyzer.edge.models.LivePipelineState + """ super(LivePipelineProperties, self).__init__(**kwargs) self.description = description self.topology_name = topology_name @@ -1348,16 +1764,16 @@ class LivePipelineSetRequest(MethodRequest): :ivar method_name: Required. Direct method method name.Constant filled by server. :vartype method_name: str - :ivar api_version: Video Analyzer API version. Default value: "1.0". + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". :vartype api_version: str - :param live_pipeline: Required. Live Pipeline represents an unique instance of a pipeline + :ivar live_pipeline: Required. Live Pipeline represents an unique instance of a pipeline topology which is used for real-time content ingestion and analysis. - :type live_pipeline: ~azure.media.videoanalyzer.edge.models.LivePipeline + :vartype live_pipeline: ~azure.media.videoanalyzer.edge.models.LivePipeline """ _validation = { 'method_name': {'required': True, 'readonly': True}, - 'api_version': {'constant': True}, 'live_pipeline': {'required': True}, } @@ -1367,21 +1783,28 @@ class LivePipelineSetRequest(MethodRequest): 'live_pipeline': {'key': 'livePipeline', 'type': 'LivePipeline'}, } - api_version = "1.0" - def __init__( self, *, live_pipeline: "LivePipeline", + api_version: Optional[str] = "1.1", **kwargs ): - super(LivePipelineSetRequest, self).__init__(**kwargs) + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". 
+ :paramtype api_version: str + :keyword live_pipeline: Required. Live Pipeline represents a unique instance of a pipeline + topology which is used for real-time content ingestion and analysis. + :paramtype live_pipeline: ~azure.media.videoanalyzer.edge.models.LivePipeline + """ + super(LivePipelineSetRequest, self).__init__(api_version=api_version, **kwargs) self.method_name = 'livePipelineSet' # type: str self.live_pipeline = live_pipeline class LivePipelineSetRequestBody(LivePipeline, MethodRequest): - """Live pipeline resource representation. + """Live Pipeline represents a unique instance of a pipeline topology which is used for real-time content ingestion and analysis. Variables are only populated by the server, and will be ignored when sending a request. @@ -1389,19 +1812,19 @@ class LivePipelineSetRequestBody(LivePipeline, MethodRequest): :ivar method_name: Required. Direct method method name.Constant filled by server. :vartype method_name: str - :ivar api_version: Video Analyzer API version. Default value: "1.0". + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". :vartype api_version: str - :param name: Required. Live pipeline unique identifier. - :type name: str - :param system_data: Read-only system metadata associated with this object. - :type system_data: ~azure.media.videoanalyzer.edge.models.SystemData - :param properties: Live pipeline properties. - :type properties: ~azure.media.videoanalyzer.edge.models.LivePipelineProperties + :ivar name: Required. Live pipeline unique identifier. + :vartype name: str + :ivar system_data: Read-only system metadata associated with this object. + :vartype system_data: ~azure.media.videoanalyzer.edge.models.SystemData + :ivar properties: Live pipeline properties. + :vartype properties: ~azure.media.videoanalyzer.edge.models.LivePipelineProperties """ _validation = { 'method_name': {'required': True, 'readonly': True}, - 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1413,46 +1836,124 @@ class LivePipelineSetRequestBody(LivePipeline, MethodRequest): 'properties': {'key': 'properties', 'type': 'LivePipelineProperties'}, } - api_version = "1.0" - def __init__( self, *, name: str, + api_version: Optional[str] = "1.1", system_data: Optional["SystemData"] = None, properties: Optional["LivePipelineProperties"] = None, **kwargs ): - super(LivePipelineSetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, **kwargs) + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword name: Required. Live pipeline unique identifier. + :paramtype name: str + :keyword system_data: Read-only system metadata associated with this object. + :paramtype system_data: ~azure.media.videoanalyzer.edge.models.SystemData + :keyword properties: Live pipeline properties.
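Closing the loop, a short sketch that wraps the live pipeline from the previous sketch in the set request defined above and produces the direct-method payload:

from azure.media.videoanalyzeredge import LivePipelineSetRequest

# Sketch: `live_pipeline` is the instance built in the LivePipeline sketch.
set_request = LivePipelineSetRequest(live_pipeline=live_pipeline)
payload = set_request.serialize()  # set_request.method_name == "livePipelineSet"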
+ :paramtype properties: ~azure.media.videoanalyzer.edge.models.LivePipelineProperties + """ + super(LivePipelineSetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, api_version=api_version, **kwargs) self.method_name = 'LivePipelineSetRequestBody' # type: str + self.api_version = api_version self.method_name = 'LivePipelineSetRequestBody' # type: str self.name = name self.system_data = system_data self.properties = properties +class MediaProfile(msrest.serialization.Model): + """Class representing the ONVIF MediaProfiles. + + :ivar name: The name of the Media Profile. + :vartype name: str + :ivar media_uri: Object representing the URI that will be used to request for media streaming. + :vartype media_uri: any + :ivar video_encoder_configuration: The Video encoder configuration. + :vartype video_encoder_configuration: + ~azure.media.videoanalyzer.edge.models.VideoEncoderConfiguration + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'media_uri': {'key': 'mediaUri', 'type': 'object'}, + 'video_encoder_configuration': {'key': 'videoEncoderConfiguration', 'type': 'VideoEncoderConfiguration'}, + } + + def __init__( + self, + *, + name: Optional[str] = None, + media_uri: Optional[Any] = None, + video_encoder_configuration: Optional["VideoEncoderConfiguration"] = None, + **kwargs + ): + """ + :keyword name: The name of the Media Profile. + :paramtype name: str + :keyword media_uri: Object representing the URI that will be used to request for media + streaming. + :paramtype media_uri: any + :keyword video_encoder_configuration: The Video encoder configuration. + :paramtype video_encoder_configuration: + ~azure.media.videoanalyzer.edge.models.VideoEncoderConfiguration + """ + super(MediaProfile, self).__init__(**kwargs) + self.name = name + self.media_uri = media_uri + self.video_encoder_configuration = video_encoder_configuration + + +class MediaUri(msrest.serialization.Model): + """Object representing the URI that will be used to request for media streaming. + + :ivar uri: URI that can be used for media streaming. + :vartype uri: str + """ + + _attribute_map = { + 'uri': {'key': 'uri', 'type': 'str'}, + } + + def __init__( + self, + *, + uri: Optional[str] = None, + **kwargs + ): + """ + :keyword uri: URI that can be used for media streaming. + :paramtype uri: str + """ + super(MediaUri, self).__init__(**kwargs) + self.uri = uri + + class MotionDetectionProcessor(ProcessorNodeBase): """Motion detection processor allows for motion detection on the video stream. It generates motion events whenever motion is present on the video. All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str - :param inputs: Required. An array of upstream node references within the topology to be used as + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Node name. Must be unique within the topology. + :vartype name: str + :ivar inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. - :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] - :param sensitivity: Motion detection sensitivity: low, medium, high. 
Possible values include: + :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :ivar sensitivity: Motion detection sensitivity: low, medium, high. Possible values include: "low", "medium", "high". - :type sensitivity: str or ~azure.media.videoanalyzer.edge.models.MotionDetectionSensitivity - :param output_motion_region: Indicates whether the processor should detect and output the + :vartype sensitivity: str or ~azure.media.videoanalyzer.edge.models.MotionDetectionSensitivity + :ivar output_motion_region: Indicates whether the processor should detect and output the regions within the video frame where motion was detected. Default is true. - :type output_motion_region: bool - :param event_aggregation_window: Time window duration on which events are aggregated before + :vartype output_motion_region: bool + :ivar event_aggregation_window: Time window duration on which events are aggregated before being emitted. Value must be specified in ISO8601 duration format (i.e. "PT2S" equals 2 seconds). Use 0 seconds for no aggregation. Default is 1 second. - :type event_aggregation_window: str + :vartype event_aggregation_window: str """ _validation = { @@ -1480,6 +1981,24 @@ def __init__( event_aggregation_window: Optional[str] = None, **kwargs ): + """ + :keyword name: Required. Node name. Must be unique within the topology. + :paramtype name: str + :keyword inputs: Required. An array of upstream node references within the topology to be used + as inputs for this node. + :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput] + :keyword sensitivity: Motion detection sensitivity: low, medium, high. Possible values include: + "low", "medium", "high". + :paramtype sensitivity: str or + ~azure.media.videoanalyzer.edge.models.MotionDetectionSensitivity + :keyword output_motion_region: Indicates whether the processor should detect and output the + regions within the video frame where motion was detected. Default is true. + :paramtype output_motion_region: bool + :keyword event_aggregation_window: Time window duration on which events are aggregated before + being emitted. Value must be specified in ISO8601 duration format (i.e. "PT2S" equals 2 + seconds). Use 0 seconds for no aggregation. Default is 1 second. + :paramtype event_aggregation_window: str + """ super(MotionDetectionProcessor, self).__init__(name=name, inputs=inputs, **kwargs) self.type = '#Microsoft.VideoAnalyzer.MotionDetectionProcessor' # type: str self.sensitivity = sensitivity @@ -1487,6 +2006,38 @@ def __init__( self.event_aggregation_window = event_aggregation_window +class MPEG4Configuration(msrest.serialization.Model): + """Class representing the MPEG4 Configuration. + + :ivar gov_length: Group of Video frames length. + :vartype gov_length: float + :ivar profile: The MPEG4 Profile. Possible values include: "SP", "ASP". + :vartype profile: str or ~azure.media.videoanalyzer.edge.models.MPEG4Profile + """ + + _attribute_map = { + 'gov_length': {'key': 'govLength', 'type': 'float'}, + 'profile': {'key': 'profile', 'type': 'str'}, + } + + def __init__( + self, + *, + gov_length: Optional[float] = None, + profile: Optional[Union[str, "MPEG4Profile"]] = None, + **kwargs + ): + """ + :keyword gov_length: Group of Video frames length. + :paramtype gov_length: float + :keyword profile: The MPEG4 Profile. Possible values include: "SP", "ASP". 
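`MotionDetectionProcessor` is the first processor node in this file with tunable behavior, so a small construction sketch may help. The node names are illustrative, and `"rtspSource"` is assumed to be a source node declared elsewhere in the same topology:

```python
from azure.media.videoanalyzeredge import MotionDetectionProcessor, NodeInput

motion_processor = MotionDetectionProcessor(
    name="motionDetection",
    inputs=[NodeInput(node_name="rtspSource")],  # upstream node reference
    sensitivity="medium",                        # "low" | "medium" | "high"
    output_motion_region=True,                   # emit the regions where motion was found
    event_aggregation_window="PT2S",             # ISO8601 duration: aggregate events over 2 seconds
)
```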
+ :paramtype profile: str or ~azure.media.videoanalyzer.edge.models.MPEG4Profile + """ + super(MPEG4Configuration, self).__init__(**kwargs) + self.gov_length = gov_length + self.profile = profile + + class NamedLineBase(msrest.serialization.Model): """Base class for named lines. @@ -1495,10 +2046,10 @@ class NamedLineBase(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param type: Required. The Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Line name. Must be unique within the node. - :type name: str + :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Line name. Must be unique within the node. + :vartype name: str """ _validation = { @@ -1521,6 +2072,10 @@ def __init__( name: str, **kwargs ): + """ + :keyword name: Required. Line name. Must be unique within the node. + :paramtype name: str + """ super(NamedLineBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.name = name @@ -1531,15 +2086,15 @@ class NamedLineString(NamedLineBase): All required parameters must be populated in order to send to Azure. - :param type: Required. The Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Line name. Must be unique within the node. - :type name: str - :param line: Required. Point coordinates for the line start and end, respectively. Example: + :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Line name. Must be unique within the node. + :vartype name: str + :ivar line: Required. Point coordinates for the line start and end, respectively. Example: '[[0.3, 0.2],[0.9, 0.8]]'. Each point is expressed as [LEFT, TOP] coordinate ratios ranging from 0.0 to 1.0, where [0,0] is the upper-left frame corner and [1, 1] is the bottom-right frame corner. - :type line: str + :vartype line: str """ _validation = { @@ -1561,6 +2116,15 @@ def __init__( line: str, **kwargs ): + """ + :keyword name: Required. Line name. Must be unique within the node. + :paramtype name: str + :keyword line: Required. Point coordinates for the line start and end, respectively. Example: + '[[0.3, 0.2],[0.9, 0.8]]'. Each point is expressed as [LEFT, TOP] coordinate ratios ranging + from 0.0 to 1.0, where [0,0] is the upper-left frame corner and [1, 1] is the bottom-right + frame corner. + :paramtype line: str + """ super(NamedLineString, self).__init__(name=name, **kwargs) self.type = '#Microsoft.VideoAnalyzer.NamedLineString' # type: str self.line = line @@ -1574,10 +2138,10 @@ class NamedPolygonBase(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param type: Required. The Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Polygon name. Must be unique within the node. - :type name: str + :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Polygon name. Must be unique within the node. + :vartype name: str """ _validation = { @@ -1600,6 +2164,10 @@ def __init__( name: str, **kwargs ): + """ + :keyword name: Required. Polygon name. Must be unique within the node. 
+ :paramtype name: str + """ super(NamedPolygonBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.name = name @@ -1610,14 +2178,14 @@ class NamedPolygonString(NamedPolygonBase): All required parameters must be populated in order to send to Azure. - :param type: Required. The Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Polygon name. Must be unique within the node. - :type name: str - :param polygon: Required. Point coordinates for the polygon. Example: '[[0.3, 0.2],[0.9, + :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar name: Required. Polygon name. Must be unique within the node. + :vartype name: str + :ivar polygon: Required. Point coordinates for the polygon. Example: '[[0.3, 0.2],[0.9, 0.8],[0.7, 0.6]]'. Each point is expressed as [LEFT, TOP] coordinate ratios ranging from 0.0 to 1.0, where [0,0] is the upper-left frame corner and [1, 1] is the bottom-right frame corner. - :type polygon: str + :vartype polygon: str """ _validation = { @@ -1639,6 +2207,14 @@ def __init__( polygon: str, **kwargs ): + """ + :keyword name: Required. Polygon name. Must be unique within the node. + :paramtype name: str + :keyword polygon: Required. Point coordinates for the polygon. Example: '[[0.3, 0.2],[0.9, + 0.8],[0.7, 0.6]]'. Each point is expressed as [LEFT, TOP] coordinate ratios ranging from 0.0 to + 1.0, where [0,0] is the upper-left frame corner and [1, 1] is the bottom-right frame corner. + :paramtype polygon: str + """ super(NamedPolygonString, self).__init__(name=name, **kwargs) self.type = '#Microsoft.VideoAnalyzer.NamedPolygonString' # type: str self.polygon = polygon @@ -1649,12 +2225,12 @@ class NodeInput(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param node_name: Required. The name of the upstream node in the pipeline which output is used + :ivar node_name: Required. The name of the upstream node in the pipeline which output is used as input of the current node. - :type node_name: str - :param output_selectors: Allows for the selection of specific data streams (eg. video only) - from another node. - :type output_selectors: list[~azure.media.videoanalyzer.edge.models.OutputSelector] + :vartype node_name: str + :ivar output_selectors: Allows for the selection of specific data streams (eg. video only) from + another node. + :vartype output_selectors: list[~azure.media.videoanalyzer.edge.models.OutputSelector] """ _validation = { @@ -1673,6 +2249,14 @@ def __init__( output_selectors: Optional[List["OutputSelector"]] = None, **kwargs ): + """ + :keyword node_name: Required. The name of the upstream node in the pipeline which output is + used as input of the current node. + :paramtype node_name: str + :keyword output_selectors: Allows for the selection of specific data streams (eg. video only) + from another node. + :paramtype output_selectors: list[~azure.media.videoanalyzer.edge.models.OutputSelector] + """ super(NodeInput, self).__init__(**kwargs) self.node_name = node_name self.output_selectors = output_selectors @@ -1683,16 +2267,16 @@ class ObjectTrackingProcessor(ProcessorNodeBase): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. 
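The `line` and `polygon` strings above use normalized frame coordinates, which are easy to get wrong. A short sketch of both named shapes; the names are illustrative, and these objects are typically consumed by the line-crossing and zone processors defined elsewhere in this module:

```python
from azure.media.videoanalyzeredge import NamedLineString, NamedPolygonString

# Each point is a [LEFT, TOP] ratio in 0.0..1.0: [0, 0] is the upper-left
# frame corner and [1, 1] is the bottom-right frame corner.
door_line = NamedLineString(name="doorLine", line="[[0.3, 0.2], [0.9, 0.8]]")
lobby_zone = NamedPolygonString(name="lobbyZone", polygon="[[0.3, 0.2], [0.9, 0.8], [0.7, 0.6]]")
```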
- :type name: str
- :param inputs: Required. An array of upstream node references within the topology to be used as
+ :ivar type: Required. Type discriminator for the derived types.Constant filled by server.
+ :vartype type: str
+ :ivar name: Required. Node name. Must be unique within the topology.
+ :vartype name: str
+ :ivar inputs: Required. An array of upstream node references within the topology to be used as
 inputs for this node.
- :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
- :param accuracy: Object tracker accuracy: low, medium, high. Higher accuracy leads to higher
- CPU consumption in average. Possible values include: "low", "medium", "high".
- :type accuracy: str or ~azure.media.videoanalyzer.edge.models.ObjectTrackingAccuracy
+ :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
+ :ivar accuracy: Object tracker accuracy: low, medium, high. Higher accuracy leads to higher CPU
+ consumption on average. Possible values include: "low", "medium", "high".
+ :vartype accuracy: str or ~azure.media.videoanalyzer.edge.models.ObjectTrackingAccuracy
 """

 _validation = {
@@ -1716,21 +2300,282 @@ def __init__(
 accuracy: Optional[Union[str, "ObjectTrackingAccuracy"]] = None,
 **kwargs
 ):
+ """
+ :keyword name: Required. Node name. Must be unique within the topology.
+ :paramtype name: str
+ :keyword inputs: Required. An array of upstream node references within the topology to be used
+ as inputs for this node.
+ :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
+ :keyword accuracy: Object tracker accuracy: low, medium, high. Higher accuracy leads to higher
+ CPU consumption on average. Possible values include: "low", "medium", "high".
+ :paramtype accuracy: str or ~azure.media.videoanalyzer.edge.models.ObjectTrackingAccuracy
+ """
 super(ObjectTrackingProcessor, self).__init__(name=name, inputs=inputs, **kwargs)
 self.type = '#Microsoft.VideoAnalyzer.ObjectTrackingProcessor'  # type: str
 self.accuracy = accuracy


+class OnvifDevice(msrest.serialization.Model):
+ """The ONVIF device properties.
+
+ :ivar hostname: The hostname of the ONVIF device.
+ :vartype hostname: ~azure.media.videoanalyzer.edge.models.OnvifHostName
+ :ivar system_date_time: The system date and time of the ONVIF device.
+ :vartype system_date_time: ~azure.media.videoanalyzer.edge.models.OnvifSystemDateTime
+ :ivar dns: The ONVIF device DNS properties.
+ :vartype dns: ~azure.media.videoanalyzer.edge.models.OnvifDns
+ :ivar media_profiles: An array of ONVIF media profiles supported by the ONVIF device.
+ :vartype media_profiles: list[~azure.media.videoanalyzer.edge.models.MediaProfile]
+ """
+
+ _attribute_map = {
+ 'hostname': {'key': 'hostname', 'type': 'OnvifHostName'},
+ 'system_date_time': {'key': 'systemDateTime', 'type': 'OnvifSystemDateTime'},
+ 'dns': {'key': 'dns', 'type': 'OnvifDns'},
+ 'media_profiles': {'key': 'mediaProfiles', 'type': '[MediaProfile]'},
+ }
+
+ def __init__(
+ self,
+ *,
+ hostname: Optional["OnvifHostName"] = None,
+ system_date_time: Optional["OnvifSystemDateTime"] = None,
+ dns: Optional["OnvifDns"] = None,
+ media_profiles: Optional[List["MediaProfile"]] = None,
+ **kwargs
+ ):
+ """
+ :keyword hostname: The hostname of the ONVIF device.
+ :paramtype hostname: ~azure.media.videoanalyzer.edge.models.OnvifHostName
+ :keyword system_date_time: The system date and time of the ONVIF device.
+ :paramtype system_date_time: ~azure.media.videoanalyzer.edge.models.OnvifSystemDateTime
+ :keyword dns: The ONVIF device DNS properties.
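A construction sketch for the tracker documented above; the accuracy/CPU trade-off is its only knob. `"motionDetection"` reuses the illustrative node name from the earlier motion-detection sketch:

```python
from azure.media.videoanalyzeredge import NodeInput, ObjectTrackingProcessor

tracker = ObjectTrackingProcessor(
    name="objectTracker",
    inputs=[NodeInput(node_name="motionDetection")],
    accuracy="low",  # "low" trades tracking accuracy for lower CPU usage
)
```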
+ :paramtype dns: ~azure.media.videoanalyzer.edge.models.OnvifDns + :keyword media_profiles: An array of of ONVIF media profiles supported by the ONVIF device. + :paramtype media_profiles: list[~azure.media.videoanalyzer.edge.models.MediaProfile] + """ + super(OnvifDevice, self).__init__(**kwargs) + self.hostname = hostname + self.system_date_time = system_date_time + self.dns = dns + self.media_profiles = media_profiles + + +class OnvifDeviceDiscoverRequest(MethodRequest): + """Lists all the discoverable ONVIF devices on the same subnet as the Edge Module. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. Direct method method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". + :vartype api_version: str + :ivar discovery_duration: The amount of time that the ONVIF device discovery will wait for + supported device responses. + :vartype discovery_duration: str + """ + + _validation = { + 'method_name': {'required': True, 'readonly': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'discovery_duration': {'key': 'discoveryDuration', 'type': 'str'}, + } + + def __init__( + self, + *, + api_version: Optional[str] = "1.1", + discovery_duration: Optional[str] = None, + **kwargs + ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword discovery_duration: The amount of time that the ONVIF device discovery will wait for + supported device responses. + :paramtype discovery_duration: str + """ + super(OnvifDeviceDiscoverRequest, self).__init__(api_version=api_version, **kwargs) + self.method_name = 'onvifDeviceDiscover' # type: str + self.discovery_duration = discovery_duration + + +class OnvifDeviceGetRequest(MethodRequest): + """Retrieves properties and media profiles of an ONVIF device. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. Direct method method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". + :vartype api_version: str + :ivar endpoint: Required. Base class for endpoints. + :vartype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase + """ + + _validation = { + 'method_name': {'required': True, 'readonly': True}, + 'endpoint': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'}, + } + + def __init__( + self, + *, + endpoint: "EndpointBase", + api_version: Optional[str] = "1.1", + **kwargs + ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword endpoint: Required. Base class for endpoints. 
+ :paramtype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase + """ + super(OnvifDeviceGetRequest, self).__init__(api_version=api_version, **kwargs) + self.method_name = 'onvifDeviceGet' # type: str + self.endpoint = endpoint + + +class OnvifDns(msrest.serialization.Model): + """The ONVIF device DNS properties. + + :ivar from_dhcp: Result value showing if the ONVIF device is configured to use DHCP. + :vartype from_dhcp: bool + :ivar ipv4_address: An array of IPv4 address for the discovered ONVIF device. + :vartype ipv4_address: list[str] + :ivar ipv6_address: An array of IPv6 address for the discovered ONVIF device. + :vartype ipv6_address: list[str] + """ + + _attribute_map = { + 'from_dhcp': {'key': 'fromDhcp', 'type': 'bool'}, + 'ipv4_address': {'key': 'ipv4Address', 'type': '[str]'}, + 'ipv6_address': {'key': 'ipv6Address', 'type': '[str]'}, + } + + def __init__( + self, + *, + from_dhcp: Optional[bool] = None, + ipv4_address: Optional[List[str]] = None, + ipv6_address: Optional[List[str]] = None, + **kwargs + ): + """ + :keyword from_dhcp: Result value showing if the ONVIF device is configured to use DHCP. + :paramtype from_dhcp: bool + :keyword ipv4_address: An array of IPv4 address for the discovered ONVIF device. + :paramtype ipv4_address: list[str] + :keyword ipv6_address: An array of IPv6 address for the discovered ONVIF device. + :paramtype ipv6_address: list[str] + """ + super(OnvifDns, self).__init__(**kwargs) + self.from_dhcp = from_dhcp + self.ipv4_address = ipv4_address + self.ipv6_address = ipv6_address + + +class OnvifHostName(msrest.serialization.Model): + """The ONVIF device DNS properties. + + :ivar from_dhcp: Result value showing if the ONVIF device is configured to use DHCP. + :vartype from_dhcp: bool + :ivar hostname: The hostname of the ONVIF device. + :vartype hostname: str + """ + + _attribute_map = { + 'from_dhcp': {'key': 'fromDhcp', 'type': 'bool'}, + 'hostname': {'key': 'hostname', 'type': 'str'}, + } + + def __init__( + self, + *, + from_dhcp: Optional[bool] = None, + hostname: Optional[str] = None, + **kwargs + ): + """ + :keyword from_dhcp: Result value showing if the ONVIF device is configured to use DHCP. + :paramtype from_dhcp: bool + :keyword hostname: The hostname of the ONVIF device. + :paramtype hostname: str + """ + super(OnvifHostName, self).__init__(**kwargs) + self.from_dhcp = from_dhcp + self.hostname = hostname + + +class OnvifSystemDateTime(msrest.serialization.Model): + """The ONVIF device DNS properties. + + :ivar type: An enum value determining whether the date time was configured using NTP or manual. + Possible values include: "Ntp", "Manual". + :vartype type: str or ~azure.media.videoanalyzer.edge.models.OnvifSystemDateTimeType + :ivar time: The device datetime returned when calling the request. + :vartype time: str + :ivar time_zone: The timezone of the ONVIF device datetime. + :vartype time_zone: str + """ + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'time': {'key': 'time', 'type': 'str'}, + 'time_zone': {'key': 'timeZone', 'type': 'str'}, + } + + def __init__( + self, + *, + type: Optional[Union[str, "OnvifSystemDateTimeType"]] = None, + time: Optional[str] = None, + time_zone: Optional[str] = None, + **kwargs + ): + """ + :keyword type: An enum value determining whether the date time was configured using NTP or + manual. Possible values include: "Ntp", "Manual". 
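The two ONVIF requests above are invoked like any other direct method. A minimal sketch, assuming the IoT Hub identifiers are placeholders and that `UnsecuredEndpoint` and `UsernamePasswordCredentials` (endpoint and credential types defined elsewhere in this module) suit the target camera:

```python
from azure.iot.hub import IoTHubRegistryManager
from azure.iot.hub.models import CloudToDeviceMethod

from azure.media.videoanalyzeredge import (
    OnvifDeviceDiscoverRequest,
    OnvifDeviceGetRequest,
    UnsecuredEndpoint,
    UsernamePasswordCredentials,
)

registry_manager = IoTHubRegistryManager("<iothub-connection-string>")

# Wait up to 8 seconds (ISO8601 duration) for devices on the module's subnet to respond.
discover = OnvifDeviceDiscoverRequest(discovery_duration="PT8S")
registry_manager.invoke_device_module_method(
    "<device-id>", "<module-id>",
    CloudToDeviceMethod(method_name=discover.method_name, payload=discover.serialize()),
)

# Fetch the properties and media profiles of one discovered device.
get_request = OnvifDeviceGetRequest(
    endpoint=UnsecuredEndpoint(
        url="rtsp://192.168.1.10:554",
        credentials=UsernamePasswordCredentials(username="<user>", password="<password>"),
    )
)
response = registry_manager.invoke_device_module_method(
    "<device-id>", "<module-id>",
    CloudToDeviceMethod(method_name=get_request.method_name, payload=get_request.serialize()),
)
```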
+ :paramtype type: str or ~azure.media.videoanalyzer.edge.models.OnvifSystemDateTimeType + :keyword time: The device datetime returned when calling the request. + :paramtype time: str + :keyword time_zone: The timezone of the ONVIF device datetime. + :paramtype time_zone: str + """ + super(OnvifSystemDateTime, self).__init__(**kwargs) + self.type = type + self.time = time + self.time_zone = time_zone + + class OutputSelector(msrest.serialization.Model): """Allows for the selection of particular streams from another node. - :param property: The property of the data stream to be used as the selection criteria. Possible + :ivar property: The property of the data stream to be used as the selection criteria. Possible values include: "mediaType". - :type property: str or ~azure.media.videoanalyzer.edge.models.OutputSelectorProperty - :param operator: The operator to compare properties by. Possible values include: "is", "isNot". - :type operator: str or ~azure.media.videoanalyzer.edge.models.OutputSelectorOperator - :param value: Value to compare against. - :type value: str + :vartype property: str or ~azure.media.videoanalyzer.edge.models.OutputSelectorProperty + :ivar operator: The operator to compare properties by. Possible values include: "is", "isNot". + :vartype operator: str or ~azure.media.videoanalyzer.edge.models.OutputSelectorOperator + :ivar value: Value to compare against. + :vartype value: str """ _attribute_map = { @@ -1747,6 +2592,16 @@ def __init__( value: Optional[str] = None, **kwargs ): + """ + :keyword property: The property of the data stream to be used as the selection criteria. + Possible values include: "mediaType". + :paramtype property: str or ~azure.media.videoanalyzer.edge.models.OutputSelectorProperty + :keyword operator: The operator to compare properties by. Possible values include: "is", + "isNot". + :paramtype operator: str or ~azure.media.videoanalyzer.edge.models.OutputSelectorOperator + :keyword value: Value to compare against. + :paramtype value: str + """ super(OutputSelector, self).__init__(**kwargs) self.property = property self.operator = operator @@ -1758,16 +2613,16 @@ class ParameterDeclaration(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param name: Required. Name of the parameter. - :type name: str - :param type: Required. Type of the parameter. Possible values include: "string", - "secretString", "int", "double", "bool". - :type type: str or ~azure.media.videoanalyzer.edge.models.ParameterType - :param description: Description of the parameter. - :type description: str - :param default: The default value for the parameter to be used if the live pipeline does not + :ivar name: Required. Name of the parameter. + :vartype name: str + :ivar type: Required. Type of the parameter. Possible values include: "string", "secretString", + "int", "double", "bool". + :vartype type: str or ~azure.media.videoanalyzer.edge.models.ParameterType + :ivar description: Description of the parameter. + :vartype description: str + :ivar default: The default value for the parameter to be used if the live pipeline does not specify a value. - :type default: str + :vartype default: str """ _validation = { @@ -1791,6 +2646,18 @@ def __init__( default: Optional[str] = None, **kwargs ): + """ + :keyword name: Required. Name of the parameter. + :paramtype name: str + :keyword type: Required. Type of the parameter. Possible values include: "string", + "secretString", "int", "double", "bool". 
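On the way back, the `onvifDeviceGet` payload can be rehydrated into the models above. A sketch under the assumption that `response` is the `CloudToDeviceMethodResult` from the previous example and that its payload is the `OnvifDevice` JSON:

```python
from azure.media.videoanalyzeredge import OnvifDevice

device = OnvifDevice.deserialize(response.payload)

print(device.hostname.hostname if device.hostname else "<no hostname>")
for profile in device.media_profiles or []:
    # media_uri is typed `any` on the wire, so it comes back as a plain mapping.
    uri = (profile.media_uri or {}).get("uri")
    print(profile.name, uri)
```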
+ :paramtype type: str or ~azure.media.videoanalyzer.edge.models.ParameterType + :keyword description: Description of the parameter. + :paramtype description: str + :keyword default: The default value for the parameter to be used if the live pipeline does not + specify a value. + :paramtype default: str + """ super(ParameterDeclaration, self).__init__(**kwargs) self.name = name self.type = type @@ -1803,10 +2670,10 @@ class ParameterDefinition(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param name: Required. Name of the parameter declared in the pipeline topology. - :type name: str - :param value: Parameter value to be applied on this specific live pipeline. - :type value: str + :ivar name: Required. Name of the parameter declared in the pipeline topology. + :vartype name: str + :ivar value: Parameter value to be applied on this specific live pipeline. + :vartype value: str """ _validation = { @@ -1825,6 +2692,12 @@ def __init__( value: Optional[str] = None, **kwargs ): + """ + :keyword name: Required. Name of the parameter declared in the pipeline topology. + :paramtype name: str + :keyword value: Parameter value to be applied on this specific live pipeline. + :paramtype value: str + """ super(ParameterDefinition, self).__init__(**kwargs) self.name = name self.value = value @@ -1835,10 +2708,10 @@ class PemCertificateList(CertificateSource): All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param certificates: Required. PEM formatted public certificates. One certificate per entry. - :type certificates: list[str] + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar certificates: Required. PEM formatted public certificates. One certificate per entry. + :vartype certificates: list[str] """ _validation = { @@ -1857,6 +2730,10 @@ def __init__( certificates: List[str], **kwargs ): + """ + :keyword certificates: Required. PEM formatted public certificates. One certificate per entry. + :paramtype certificates: list[str] + """ super(PemCertificateList, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.PemCertificateList' # type: str self.certificates = certificates @@ -1873,12 +2750,12 @@ class PipelineTopology(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param name: Required. Pipeline topology unique identifier. - :type name: str - :param system_data: Read-only system metadata associated with this object. - :type system_data: ~azure.media.videoanalyzer.edge.models.SystemData - :param properties: Pipeline topology properties. - :type properties: ~azure.media.videoanalyzer.edge.models.PipelineTopologyProperties + :ivar name: Required. Pipeline topology unique identifier. + :vartype name: str + :ivar system_data: Read-only system metadata associated with this object. + :vartype system_data: ~azure.media.videoanalyzer.edge.models.SystemData + :ivar properties: Pipeline topology properties. + :vartype properties: ~azure.media.videoanalyzer.edge.models.PipelineTopologyProperties """ _validation = { @@ -1899,6 +2776,14 @@ def __init__( properties: Optional["PipelineTopologyProperties"] = None, **kwargs ): + """ + :keyword name: Required. Pipeline topology unique identifier. + :paramtype name: str + :keyword system_data: Read-only system metadata associated with this object. 
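`ParameterDeclaration` and `ParameterDefinition` are two halves of the same mechanism: the topology declares a parameter once, and each live pipeline supplies a value for it. A small sketch with illustrative names:

```python
from azure.media.videoanalyzeredge import ParameterDeclaration, ParameterDefinition

# Declared in the topology; nodes reference it as "${rtspPassword}".
# No default is given, so every live pipeline must define a value.
password_declaration = ParameterDeclaration(
    name="rtspPassword",
    type="secretString",
    description="Password for the RTSP camera",
)

# Defined per live pipeline.
password_definition = ParameterDefinition(name="rtspPassword", value="<camera-password>")
```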
+ :paramtype system_data: ~azure.media.videoanalyzer.edge.models.SystemData + :keyword properties: Pipeline topology properties. + :paramtype properties: ~azure.media.videoanalyzer.edge.models.PipelineTopologyProperties + """ super(PipelineTopology, self).__init__(**kwargs) self.name = name self.system_data = system_data @@ -1908,12 +2793,12 @@ def __init__( class PipelineTopologyCollection(msrest.serialization.Model): """A collection of pipeline topologies. - :param value: List of pipeline topologies. - :type value: list[~azure.media.videoanalyzer.edge.models.PipelineTopology] - :param continuation_token: A continuation token to be used in subsequent calls when enumerating + :ivar value: List of pipeline topologies. + :vartype value: list[~azure.media.videoanalyzer.edge.models.PipelineTopology] + :ivar continuation_token: A continuation token to be used in subsequent calls when enumerating through the collection. This is returned when the collection results won't fit in a single response. - :type continuation_token: str + :vartype continuation_token: str """ _attribute_map = { @@ -1928,6 +2813,14 @@ def __init__( continuation_token: Optional[str] = None, **kwargs ): + """ + :keyword value: List of pipeline topologies. + :paramtype value: list[~azure.media.videoanalyzer.edge.models.PipelineTopology] + :keyword continuation_token: A continuation token to be used in subsequent calls when + enumerating through the collection. This is returned when the collection results won't fit in a + single response. + :paramtype continuation_token: str + """ super(PipelineTopologyCollection, self).__init__(**kwargs) self.value = value self.continuation_token = continuation_token @@ -1942,15 +2835,15 @@ class PipelineTopologyDeleteRequest(MethodRequestEmptyBodyBase): :ivar method_name: Required. Direct method method name.Constant filled by server. :vartype method_name: str - :ivar api_version: Video Analyzer API version. Default value: "1.0". + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". :vartype api_version: str - :param name: Required. Resource name. - :type name: str + :ivar name: Required. Resource name. + :vartype name: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, - 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1960,15 +2853,21 @@ class PipelineTopologyDeleteRequest(MethodRequestEmptyBodyBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" - def __init__( self, *, name: str, + api_version: Optional[str] = "1.1", **kwargs ): - super(PipelineTopologyDeleteRequest, self).__init__(name=name, **kwargs) + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword name: Required. Resource name. + :paramtype name: str + """ + super(PipelineTopologyDeleteRequest, self).__init__(api_version=api_version, name=name, **kwargs) self.method_name = 'pipelineTopologyDelete' # type: str @@ -1981,15 +2880,15 @@ class PipelineTopologyGetRequest(MethodRequestEmptyBodyBase): :ivar method_name: Required. Direct method method name.Constant filled by server. :vartype method_name: str - :ivar api_version: Video Analyzer API version. Default value: "1.0". + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". :vartype api_version: str - :param name: Required. 
Resource name. - :type name: str + :ivar name: Required. Resource name. + :vartype name: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, - 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1999,15 +2898,21 @@ class PipelineTopologyGetRequest(MethodRequestEmptyBodyBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" - def __init__( self, *, name: str, + api_version: Optional[str] = "1.1", **kwargs ): - super(PipelineTopologyGetRequest, self).__init__(name=name, **kwargs) + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword name: Required. Resource name. + :paramtype name: str + """ + super(PipelineTopologyGetRequest, self).__init__(api_version=api_version, name=name, **kwargs) self.method_name = 'pipelineTopologyGet' # type: str @@ -2020,13 +2925,13 @@ class PipelineTopologyListRequest(MethodRequest): :ivar method_name: Required. Direct method method name.Constant filled by server. :vartype method_name: str - :ivar api_version: Video Analyzer API version. Default value: "1.0". + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". :vartype api_version: str """ _validation = { 'method_name': {'required': True, 'readonly': True}, - 'api_version': {'constant': True}, } _attribute_map = { @@ -2034,36 +2939,41 @@ class PipelineTopologyListRequest(MethodRequest): 'api_version': {'key': '@apiVersion', 'type': 'str'}, } - api_version = "1.0" - def __init__( self, + *, + api_version: Optional[str] = "1.1", **kwargs ): - super(PipelineTopologyListRequest, self).__init__(**kwargs) + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + """ + super(PipelineTopologyListRequest, self).__init__(api_version=api_version, **kwargs) self.method_name = 'pipelineTopologyList' # type: str class PipelineTopologyProperties(msrest.serialization.Model): """Pipeline topology properties. - :param description: An optional description of the pipeline topology. It is recommended that - the expected use of the topology to be described here. - :type description: str - :param parameters: List of the topology parameter declarations. Parameters declared here can be + :ivar description: An optional description of the pipeline topology. It is recommended that the + expected use of the topology to be described here. + :vartype description: str + :ivar parameters: List of the topology parameter declarations. Parameters declared here can be referenced throughout the topology nodes through the use of "${PARAMETER_NAME}" string pattern. Parameters can have optional default values and can later be defined in individual instances of the pipeline. - :type parameters: list[~azure.media.videoanalyzer.edge.models.ParameterDeclaration] - :param sources: List of the topology source nodes. Source nodes enable external data to be + :vartype parameters: list[~azure.media.videoanalyzer.edge.models.ParameterDeclaration] + :ivar sources: List of the topology source nodes. Source nodes enable external data to be ingested by the pipeline. - :type sources: list[~azure.media.videoanalyzer.edge.models.SourceNodeBase] - :param processors: List of the topology processor nodes. Processor nodes enable pipeline data - to be analyzed, processed or transformed. 
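`PipelineTopologyListRequest` pairs with the `PipelineTopologyCollection` model earlier in this file. A sketch of listing topologies and checking for truncated results; the IoT Hub identifiers are placeholders, and the response payload is assumed to match the collection model:

```python
from azure.iot.hub import IoTHubRegistryManager
from azure.iot.hub.models import CloudToDeviceMethod

from azure.media.videoanalyzeredge import PipelineTopologyCollection, PipelineTopologyListRequest

registry_manager = IoTHubRegistryManager("<iothub-connection-string>")

list_request = PipelineTopologyListRequest()  # api_version defaults to "1.1"
response = registry_manager.invoke_device_module_method(
    "<device-id>", "<module-id>",
    CloudToDeviceMethod(method_name=list_request.method_name, payload=list_request.serialize()),
)

collection = PipelineTopologyCollection.deserialize(response.payload)
for topology in collection.value or []:
    print(topology.name)

# A non-None token means the results did not fit in a single response.
print(collection.continuation_token)
```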
- :type processors: list[~azure.media.videoanalyzer.edge.models.ProcessorNodeBase] - :param sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored or + :vartype sources: list[~azure.media.videoanalyzer.edge.models.SourceNodeBase] + :ivar processors: List of the topology processor nodes. Processor nodes enable pipeline data to + be analyzed, processed or transformed. + :vartype processors: list[~azure.media.videoanalyzer.edge.models.ProcessorNodeBase] + :ivar sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored or exported. - :type sinks: list[~azure.media.videoanalyzer.edge.models.SinkNodeBase] + :vartype sinks: list[~azure.media.videoanalyzer.edge.models.SinkNodeBase] """ _attribute_map = { @@ -2084,6 +2994,25 @@ def __init__( sinks: Optional[List["SinkNodeBase"]] = None, **kwargs ): + """ + :keyword description: An optional description of the pipeline topology. It is recommended that + the expected use of the topology to be described here. + :paramtype description: str + :keyword parameters: List of the topology parameter declarations. Parameters declared here can + be referenced throughout the topology nodes through the use of "${PARAMETER_NAME}" string + pattern. Parameters can have optional default values and can later be defined in individual + instances of the pipeline. + :paramtype parameters: list[~azure.media.videoanalyzer.edge.models.ParameterDeclaration] + :keyword sources: List of the topology source nodes. Source nodes enable external data to be + ingested by the pipeline. + :paramtype sources: list[~azure.media.videoanalyzer.edge.models.SourceNodeBase] + :keyword processors: List of the topology processor nodes. Processor nodes enable pipeline data + to be analyzed, processed or transformed. + :paramtype processors: list[~azure.media.videoanalyzer.edge.models.ProcessorNodeBase] + :keyword sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored or + exported. + :paramtype sinks: list[~azure.media.videoanalyzer.edge.models.SinkNodeBase] + """ super(PipelineTopologyProperties, self).__init__(**kwargs) self.description = description self.parameters = parameters @@ -2101,9 +3030,10 @@ class PipelineTopologySetRequest(MethodRequest): :ivar method_name: Required. Direct method method name.Constant filled by server. :vartype method_name: str - :ivar api_version: Video Analyzer API version. Default value: "1.0". + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". :vartype api_version: str - :param pipeline_topology: Required. Pipeline topology describes the processing steps to be + :ivar pipeline_topology: Required. Pipeline topology describes the processing steps to be applied when processing media for a particular outcome. The topology should be defined according to the scenario to be achieved and can be reused across many pipeline instances which share the same processing characteristics. For instance, a pipeline topology which acquires @@ -2122,12 +3052,11 @@ class PipelineTopologySetRequest(MethodRequest): * Processors: list of nodes which perform data analysis or transformations. -Sinks: list of one or more data sinks which allow for data to be stored or exported to other destinations. 
- :type pipeline_topology: ~azure.media.videoanalyzer.edge.models.PipelineTopology
+ :vartype pipeline_topology: ~azure.media.videoanalyzer.edge.models.PipelineTopology
 """

 _validation = {
 'method_name': {'required': True, 'readonly': True},
- 'api_version': {'constant': True},
 'pipeline_topology': {'required': True},
 }

@@ -2137,21 +3066,51 @@ class PipelineTopologySetRequest(MethodRequest):
 'pipeline_topology': {'key': 'pipelineTopology', 'type': 'PipelineTopology'},
 }

- api_version = "1.0"
-
 def __init__(
 self,
 *,
 pipeline_topology: "PipelineTopology",
+ api_version: Optional[str] = "1.1",
 **kwargs
 ):
- super(PipelineTopologySetRequest, self).__init__(**kwargs)
+ """
+ :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are
+ None and "1.1". The default value is "1.1".
+ :paramtype api_version: str
+ :keyword pipeline_topology: Required. Pipeline topology describes the processing steps to be
+ applied when processing media for a particular outcome. The topology should be defined
+ according to the scenario to be achieved and can be reused across many pipeline instances which
+ share the same processing characteristics. For instance, a pipeline topology which acquires
+ data from an RTSP camera, processes it with a specific AI model, and stores the data in the
+ cloud can be reused across many different cameras, as long as the same processing is to be
+ applied across all the cameras. Individual instance properties can be defined through the use
+ of user-defined parameters, which allow for a topology to be parameterized, thus allowing
+ individual pipelines to refer to different values, such as individual cameras' RTSP endpoints
+ and credentials. Overall a topology is composed of the following:
+
+
+ * Parameters: list of user-defined parameters that can be referenced across the topology
+ nodes.
+ * Sources: list of one or more data source nodes such as an RTSP source which allows for
+ media to be ingested from cameras.
+ * Processors: list of nodes which perform data analysis or transformations.
+ * Sinks: list of one or more data sinks which allow for data to be stored or exported to
+ other destinations.
+ :paramtype pipeline_topology: ~azure.media.videoanalyzer.edge.models.PipelineTopology
+ """
+ super(PipelineTopologySetRequest, self).__init__(api_version=api_version, **kwargs)
 self.method_name = 'pipelineTopologySet'  # type: str
 self.pipeline_topology = pipeline_topology


 class PipelineTopologySetRequestBody(PipelineTopology, MethodRequest):
- """Pipeline topology resource representation.
+ """Pipeline topology describes the processing steps to be applied when processing media for a particular outcome. The topology should be defined according to the scenario to be achieved and can be reused across many pipeline instances which share the same processing characteristics. For instance, a pipeline topology which acquires data from an RTSP camera, processes it with a specific AI model, and stores the data in the cloud can be reused across many different cameras, as long as the same processing is to be applied across all the cameras. Individual instance properties can be defined through the use of user-defined parameters, which allow for a topology to be parameterized, thus allowing individual pipelines to refer to different values, such as individual cameras' RTSP endpoints and credentials. Overall a topology is composed of the following:
+
+
+* Parameters: list of user-defined parameters that can be referenced across the topology nodes.
+* Sources: list of one or more data source nodes such as an RTSP source which allows for media to be ingested from cameras.
+* Processors: list of nodes which perform data analysis or transformations.
+* Sinks: list of one or more data sinks which allow for data to be stored or exported to other destinations.

 Variables are only populated by the server, and will be ignored when sending a request.

@@ -2159,19 +3118,19 @@ class PipelineTopologySetRequestBody(PipelineTopology, MethodRequest):

 :ivar method_name: Required. Direct method method name.Constant filled by server.
 :vartype method_name: str
- :ivar api_version: Video Analyzer API version. Default value: "1.0".
+ :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None
+ and "1.1". The default value is "1.1".
 :vartype api_version: str
- :param name: Required. Pipeline topology unique identifier.
- :type name: str
- :param system_data: Read-only system metadata associated with this object.
- :type system_data: ~azure.media.videoanalyzer.edge.models.SystemData
- :param properties: Pipeline topology properties.
- :type properties: ~azure.media.videoanalyzer.edge.models.PipelineTopologyProperties
+ :ivar name: Required. Pipeline topology unique identifier.
+ :vartype name: str
+ :ivar system_data: Read-only system metadata associated with this object.
+ :vartype system_data: ~azure.media.videoanalyzer.edge.models.SystemData
+ :ivar properties: Pipeline topology properties.
+ :vartype properties: ~azure.media.videoanalyzer.edge.models.PipelineTopologyProperties
 """

 _validation = {
 'method_name': {'required': True, 'readonly': True},
- 'api_version': {'constant': True},
 'name': {'required': True},
 }

@@ -2183,84 +3142,565 @@
 'properties': {'key': 'properties', 'type': 'PipelineTopologyProperties'},
 }

- api_version = "1.0"
-
 def __init__(
 self,
 *,
 name: str,
+ api_version: Optional[str] = "1.1",
 system_data: Optional["SystemData"] = None,
 properties: Optional["PipelineTopologyProperties"] = None,
 **kwargs
 ):
- super(PipelineTopologySetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, **kwargs)
+ """
+ :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are
+ None and "1.1". The default value is "1.1".
+ :paramtype api_version: str
+ :keyword name: Required. Pipeline topology unique identifier.
+ :paramtype name: str
+ :keyword system_data: Read-only system metadata associated with this object.
+ :paramtype system_data: ~azure.media.videoanalyzer.edge.models.SystemData
+ :keyword properties: Pipeline topology properties.
+ :paramtype properties: ~azure.media.videoanalyzer.edge.models.PipelineTopologyProperties
+ """
+ super(PipelineTopologySetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, api_version=api_version, **kwargs)
 self.method_name = 'PipelineTopologySetRequestBody'  # type: str
+ self.api_version = api_version
 self.name = name
 self.system_data = system_data
 self.properties = properties


-class RtspSource(SourceNodeBase):
- """RTSP source allows for media from an RTSP camera or generic RTSP server to be ingested into a live pipeline.
+class RateControl(msrest.serialization.Model):
+ """Class representing the video's rate control.
+
+ :ivar bit_rate_limit: the maximum output bitrate in kbps.
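Putting the topology pieces together, here is a condensed sketch of a set request in the sources/processors/sinks shape the docstring describes. Node and parameter names are illustrative, and `RtspSource`, `UnsecuredEndpoint`, `UsernamePasswordCredentials`, and `VideoSink` are model types defined elsewhere in this module (the sink's cache settings are placeholder values):

```python
from azure.iot.hub import IoTHubRegistryManager
from azure.iot.hub.models import CloudToDeviceMethod

from azure.media.videoanalyzeredge import (
    MotionDetectionProcessor,
    NodeInput,
    ParameterDeclaration,
    PipelineTopology,
    PipelineTopologyProperties,
    PipelineTopologySetRequest,
    RtspSource,
    UnsecuredEndpoint,
    UsernamePasswordCredentials,
    VideoSink,
)

topology = PipelineTopology(
    name="motionTopology",
    properties=PipelineTopologyProperties(
        description="Record video when motion is detected",
        parameters=[
            ParameterDeclaration(name="rtspUrl", type="string"),
            ParameterDeclaration(name="rtspUserName", type="string", default="dummyUser"),
            ParameterDeclaration(name="rtspPassword", type="secretString"),
        ],
        sources=[
            RtspSource(
                name="rtspSource",
                endpoint=UnsecuredEndpoint(
                    url="${rtspUrl}",
                    credentials=UsernamePasswordCredentials(
                        username="${rtspUserName}", password="${rtspPassword}"
                    ),
                ),
            )
        ],
        processors=[
            MotionDetectionProcessor(
                name="motionDetection", inputs=[NodeInput(node_name="rtspSource")]
            )
        ],
        sinks=[
            VideoSink(
                name="videoSink",
                inputs=[NodeInput(node_name="motionDetection")],
                video_name="camera001",
                local_media_cache_path="/var/lib/videoanalyzer/tmp/",
                local_media_cache_maximum_size_mi_b="1024",
            )
        ],
    ),
)

set_request = PipelineTopologySetRequest(pipeline_topology=topology)
registry_manager = IoTHubRegistryManager("<iothub-connection-string>")
registry_manager.invoke_device_module_method(
    "<device-id>", "<module-id>",
    CloudToDeviceMethod(method_name=set_request.method_name, payload=set_request.serialize()),
)
```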
+ :vartype bit_rate_limit: float + :ivar encoding_interval: Interval at which images are encoded and transmitted. + :vartype encoding_interval: float + :ivar frame_rate_limit: Maximum output framerate in fps. + :vartype frame_rate_limit: float + :ivar guaranteed_frame_rate: A value of true indicates that frame rate is a fixed value rather + than an upper limit, and that the video encoder shall prioritize frame rate over all other + adaptable configuration values such as bitrate. + :vartype guaranteed_frame_rate: bool + """ + + _attribute_map = { + 'bit_rate_limit': {'key': 'bitRateLimit', 'type': 'float'}, + 'encoding_interval': {'key': 'encodingInterval', 'type': 'float'}, + 'frame_rate_limit': {'key': 'frameRateLimit', 'type': 'float'}, + 'guaranteed_frame_rate': {'key': 'guaranteedFrameRate', 'type': 'bool'}, + } + + def __init__( + self, + *, + bit_rate_limit: Optional[float] = None, + encoding_interval: Optional[float] = None, + frame_rate_limit: Optional[float] = None, + guaranteed_frame_rate: Optional[bool] = None, + **kwargs + ): + """ + :keyword bit_rate_limit: the maximum output bitrate in kbps. + :paramtype bit_rate_limit: float + :keyword encoding_interval: Interval at which images are encoded and transmitted. + :paramtype encoding_interval: float + :keyword frame_rate_limit: Maximum output framerate in fps. + :paramtype frame_rate_limit: float + :keyword guaranteed_frame_rate: A value of true indicates that frame rate is a fixed value + rather than an upper limit, and that the video encoder shall prioritize frame rate over all + other adaptable configuration values such as bitrate. + :paramtype guaranteed_frame_rate: bool + """ + super(RateControl, self).__init__(**kwargs) + self.bit_rate_limit = bit_rate_limit + self.encoding_interval = encoding_interval + self.frame_rate_limit = frame_rate_limit + self.guaranteed_frame_rate = guaranteed_frame_rate + + +class RemoteDeviceAdapter(msrest.serialization.Model): + """The Video Analyzer edge module can act as a transparent gateway for video, enabling IoT devices to send video to the cloud from behind a firewall. A remote device adapter should be created for each such IoT device. Communication between the cloud and IoT device would then flow via the Video Analyzer edge module. All required parameters must be populated in order to send to Azure. - :param type: Required. Type discriminator for the derived types.Constant filled by server. - :type type: str - :param name: Required. Node name. Must be unique within the topology. - :type name: str - :param transport: Network transport utilized by the RTSP and RTP exchange: TCP or HTTP. When - using TCP, the RTP packets are interleaved on the TCP RTSP connection. When using HTTP, the - RTSP messages are exchanged through long lived HTTP connections, and the RTP packages are - interleaved in the HTTP connections alongside the RTSP messages. Possible values include: - "http", "tcp". - :type transport: str or ~azure.media.videoanalyzer.edge.models.RtspTransport - :param endpoint: Required. RTSP endpoint information for Video Analyzer to connect to. This - contains the required information for Video Analyzer to connect to RTSP cameras and/or generic - RTSP servers. - :type endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase + :ivar name: Required. The unique identifier for the remote device adapter. + :vartype name: str + :ivar system_data: Read-only system metadata associated with this object. 
+ :vartype system_data: ~azure.media.videoanalyzer.edge.models.SystemData + :ivar properties: Properties of the remote device adapter. + :vartype properties: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapterProperties """ _validation = { - 'type': {'required': True}, 'name': {'required': True}, - 'endpoint': {'required': True}, } _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, - 'transport': {'key': 'transport', 'type': 'str'}, - 'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, + 'properties': {'key': 'properties', 'type': 'RemoteDeviceAdapterProperties'}, } def __init__( self, *, name: str, - endpoint: "EndpointBase", - transport: Optional[Union[str, "RtspTransport"]] = None, + system_data: Optional["SystemData"] = None, + properties: Optional["RemoteDeviceAdapterProperties"] = None, **kwargs ): - super(RtspSource, self).__init__(name=name, **kwargs) - self.type = '#Microsoft.VideoAnalyzer.RtspSource' # type: str - self.transport = transport - self.endpoint = endpoint - + """ + :keyword name: Required. The unique identifier for the remote device adapter. + :paramtype name: str + :keyword system_data: Read-only system metadata associated with this object. + :paramtype system_data: ~azure.media.videoanalyzer.edge.models.SystemData + :keyword properties: Properties of the remote device adapter. + :paramtype properties: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapterProperties + """ + super(RemoteDeviceAdapter, self).__init__(**kwargs) + self.name = name + self.system_data = system_data + self.properties = properties + + +class RemoteDeviceAdapterCollection(msrest.serialization.Model): + """A list of remote device adapters. + + :ivar value: An array of remote device adapters. + :vartype value: list[~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapter] + :ivar continuation_token: A continuation token to use in subsequent calls to enumerate through + the remote device adapter collection. This is used when the collection contains too many + results to return in one response. + :vartype continuation_token: str + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[RemoteDeviceAdapter]'}, + 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, + } + + def __init__( + self, + *, + value: Optional[List["RemoteDeviceAdapter"]] = None, + continuation_token: Optional[str] = None, + **kwargs + ): + """ + :keyword value: An array of remote device adapters. + :paramtype value: list[~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapter] + :keyword continuation_token: A continuation token to use in subsequent calls to enumerate + through the remote device adapter collection. This is used when the collection contains too + many results to return in one response. + :paramtype continuation_token: str + """ + super(RemoteDeviceAdapterCollection, self).__init__(**kwargs) + self.value = value + self.continuation_token = continuation_token + + +class RemoteDeviceAdapterDeleteRequest(MethodRequestEmptyBodyBase): + """Deletes an existing remote device adapter. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. Direct method method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: Video Analyzer API version. 
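A construction sketch for the adapter described above. `RemoteDeviceAdapterTarget` and `IotHubDeviceConnection` are the model types referenced by `RemoteDeviceAdapterProperties`; `SymmetricKeyCredentials` is assumed here as the credential type, and the camera host, device identity, and key are placeholders:

```python
from azure.media.videoanalyzeredge import (
    IotHubDeviceConnection,
    RemoteDeviceAdapter,
    RemoteDeviceAdapterProperties,
    RemoteDeviceAdapterTarget,
    SymmetricKeyCredentials,
)

# "camera001" is assumed to be an IoT Hub device identity created for the
# camera behind the firewall; the key is that device's symmetric key.
adapter = RemoteDeviceAdapter(
    name="camera001-adapter",
    properties=RemoteDeviceAdapterProperties(
        description="Transparent gateway for camera001",
        target=RemoteDeviceAdapterTarget(host="192.168.1.10"),
        iot_hub_device_connection=IotHubDeviceConnection(
            device_id="camera001",
            credentials=SymmetricKeyCredentials(key="<device-symmetric-key>"),
        ),
    ),
)
```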
The only acceptable values to pass in are None + and "1.1". The default value is "1.1". + :vartype api_version: str + :ivar name: Required. Resource name. + :vartype name: str + """ + + _validation = { + 'method_name': {'required': True, 'readonly': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + api_version: Optional[str] = "1.1", + **kwargs + ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword name: Required. Resource name. + :paramtype name: str + """ + super(RemoteDeviceAdapterDeleteRequest, self).__init__(api_version=api_version, name=name, **kwargs) + self.method_name = 'remoteDeviceAdapterDelete' # type: str + + +class RemoteDeviceAdapterGetRequest(MethodRequestEmptyBodyBase): + """Retrieves an existing remote device adapter. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. Direct method method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". + :vartype api_version: str + :ivar name: Required. Resource name. + :vartype name: str + """ + + _validation = { + 'method_name': {'required': True, 'readonly': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + api_version: Optional[str] = "1.1", + **kwargs + ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword name: Required. Resource name. + :paramtype name: str + """ + super(RemoteDeviceAdapterGetRequest, self).__init__(api_version=api_version, name=name, **kwargs) + self.method_name = 'remoteDeviceAdapterGet' # type: str + + +class RemoteDeviceAdapterListRequest(MethodRequest): + """List all existing remote device adapters. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. Direct method method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". + :vartype api_version: str + """ + + _validation = { + 'method_name': {'required': True, 'readonly': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + } + + def __init__( + self, + *, + api_version: Optional[str] = "1.1", + **kwargs + ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". 
+ :paramtype api_version: str + """ + super(RemoteDeviceAdapterListRequest, self).__init__(api_version=api_version, **kwargs) + self.method_name = 'remoteDeviceAdapterList' # type: str + + +class RemoteDeviceAdapterProperties(msrest.serialization.Model): + """Remote device adapter properties. + + All required parameters must be populated in order to send to Azure. + + :ivar description: An optional description for the remote device adapter. + :vartype description: str + :ivar target: Required. The IoT device to which this remote device will connect. + :vartype target: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapterTarget + :ivar iot_hub_device_connection: Required. Information that enables communication between the + IoT Hub and the IoT device - allowing this edge module to act as a transparent gateway between + the two. + :vartype iot_hub_device_connection: + ~azure.media.videoanalyzer.edge.models.IotHubDeviceConnection + """ + + _validation = { + 'target': {'required': True}, + 'iot_hub_device_connection': {'required': True}, + } + + _attribute_map = { + 'description': {'key': 'description', 'type': 'str'}, + 'target': {'key': 'target', 'type': 'RemoteDeviceAdapterTarget'}, + 'iot_hub_device_connection': {'key': 'iotHubDeviceConnection', 'type': 'IotHubDeviceConnection'}, + } + + def __init__( + self, + *, + target: "RemoteDeviceAdapterTarget", + iot_hub_device_connection: "IotHubDeviceConnection", + description: Optional[str] = None, + **kwargs + ): + """ + :keyword description: An optional description for the remote device adapter. + :paramtype description: str + :keyword target: Required. The IoT device to which this remote device will connect. + :paramtype target: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapterTarget + :keyword iot_hub_device_connection: Required. Information that enables communication between + the IoT Hub and the IoT device - allowing this edge module to act as a transparent gateway + between the two. + :paramtype iot_hub_device_connection: + ~azure.media.videoanalyzer.edge.models.IotHubDeviceConnection + """ + super(RemoteDeviceAdapterProperties, self).__init__(**kwargs) + self.description = description + self.target = target + self.iot_hub_device_connection = iot_hub_device_connection + + +class RemoteDeviceAdapterSetRequest(MethodRequest): + """Creates a new remote device adapter or updates an existing one. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. Direct method method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". + :vartype api_version: str + :ivar remote_device_adapter: Required. The Video Analyzer edge module can act as a transparent + gateway for video, enabling IoT devices to send video to the cloud from behind a firewall. A + remote device adapter should be created for each such IoT device. Communication between the + cloud and IoT device would then flow via the Video Analyzer edge module. 
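Before continuing with the set request below, a minimal construction sketch for the `RemoteDeviceAdapterProperties` model defined above. The host name, device id, and description are placeholders, and the `IotHubDeviceConnection` shape (a required `device_id` plus optional credentials) is assumed from this API version.

```python
from azure.media.videoanalyzeredge import (
    IotHubDeviceConnection,
    RemoteDeviceAdapterProperties,
    RemoteDeviceAdapterTarget,
)

properties = RemoteDeviceAdapterProperties(
    description="Adapter for the lobby ONVIF camera",  # optional
    target=RemoteDeviceAdapterTarget(host="camera001.contoso.com"),
    # The device must already be registered with the IoT Hub; credentials
    # (e.g. the SymmetricKeyCredentials model later in this file) can be
    # attached here as the device setup requires.
    iot_hub_device_connection=IotHubDeviceConnection(device_id="camera001"),
)
```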
+ :vartype remote_device_adapter: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapter + """ + + _validation = { + 'method_name': {'required': True, 'readonly': True}, + 'remote_device_adapter': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'remote_device_adapter': {'key': 'remoteDeviceAdapter', 'type': 'RemoteDeviceAdapter'}, + } + + def __init__( + self, + *, + remote_device_adapter: "RemoteDeviceAdapter", + api_version: Optional[str] = "1.1", + **kwargs + ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword remote_device_adapter: Required. The Video Analyzer edge module can act as a + transparent gateway for video, enabling IoT devices to send video to the cloud from behind a + firewall. A remote device adapter should be created for each such IoT device. Communication + between the cloud and IoT device would then flow via the Video Analyzer edge module. + :paramtype remote_device_adapter: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapter + """ + super(RemoteDeviceAdapterSetRequest, self).__init__(api_version=api_version, **kwargs) + self.method_name = 'remoteDeviceAdapterSet' # type: str + self.remote_device_adapter = remote_device_adapter + + +class RemoteDeviceAdapterSetRequestBody(RemoteDeviceAdapter, MethodRequest): + """The Video Analyzer edge module can act as a transparent gateway for video, enabling IoT devices to send video to the cloud from behind a firewall. A remote device adapter should be created for each such IoT device. Communication between the cloud and IoT device would then flow via the Video Analyzer edge module. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. Direct method method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: Video Analyzer API version. The only acceptable values to pass in are None + and "1.1". The default value is "1.1". + :vartype api_version: str + :ivar name: Required. The unique identifier for the remote device adapter. + :vartype name: str + :ivar system_data: Read-only system metadata associated with this object. + :vartype system_data: ~azure.media.videoanalyzer.edge.models.SystemData + :ivar properties: Properties of the remote device adapter. + :vartype properties: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapterProperties + """ + + _validation = { + 'method_name': {'required': True, 'readonly': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, + 'properties': {'key': 'properties', 'type': 'RemoteDeviceAdapterProperties'}, + } + + def __init__( + self, + *, + name: str, + api_version: Optional[str] = "1.1", + system_data: Optional["SystemData"] = None, + properties: Optional["RemoteDeviceAdapterProperties"] = None, + **kwargs + ): + """ + :keyword api_version: Video Analyzer API version. The only acceptable values to pass in are + None and "1.1". The default value is "1.1". + :paramtype api_version: str + :keyword name: Required. 
The unique identifier for the remote device adapter.
+ :paramtype name: str
+ :keyword system_data: Read-only system metadata associated with this object.
+ :paramtype system_data: ~azure.media.videoanalyzer.edge.models.SystemData
+ :keyword properties: Properties of the remote device adapter.
+ :paramtype properties: ~azure.media.videoanalyzer.edge.models.RemoteDeviceAdapterProperties
+ """
+ super(RemoteDeviceAdapterSetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, api_version=api_version, **kwargs)
+ self.method_name = 'RemoteDeviceAdapterSetRequestBody'  # type: str
+ self.api_version = api_version
+ self.name = name
+ self.system_data = system_data
+ self.properties = properties
+
+
+class RemoteDeviceAdapterTarget(msrest.serialization.Model):
+ """Properties of the remote device adapter target.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar host: Required. Hostname or IP address of the remote device.
+ :vartype host: str
+ """
+
+ _validation = {
+ 'host': {'required': True},
+ }
+
+ _attribute_map = {
+ 'host': {'key': 'host', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ host: str,
+ **kwargs
+ ):
+ """
+ :keyword host: Required. Hostname or IP address of the remote device.
+ :paramtype host: str
+ """
+ super(RemoteDeviceAdapterTarget, self).__init__(**kwargs)
+ self.host = host
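Putting the pieces together, a sketch of sending the set request as an IoT Hub direct method, following the invocation pattern this SDK uses for its other direct-method wrappers. The connection string, device, and module names are placeholders, and `properties` is the object built in the earlier sketch.

```python
from azure.iot.hub import IoTHubRegistryManager
from azure.iot.hub.models import CloudToDeviceMethod
from azure.media.videoanalyzeredge import RemoteDeviceAdapter, RemoteDeviceAdapterSetRequest

set_request = RemoteDeviceAdapterSetRequest(
    remote_device_adapter=RemoteDeviceAdapter(name="camera001", properties=properties)
)

registry_manager = IoTHubRegistryManager("<iothub-connection-string>")
direct_method = CloudToDeviceMethod(
    method_name=set_request.method_name,
    payload=set_request.serialize(),
)
registry_manager.invoke_device_module_method(
    "<edge-device-id>", "<video-analyzer-module-name>", direct_method
)
```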
+
+
+class RtspSource(SourceNodeBase):
+ """RTSP source allows for media from an RTSP camera or generic RTSP server to be ingested into a live pipeline.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar type: Required. Type discriminator for the derived types.Constant filled by server.
+ :vartype type: str
+ :ivar name: Required. Node name. Must be unique within the topology.
+ :vartype name: str
+ :ivar transport: Network transport utilized by the RTSP and RTP exchange: TCP or HTTP. When
+ using TCP, the RTP packets are interleaved on the TCP RTSP connection. When using HTTP, the
+ RTSP messages are exchanged through long lived HTTP connections, and the RTP packets are
+ interleaved in the HTTP connections alongside the RTSP messages. Possible values include:
+ "http", "tcp".
+ :vartype transport: str or ~azure.media.videoanalyzer.edge.models.RtspTransport
+ :ivar endpoint: Required. RTSP endpoint information for Video Analyzer to connect to. This
+ contains the required information for Video Analyzer to connect to RTSP cameras and/or generic
+ RTSP servers.
+ :vartype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase
+ """
+
+ _validation = {
+ 'type': {'required': True},
+ 'name': {'required': True},
+ 'endpoint': {'required': True},
+ }
+
+ _attribute_map = {
+ 'type': {'key': '@type', 'type': 'str'},
+ 'name': {'key': 'name', 'type': 'str'},
+ 'transport': {'key': 'transport', 'type': 'str'},
+ 'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'},
+ }
+
+ def __init__(
+ self,
+ *,
+ name: str,
+ endpoint: "EndpointBase",
+ transport: Optional[Union[str, "RtspTransport"]] = None,
+ **kwargs
+ ):
+ """
+ :keyword name: Required. Node name. Must be unique within the topology.
+ :paramtype name: str
+ :keyword transport: Network transport utilized by the RTSP and RTP exchange: TCP or HTTP. When
+ using TCP, the RTP packets are interleaved on the TCP RTSP connection. When using HTTP, the
+ RTSP messages are exchanged through long lived HTTP connections, and the RTP packets are
+ interleaved in the HTTP connections alongside the RTSP messages. Possible values include:
+ "http", "tcp".
+ :paramtype transport: str or ~azure.media.videoanalyzer.edge.models.RtspTransport
+ :keyword endpoint: Required. RTSP endpoint information for Video Analyzer to connect to. This
+ contains the required information for Video Analyzer to connect to RTSP cameras and/or generic
+ RTSP servers.
+ :paramtype endpoint: ~azure.media.videoanalyzer.edge.models.EndpointBase
+ """
+ super(RtspSource, self).__init__(name=name, **kwargs)
+ self.type = '#Microsoft.VideoAnalyzer.RtspSource'  # type: str
+ self.transport = transport
+ self.endpoint = endpoint
+

class SamplingOptions(msrest.serialization.Model):
 """Defines how often media is submitted to the extension plugin.

- :param skip_samples_without_annotation: When set to 'true', prevents frames without upstream
+ :ivar skip_samples_without_annotation: When set to 'true', prevents frames without upstream
 inference data from being sent to the extension plugin. This is useful to limit the frames sent
 to the extension to pre-analyzed frames only. For example, when used downstream from a motion
 detector, this enables only frames in which motion has been detected to be further analyzed.
- :type skip_samples_without_annotation: str
- :param maximum_samples_per_second: Maximum rate of samples submitted to the extension. This
+ :vartype skip_samples_without_annotation: str
+ :ivar maximum_samples_per_second: Maximum rate of samples submitted to the extension. This
 prevents an extension plugin from being overloaded with data.
- :type maximum_samples_per_second: str
+ :vartype maximum_samples_per_second: str
 """

 _attribute_map = {
@@ -2275,6 +3715,17 @@ def __init__(
 maximum_samples_per_second: Optional[str] = None,
 **kwargs
 ):
+ """
+ :keyword skip_samples_without_annotation: When set to 'true', prevents frames without upstream
+ inference data from being sent to the extension plugin. This is useful to limit the frames sent
+ to the extension to pre-analyzed frames only. For example, when used downstream from a motion
+ detector, this enables only frames in which motion has been detected to be further analyzed.
+ :paramtype skip_samples_without_annotation: str
+ :keyword maximum_samples_per_second: Maximum rate of samples submitted to the extension. This
+ prevents an extension plugin from being overloaded with data.
+ :paramtype maximum_samples_per_second: str
+ """
 super(SamplingOptions, self).__init__(**kwargs)
 self.skip_samples_without_annotation = skip_samples_without_annotation
 self.maximum_samples_per_second = maximum_samples_per_second
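A minimal sketch of the sampling options above: forward at most five frames per second to the extension, and only frames that already carry upstream inference data. Note that both values are strings, per the attribute map above.

```python
from azure.media.videoanalyzeredge import SamplingOptions

sampling = SamplingOptions(
    skip_samples_without_annotation="true",
    maximum_samples_per_second="5",
)
```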
@@ -2285,31 +3736,31 @@ class SignalGateProcessor(ProcessorNodeBase):

 All required parameters must be populated in order to send to Azure.

- :param type: Required. Type discriminator for the derived types.Constant filled by server.
- :type type: str
- :param name: Required. Node name. Must be unique within the topology.
- :type name: str
- :param inputs: Required. An array of upstream node references within the topology to be used as
+ :ivar type: Required. Type discriminator for the derived types.Constant filled by server.
+ :vartype type: str
+ :ivar name: Required. Node name. Must be unique within the topology.
+ :vartype name: str
+ :ivar inputs: Required. An array of upstream node references within the topology to be used as
 inputs for this node.
- :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
- :param activation_evaluation_window: The period of time over which the gate gathers input
- events before evaluating them.
- :type activation_evaluation_window: str
- :param activation_signal_offset: Signal offset once the gate is activated (can be negative). It
+ :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
+ :ivar activation_evaluation_window: The period of time over which the gate gathers input events
+ before evaluating them.
+ :vartype activation_evaluation_window: str
+ :ivar activation_signal_offset: Signal offset once the gate is activated (can be negative). It
 determines how far behind or after the activation time the signal will be let through. A
 negative offset indicates that data prior to the activation time must be included in the
 signal that is let through, once the gate is activated. When used upstream of a file or video
 sink, this allows for scenarios such as recording buffered media prior to an event, such as:
 recording video starting 5 seconds before motion is detected.
- :type activation_signal_offset: str
- :param minimum_activation_time: The minimum period for which the gate remains open in the
+ :vartype activation_signal_offset: str
+ :ivar minimum_activation_time: The minimum period for which the gate remains open in the
 absence of subsequent triggers (events). When used upstream of a file or video sink, it
 determines the minimum length of the recorded video clip.
- :type minimum_activation_time: str
- :param maximum_activation_time: The maximum period for which the gate remains open in the
+ :vartype minimum_activation_time: str
+ :ivar maximum_activation_time: The maximum period for which the gate remains open in the
 presence of subsequent triggers (events). When used upstream of a file or video sink, it
 determines the maximum length of the recorded video clip.
- :type maximum_activation_time: str
+ :vartype maximum_activation_time: str
 """

 _validation = {
@@ -2339,6 +3790,31 @@ def __init__(
 maximum_activation_time: Optional[str] = None,
 **kwargs
 ):
+ """
+ :keyword name: Required. Node name. Must be unique within the topology.
+ :paramtype name: str
+ :keyword inputs: Required. An array of upstream node references within the topology to be used
+ as inputs for this node.
+ :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
+ :keyword activation_evaluation_window: The period of time over which the gate gathers input
+ events before evaluating them.
+ :paramtype activation_evaluation_window: str
+ :keyword activation_signal_offset: Signal offset once the gate is activated (can be negative).
+ It determines how far behind or after the activation time the signal will be let through. A
+ negative offset indicates that data prior to the activation time must be included in the
+ signal that is let through, once the gate is activated. When used upstream of a file or video
+ sink, this allows for scenarios such as recording buffered media prior to an event, such as:
+ recording video starting 5 seconds before motion is detected.
+ :paramtype activation_signal_offset: str
+ :keyword minimum_activation_time: The minimum period for which the gate remains open in the
+ absence of subsequent triggers (events). When used upstream of a file or video sink, it
+ determines the minimum length of the recorded video clip.
+ :paramtype minimum_activation_time: str + :keyword maximum_activation_time: The maximum period for which the gate remains open in the + presence of subsequent triggers (events). When used upstream of a file or video sink, it + determines the maximum length of the recorded video clip. + :paramtype maximum_activation_time: str + """ super(SignalGateProcessor, self).__init__(name=name, inputs=inputs, **kwargs) self.type = '#Microsoft.VideoAnalyzer.SignalGateProcessor' # type: str self.activation_evaluation_window = activation_evaluation_window @@ -2355,8 +3831,8 @@ class SpatialAnalysisOperationBase(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param type: Required. The Type discriminator for the derived types.Constant filled by server. - :type type: str + :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. + :vartype type: str """ _validation = { @@ -2375,6 +3851,8 @@ def __init__( self, **kwargs ): + """ + """ super(SpatialAnalysisOperationBase, self).__init__(**kwargs) self.type = None # type: Optional[str] @@ -2384,11 +3862,11 @@ class SpatialAnalysisCustomOperation(SpatialAnalysisOperationBase): All required parameters must be populated in order to send to Azure. - :param type: Required. The Type discriminator for the derived types.Constant filled by server. - :type type: str - :param extension_configuration: Required. Custom configuration to pass to the Azure Cognitive + :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar extension_configuration: Required. Custom configuration to pass to the Azure Cognitive Services Spatial Analysis module. - :type extension_configuration: str + :vartype extension_configuration: str """ _validation = { @@ -2407,6 +3885,11 @@ def __init__( extension_configuration: str, **kwargs ): + """ + :keyword extension_configuration: Required. Custom configuration to pass to the Azure Cognitive + Services Spatial Analysis module. + :paramtype extension_configuration: str + """ super(SpatialAnalysisCustomOperation, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SpatialAnalysisCustomOperation' # type: str self.extension_configuration = extension_configuration @@ -2415,11 +3898,11 @@ def __init__( class SpatialAnalysisOperationEventBase(msrest.serialization.Model): """Defines the Azure Cognitive Services Spatial Analysis operation eventing configuration. - :param threshold: The event threshold. - :type threshold: str - :param focus: The operation focus type. Possible values include: "center", "bottomCenter", + :ivar threshold: The event threshold. + :vartype threshold: str + :ivar focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". - :type focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus + :vartype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus """ _attribute_map = { @@ -2434,6 +3917,13 @@ def __init__( focus: Optional[Union[str, "SpatialAnalysisOperationFocus"]] = None, **kwargs ): + """ + :keyword threshold: The event threshold. + :paramtype threshold: str + :keyword focus: The operation focus type. Possible values include: "center", "bottomCenter", + "footprint". 
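For the `SignalGateProcessor` completed above, a minimal event-recording sketch. The upstream node name is hypothetical, and the durations are ISO8601 strings.

```python
from azure.media.videoanalyzeredge import NodeInput, SignalGateProcessor

gate = SignalGateProcessor(
    name="signalGateProcessor",
    inputs=[NodeInput(node_name="motionDetection")],
    activation_evaluation_window="PT1S",
    activation_signal_offset="-PT5S",  # include 5s of buffered media before the event
    minimum_activation_time="PT20S",   # record at least 20s per activation
    maximum_activation_time="PT40S",   # record at most 40s per activation
)
```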
+ :paramtype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus + """ super(SpatialAnalysisOperationEventBase, self).__init__(**kwargs) self.threshold = threshold self.focus = focus @@ -2442,16 +3932,16 @@ def __init__( class SpatialAnalysisPersonCountEvent(SpatialAnalysisOperationEventBase): """Defines a Spatial Analysis person count operation eventing configuration. - :param threshold: The event threshold. - :type threshold: str - :param focus: The operation focus type. Possible values include: "center", "bottomCenter", + :ivar threshold: The event threshold. + :vartype threshold: str + :ivar focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". - :type focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus - :param trigger: The event trigger type. Possible values include: "event", "interval". - :type trigger: str or + :vartype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus + :ivar trigger: The event trigger type. Possible values include: "event", "interval". + :vartype trigger: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountEventTrigger - :param output_frequency: The event or interval output frequency. - :type output_frequency: str + :ivar output_frequency: The event or interval output frequency. + :vartype output_frequency: str """ _attribute_map = { @@ -2470,6 +3960,18 @@ def __init__( output_frequency: Optional[str] = None, **kwargs ): + """ + :keyword threshold: The event threshold. + :paramtype threshold: str + :keyword focus: The operation focus type. Possible values include: "center", "bottomCenter", + "footprint". + :paramtype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus + :keyword trigger: The event trigger type. Possible values include: "event", "interval". + :paramtype trigger: str or + ~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountEventTrigger + :keyword output_frequency: The event or interval output frequency. + :paramtype output_frequency: str + """ super(SpatialAnalysisPersonCountEvent, self).__init__(threshold=threshold, focus=focus, **kwargs) self.trigger = trigger self.output_frequency = output_frequency @@ -2483,17 +3985,23 @@ class SpatialAnalysisTypedOperationBase(SpatialAnalysisOperationBase): All required parameters must be populated in order to send to Azure. - :param type: Required. The Type discriminator for the derived types.Constant filled by server. - :type type: str - :param debug: If set to 'true', enables debugging mode for this operation. - :type debug: str - :param camera_configuration: Advanced camera configuration. - :type camera_configuration: str - :param detector_node_configuration: Advanced detector node configuration. - :type detector_node_configuration: str - :param enable_face_mask_classifier: If set to 'true', enables face mask detection for this + :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar debug: If set to 'true', enables debugging mode for this operation. + :vartype debug: str + :ivar calibration_configuration: Advanced calibration configuration. + :vartype calibration_configuration: str + :ivar camera_configuration: Advanced camera configuration. + :vartype camera_configuration: str + :ivar camera_calibrator_node_configuration: Advanced camera calibrator configuration. 
+ :vartype camera_calibrator_node_configuration: str + :ivar detector_node_configuration: Advanced detector node configuration. + :vartype detector_node_configuration: str + :ivar tracker_node_configuration: Advanced tracker node configuration. + :vartype tracker_node_configuration: str + :ivar enable_face_mask_classifier: If set to 'true', enables face mask detection for this operation. - :type enable_face_mask_classifier: str + :vartype enable_face_mask_classifier: str """ _validation = { @@ -2503,8 +4011,11 @@ class SpatialAnalysisTypedOperationBase(SpatialAnalysisOperationBase): _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'debug': {'key': 'debug', 'type': 'str'}, + 'calibration_configuration': {'key': 'calibrationConfiguration', 'type': 'str'}, 'camera_configuration': {'key': 'cameraConfiguration', 'type': 'str'}, + 'camera_calibrator_node_configuration': {'key': 'cameraCalibratorNodeConfiguration', 'type': 'str'}, 'detector_node_configuration': {'key': 'detectorNodeConfiguration', 'type': 'str'}, + 'tracker_node_configuration': {'key': 'trackerNodeConfiguration', 'type': 'str'}, 'enable_face_mask_classifier': {'key': 'enableFaceMaskClassifier', 'type': 'str'}, } @@ -2516,16 +4027,39 @@ def __init__( self, *, debug: Optional[str] = None, + calibration_configuration: Optional[str] = None, camera_configuration: Optional[str] = None, + camera_calibrator_node_configuration: Optional[str] = None, detector_node_configuration: Optional[str] = None, + tracker_node_configuration: Optional[str] = None, enable_face_mask_classifier: Optional[str] = None, **kwargs ): + """ + :keyword debug: If set to 'true', enables debugging mode for this operation. + :paramtype debug: str + :keyword calibration_configuration: Advanced calibration configuration. + :paramtype calibration_configuration: str + :keyword camera_configuration: Advanced camera configuration. + :paramtype camera_configuration: str + :keyword camera_calibrator_node_configuration: Advanced camera calibrator configuration. + :paramtype camera_calibrator_node_configuration: str + :keyword detector_node_configuration: Advanced detector node configuration. + :paramtype detector_node_configuration: str + :keyword tracker_node_configuration: Advanced tracker node configuration. + :paramtype tracker_node_configuration: str + :keyword enable_face_mask_classifier: If set to 'true', enables face mask detection for this + operation. + :paramtype enable_face_mask_classifier: str + """ super(SpatialAnalysisTypedOperationBase, self).__init__(**kwargs) self.type = 'SpatialAnalysisTypedOperationBase' # type: str self.debug = debug + self.calibration_configuration = calibration_configuration self.camera_configuration = camera_configuration + self.camera_calibrator_node_configuration = camera_calibrator_node_configuration self.detector_node_configuration = detector_node_configuration + self.tracker_node_configuration = tracker_node_configuration self.enable_face_mask_classifier = enable_face_mask_classifier @@ -2534,19 +4068,26 @@ class SpatialAnalysisPersonCountOperation(SpatialAnalysisTypedOperationBase): All required parameters must be populated in order to send to Azure. - :param type: Required. The Type discriminator for the derived types.Constant filled by server. - :type type: str - :param debug: If set to 'true', enables debugging mode for this operation. - :type debug: str - :param camera_configuration: Advanced camera configuration. 
- :type camera_configuration: str - :param detector_node_configuration: Advanced detector node configuration. - :type detector_node_configuration: str - :param enable_face_mask_classifier: If set to 'true', enables face mask detection for this + :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar debug: If set to 'true', enables debugging mode for this operation. + :vartype debug: str + :ivar calibration_configuration: Advanced calibration configuration. + :vartype calibration_configuration: str + :ivar camera_configuration: Advanced camera configuration. + :vartype camera_configuration: str + :ivar camera_calibrator_node_configuration: Advanced camera calibrator configuration. + :vartype camera_calibrator_node_configuration: str + :ivar detector_node_configuration: Advanced detector node configuration. + :vartype detector_node_configuration: str + :ivar tracker_node_configuration: Advanced tracker node configuration. + :vartype tracker_node_configuration: str + :ivar enable_face_mask_classifier: If set to 'true', enables face mask detection for this operation. - :type enable_face_mask_classifier: str - :param zones: Required. The list of zones and optional events. - :type zones: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountZoneEvents] + :vartype enable_face_mask_classifier: str + :ivar zones: Required. The list of zones and optional events. + :vartype zones: + list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountZoneEvents] """ _validation = { @@ -2557,8 +4098,11 @@ class SpatialAnalysisPersonCountOperation(SpatialAnalysisTypedOperationBase): _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'debug': {'key': 'debug', 'type': 'str'}, + 'calibration_configuration': {'key': 'calibrationConfiguration', 'type': 'str'}, 'camera_configuration': {'key': 'cameraConfiguration', 'type': 'str'}, + 'camera_calibrator_node_configuration': {'key': 'cameraCalibratorNodeConfiguration', 'type': 'str'}, 'detector_node_configuration': {'key': 'detectorNodeConfiguration', 'type': 'str'}, + 'tracker_node_configuration': {'key': 'trackerNodeConfiguration', 'type': 'str'}, 'enable_face_mask_classifier': {'key': 'enableFaceMaskClassifier', 'type': 'str'}, 'zones': {'key': 'zones', 'type': '[SpatialAnalysisPersonCountZoneEvents]'}, } @@ -2568,12 +4112,35 @@ def __init__( *, zones: List["SpatialAnalysisPersonCountZoneEvents"], debug: Optional[str] = None, + calibration_configuration: Optional[str] = None, camera_configuration: Optional[str] = None, + camera_calibrator_node_configuration: Optional[str] = None, detector_node_configuration: Optional[str] = None, + tracker_node_configuration: Optional[str] = None, enable_face_mask_classifier: Optional[str] = None, **kwargs ): - super(SpatialAnalysisPersonCountOperation, self).__init__(debug=debug, camera_configuration=camera_configuration, detector_node_configuration=detector_node_configuration, enable_face_mask_classifier=enable_face_mask_classifier, **kwargs) + """ + :keyword debug: If set to 'true', enables debugging mode for this operation. + :paramtype debug: str + :keyword calibration_configuration: Advanced calibration configuration. + :paramtype calibration_configuration: str + :keyword camera_configuration: Advanced camera configuration. + :paramtype camera_configuration: str + :keyword camera_calibrator_node_configuration: Advanced camera calibrator configuration. 
+ :paramtype camera_calibrator_node_configuration: str + :keyword detector_node_configuration: Advanced detector node configuration. + :paramtype detector_node_configuration: str + :keyword tracker_node_configuration: Advanced tracker node configuration. + :paramtype tracker_node_configuration: str + :keyword enable_face_mask_classifier: If set to 'true', enables face mask detection for this + operation. + :paramtype enable_face_mask_classifier: str + :keyword zones: Required. The list of zones and optional events. + :paramtype zones: + list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountZoneEvents] + """ + super(SpatialAnalysisPersonCountOperation, self).__init__(debug=debug, calibration_configuration=calibration_configuration, camera_configuration=camera_configuration, camera_calibrator_node_configuration=camera_calibrator_node_configuration, detector_node_configuration=detector_node_configuration, tracker_node_configuration=tracker_node_configuration, enable_face_mask_classifier=enable_face_mask_classifier, **kwargs) self.type = '#Microsoft.VideoAnalyzer.SpatialAnalysisPersonCountOperation' # type: str self.zones = zones @@ -2583,10 +4150,10 @@ class SpatialAnalysisPersonCountZoneEvents(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param zone: Required. The named zone. - :type zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase - :param events: The event configuration. - :type events: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountEvent] + :ivar zone: Required. The named zone. + :vartype zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase + :ivar events: The event configuration. + :vartype events: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountEvent] """ _validation = { @@ -2605,6 +4172,12 @@ def __init__( events: Optional[List["SpatialAnalysisPersonCountEvent"]] = None, **kwargs ): + """ + :keyword zone: Required. The named zone. + :paramtype zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase + :keyword events: The event configuration. + :paramtype events: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonCountEvent] + """ super(SpatialAnalysisPersonCountZoneEvents, self).__init__(**kwargs) self.zone = zone self.events = events @@ -2613,20 +4186,20 @@ def __init__( class SpatialAnalysisPersonDistanceEvent(SpatialAnalysisOperationEventBase): """Defines a Spatial Analysis person distance operation eventing configuration. - :param threshold: The event threshold. - :type threshold: str - :param focus: The operation focus type. Possible values include: "center", "bottomCenter", + :ivar threshold: The event threshold. + :vartype threshold: str + :ivar focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". - :type focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus - :param trigger: The event trigger type. Possible values include: "event", "interval". - :type trigger: str or + :vartype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus + :ivar trigger: The event trigger type. Possible values include: "event", "interval". + :vartype trigger: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonDistanceEventTrigger - :param output_frequency: The event or interval output frequency. - :type output_frequency: str - :param minimum_distance_threshold: The minimum distance threshold. 
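As a sketch of the person-count operation completed above: one named zone with an interval-based count event. The `NamedPolygonString` model and the normalized polygon coordinates are illustrative assumptions.

```python
from azure.media.videoanalyzeredge import (
    NamedPolygonString,
    SpatialAnalysisPersonCountEvent,
    SpatialAnalysisPersonCountOperation,
    SpatialAnalysisPersonCountZoneEvents,
)

person_count = SpatialAnalysisPersonCountOperation(
    zones=[
        SpatialAnalysisPersonCountZoneEvents(
            zone=NamedPolygonString(
                name="demoZone",
                polygon="[[0.3,0.3],[0.3,0.9],[0.9,0.9],[0.9,0.3]]",
            ),
            events=[
                SpatialAnalysisPersonCountEvent(
                    trigger="interval",
                    output_frequency="1",  # one count result per interval
                )
            ],
        )
    ],
)
```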
- :type minimum_distance_threshold: str - :param maximum_distance_threshold: The maximum distance threshold. - :type maximum_distance_threshold: str + :ivar output_frequency: The event or interval output frequency. + :vartype output_frequency: str + :ivar minimum_distance_threshold: The minimum distance threshold. + :vartype minimum_distance_threshold: str + :ivar maximum_distance_threshold: The maximum distance threshold. + :vartype maximum_distance_threshold: str """ _attribute_map = { @@ -2649,6 +4222,22 @@ def __init__( maximum_distance_threshold: Optional[str] = None, **kwargs ): + """ + :keyword threshold: The event threshold. + :paramtype threshold: str + :keyword focus: The operation focus type. Possible values include: "center", "bottomCenter", + "footprint". + :paramtype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus + :keyword trigger: The event trigger type. Possible values include: "event", "interval". + :paramtype trigger: str or + ~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonDistanceEventTrigger + :keyword output_frequency: The event or interval output frequency. + :paramtype output_frequency: str + :keyword minimum_distance_threshold: The minimum distance threshold. + :paramtype minimum_distance_threshold: str + :keyword maximum_distance_threshold: The maximum distance threshold. + :paramtype maximum_distance_threshold: str + """ super(SpatialAnalysisPersonDistanceEvent, self).__init__(threshold=threshold, focus=focus, **kwargs) self.trigger = trigger self.output_frequency = output_frequency @@ -2661,19 +4250,25 @@ class SpatialAnalysisPersonDistanceOperation(SpatialAnalysisTypedOperationBase): All required parameters must be populated in order to send to Azure. - :param type: Required. The Type discriminator for the derived types.Constant filled by server. - :type type: str - :param debug: If set to 'true', enables debugging mode for this operation. - :type debug: str - :param camera_configuration: Advanced camera configuration. - :type camera_configuration: str - :param detector_node_configuration: Advanced detector node configuration. - :type detector_node_configuration: str - :param enable_face_mask_classifier: If set to 'true', enables face mask detection for this + :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar debug: If set to 'true', enables debugging mode for this operation. + :vartype debug: str + :ivar calibration_configuration: Advanced calibration configuration. + :vartype calibration_configuration: str + :ivar camera_configuration: Advanced camera configuration. + :vartype camera_configuration: str + :ivar camera_calibrator_node_configuration: Advanced camera calibrator configuration. + :vartype camera_calibrator_node_configuration: str + :ivar detector_node_configuration: Advanced detector node configuration. + :vartype detector_node_configuration: str + :ivar tracker_node_configuration: Advanced tracker node configuration. + :vartype tracker_node_configuration: str + :ivar enable_face_mask_classifier: If set to 'true', enables face mask detection for this operation. - :type enable_face_mask_classifier: str - :param zones: Required. The list of zones with optional events. - :type zones: + :vartype enable_face_mask_classifier: str + :ivar zones: Required. The list of zones with optional events. 
+ :vartype zones: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonDistanceZoneEvents] """ @@ -2685,8 +4280,11 @@ class SpatialAnalysisPersonDistanceOperation(SpatialAnalysisTypedOperationBase): _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'debug': {'key': 'debug', 'type': 'str'}, + 'calibration_configuration': {'key': 'calibrationConfiguration', 'type': 'str'}, 'camera_configuration': {'key': 'cameraConfiguration', 'type': 'str'}, + 'camera_calibrator_node_configuration': {'key': 'cameraCalibratorNodeConfiguration', 'type': 'str'}, 'detector_node_configuration': {'key': 'detectorNodeConfiguration', 'type': 'str'}, + 'tracker_node_configuration': {'key': 'trackerNodeConfiguration', 'type': 'str'}, 'enable_face_mask_classifier': {'key': 'enableFaceMaskClassifier', 'type': 'str'}, 'zones': {'key': 'zones', 'type': '[SpatialAnalysisPersonDistanceZoneEvents]'}, } @@ -2696,12 +4294,35 @@ def __init__( *, zones: List["SpatialAnalysisPersonDistanceZoneEvents"], debug: Optional[str] = None, + calibration_configuration: Optional[str] = None, camera_configuration: Optional[str] = None, + camera_calibrator_node_configuration: Optional[str] = None, detector_node_configuration: Optional[str] = None, + tracker_node_configuration: Optional[str] = None, enable_face_mask_classifier: Optional[str] = None, **kwargs ): - super(SpatialAnalysisPersonDistanceOperation, self).__init__(debug=debug, camera_configuration=camera_configuration, detector_node_configuration=detector_node_configuration, enable_face_mask_classifier=enable_face_mask_classifier, **kwargs) + """ + :keyword debug: If set to 'true', enables debugging mode for this operation. + :paramtype debug: str + :keyword calibration_configuration: Advanced calibration configuration. + :paramtype calibration_configuration: str + :keyword camera_configuration: Advanced camera configuration. + :paramtype camera_configuration: str + :keyword camera_calibrator_node_configuration: Advanced camera calibrator configuration. + :paramtype camera_calibrator_node_configuration: str + :keyword detector_node_configuration: Advanced detector node configuration. + :paramtype detector_node_configuration: str + :keyword tracker_node_configuration: Advanced tracker node configuration. + :paramtype tracker_node_configuration: str + :keyword enable_face_mask_classifier: If set to 'true', enables face mask detection for this + operation. + :paramtype enable_face_mask_classifier: str + :keyword zones: Required. The list of zones with optional events. + :paramtype zones: + list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonDistanceZoneEvents] + """ + super(SpatialAnalysisPersonDistanceOperation, self).__init__(debug=debug, calibration_configuration=calibration_configuration, camera_configuration=camera_configuration, camera_calibrator_node_configuration=camera_calibrator_node_configuration, detector_node_configuration=detector_node_configuration, tracker_node_configuration=tracker_node_configuration, enable_face_mask_classifier=enable_face_mask_classifier, **kwargs) self.type = '#Microsoft.VideoAnalyzer.SpatialAnalysisPersonDistanceOperation' # type: str self.zones = zones @@ -2711,10 +4332,11 @@ class SpatialAnalysisPersonDistanceZoneEvents(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param zone: Required. The named zone. - :type zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase - :param events: The event configuration. 
- :type events: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonDistanceEvent] + :ivar zone: Required. The named zone. + :vartype zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase + :ivar events: The event configuration. + :vartype events: + list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonDistanceEvent] """ _validation = { @@ -2733,6 +4355,13 @@ def __init__( events: Optional[List["SpatialAnalysisPersonDistanceEvent"]] = None, **kwargs ): + """ + :keyword zone: Required. The named zone. + :paramtype zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase + :keyword events: The event configuration. + :paramtype events: + list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonDistanceEvent] + """ super(SpatialAnalysisPersonDistanceZoneEvents, self).__init__(**kwargs) self.zone = zone self.events = events @@ -2741,11 +4370,11 @@ def __init__( class SpatialAnalysisPersonLineCrossingEvent(SpatialAnalysisOperationEventBase): """Defines a Spatial Analysis person line crossing operation eventing configuration. - :param threshold: The event threshold. - :type threshold: str - :param focus: The operation focus type. Possible values include: "center", "bottomCenter", + :ivar threshold: The event threshold. + :vartype threshold: str + :ivar focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". - :type focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus + :vartype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus """ _attribute_map = { @@ -2760,6 +4389,13 @@ def __init__( focus: Optional[Union[str, "SpatialAnalysisOperationFocus"]] = None, **kwargs ): + """ + :keyword threshold: The event threshold. + :paramtype threshold: str + :keyword focus: The operation focus type. Possible values include: "center", "bottomCenter", + "footprint". + :paramtype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus + """ super(SpatialAnalysisPersonLineCrossingEvent, self).__init__(threshold=threshold, focus=focus, **kwargs) @@ -2768,10 +4404,10 @@ class SpatialAnalysisPersonLineCrossingLineEvents(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param line: Required. The named line. - :type line: ~azure.media.videoanalyzer.edge.models.NamedLineBase - :param events: The event configuration. - :type events: + :ivar line: Required. The named line. + :vartype line: ~azure.media.videoanalyzer.edge.models.NamedLineBase + :ivar events: The event configuration. + :vartype events: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonLineCrossingEvent] """ @@ -2791,6 +4427,13 @@ def __init__( events: Optional[List["SpatialAnalysisPersonLineCrossingEvent"]] = None, **kwargs ): + """ + :keyword line: Required. The named line. + :paramtype line: ~azure.media.videoanalyzer.edge.models.NamedLineBase + :keyword events: The event configuration. + :paramtype events: + list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonLineCrossingEvent] + """ super(SpatialAnalysisPersonLineCrossingLineEvents, self).__init__(**kwargs) self.line = line self.events = events @@ -2801,19 +4444,25 @@ class SpatialAnalysisPersonLineCrossingOperation(SpatialAnalysisTypedOperationBa All required parameters must be populated in order to send to Azure. - :param type: Required. The Type discriminator for the derived types.Constant filled by server. 
- :type type: str - :param debug: If set to 'true', enables debugging mode for this operation. - :type debug: str - :param camera_configuration: Advanced camera configuration. - :type camera_configuration: str - :param detector_node_configuration: Advanced detector node configuration. - :type detector_node_configuration: str - :param enable_face_mask_classifier: If set to 'true', enables face mask detection for this + :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar debug: If set to 'true', enables debugging mode for this operation. + :vartype debug: str + :ivar calibration_configuration: Advanced calibration configuration. + :vartype calibration_configuration: str + :ivar camera_configuration: Advanced camera configuration. + :vartype camera_configuration: str + :ivar camera_calibrator_node_configuration: Advanced camera calibrator configuration. + :vartype camera_calibrator_node_configuration: str + :ivar detector_node_configuration: Advanced detector node configuration. + :vartype detector_node_configuration: str + :ivar tracker_node_configuration: Advanced tracker node configuration. + :vartype tracker_node_configuration: str + :ivar enable_face_mask_classifier: If set to 'true', enables face mask detection for this operation. - :type enable_face_mask_classifier: str - :param lines: Required. The list of lines with optional events. - :type lines: + :vartype enable_face_mask_classifier: str + :ivar lines: Required. The list of lines with optional events. + :vartype lines: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonLineCrossingLineEvents] """ @@ -2825,8 +4474,11 @@ class SpatialAnalysisPersonLineCrossingOperation(SpatialAnalysisTypedOperationBa _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'debug': {'key': 'debug', 'type': 'str'}, + 'calibration_configuration': {'key': 'calibrationConfiguration', 'type': 'str'}, 'camera_configuration': {'key': 'cameraConfiguration', 'type': 'str'}, + 'camera_calibrator_node_configuration': {'key': 'cameraCalibratorNodeConfiguration', 'type': 'str'}, 'detector_node_configuration': {'key': 'detectorNodeConfiguration', 'type': 'str'}, + 'tracker_node_configuration': {'key': 'trackerNodeConfiguration', 'type': 'str'}, 'enable_face_mask_classifier': {'key': 'enableFaceMaskClassifier', 'type': 'str'}, 'lines': {'key': 'lines', 'type': '[SpatialAnalysisPersonLineCrossingLineEvents]'}, } @@ -2836,12 +4488,35 @@ def __init__( *, lines: List["SpatialAnalysisPersonLineCrossingLineEvents"], debug: Optional[str] = None, + calibration_configuration: Optional[str] = None, camera_configuration: Optional[str] = None, + camera_calibrator_node_configuration: Optional[str] = None, detector_node_configuration: Optional[str] = None, + tracker_node_configuration: Optional[str] = None, enable_face_mask_classifier: Optional[str] = None, **kwargs ): - super(SpatialAnalysisPersonLineCrossingOperation, self).__init__(debug=debug, camera_configuration=camera_configuration, detector_node_configuration=detector_node_configuration, enable_face_mask_classifier=enable_face_mask_classifier, **kwargs) + """ + :keyword debug: If set to 'true', enables debugging mode for this operation. + :paramtype debug: str + :keyword calibration_configuration: Advanced calibration configuration. + :paramtype calibration_configuration: str + :keyword camera_configuration: Advanced camera configuration. 
+ :paramtype camera_configuration: str + :keyword camera_calibrator_node_configuration: Advanced camera calibrator configuration. + :paramtype camera_calibrator_node_configuration: str + :keyword detector_node_configuration: Advanced detector node configuration. + :paramtype detector_node_configuration: str + :keyword tracker_node_configuration: Advanced tracker node configuration. + :paramtype tracker_node_configuration: str + :keyword enable_face_mask_classifier: If set to 'true', enables face mask detection for this + operation. + :paramtype enable_face_mask_classifier: str + :keyword lines: Required. The list of lines with optional events. + :paramtype lines: + list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonLineCrossingLineEvents] + """ + super(SpatialAnalysisPersonLineCrossingOperation, self).__init__(debug=debug, calibration_configuration=calibration_configuration, camera_configuration=camera_configuration, camera_calibrator_node_configuration=camera_calibrator_node_configuration, detector_node_configuration=detector_node_configuration, tracker_node_configuration=tracker_node_configuration, enable_face_mask_classifier=enable_face_mask_classifier, **kwargs) self.type = '#Microsoft.VideoAnalyzer.SpatialAnalysisPersonLineCrossingOperation' # type: str self.lines = lines @@ -2849,13 +4524,13 @@ def __init__( class SpatialAnalysisPersonZoneCrossingEvent(SpatialAnalysisOperationEventBase): """Defines a Spatial Analysis person crossing zone operation eventing configuration. - :param threshold: The event threshold. - :type threshold: str - :param focus: The operation focus type. Possible values include: "center", "bottomCenter", + :ivar threshold: The event threshold. + :vartype threshold: str + :ivar focus: The operation focus type. Possible values include: "center", "bottomCenter", "footprint". - :type focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus - :param event_type: The event type. Possible values include: "zoneCrossing", "zoneDwellTime". - :type event_type: str or + :vartype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus + :ivar event_type: The event type. Possible values include: "zoneCrossing", "zoneDwellTime". + :vartype event_type: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonZoneCrossingEventType """ @@ -2873,6 +4548,16 @@ def __init__( event_type: Optional[Union[str, "SpatialAnalysisPersonZoneCrossingEventType"]] = None, **kwargs ): + """ + :keyword threshold: The event threshold. + :paramtype threshold: str + :keyword focus: The operation focus type. Possible values include: "center", "bottomCenter", + "footprint". + :paramtype focus: str or ~azure.media.videoanalyzer.edge.models.SpatialAnalysisOperationFocus + :keyword event_type: The event type. Possible values include: "zoneCrossing", "zoneDwellTime". + :paramtype event_type: str or + ~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonZoneCrossingEventType + """ super(SpatialAnalysisPersonZoneCrossingEvent, self).__init__(threshold=threshold, focus=focus, **kwargs) self.event_type = event_type @@ -2882,19 +4567,25 @@ class SpatialAnalysisPersonZoneCrossingOperation(SpatialAnalysisTypedOperationBa All required parameters must be populated in order to send to Azure. - :param type: Required. The Type discriminator for the derived types.Constant filled by server. - :type type: str - :param debug: If set to 'true', enables debugging mode for this operation. 
- :type debug: str - :param camera_configuration: Advanced camera configuration. - :type camera_configuration: str - :param detector_node_configuration: Advanced detector node configuration. - :type detector_node_configuration: str - :param enable_face_mask_classifier: If set to 'true', enables face mask detection for this + :ivar type: Required. The Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar debug: If set to 'true', enables debugging mode for this operation. + :vartype debug: str + :ivar calibration_configuration: Advanced calibration configuration. + :vartype calibration_configuration: str + :ivar camera_configuration: Advanced camera configuration. + :vartype camera_configuration: str + :ivar camera_calibrator_node_configuration: Advanced camera calibrator configuration. + :vartype camera_calibrator_node_configuration: str + :ivar detector_node_configuration: Advanced detector node configuration. + :vartype detector_node_configuration: str + :ivar tracker_node_configuration: Advanced tracker node configuration. + :vartype tracker_node_configuration: str + :ivar enable_face_mask_classifier: If set to 'true', enables face mask detection for this operation. - :type enable_face_mask_classifier: str - :param zones: Required. The list of zones with optional events. - :type zones: + :vartype enable_face_mask_classifier: str + :ivar zones: Required. The list of zones with optional events. + :vartype zones: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonZoneCrossingZoneEvents] """ @@ -2906,8 +4597,11 @@ class SpatialAnalysisPersonZoneCrossingOperation(SpatialAnalysisTypedOperationBa _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'debug': {'key': 'debug', 'type': 'str'}, + 'calibration_configuration': {'key': 'calibrationConfiguration', 'type': 'str'}, 'camera_configuration': {'key': 'cameraConfiguration', 'type': 'str'}, + 'camera_calibrator_node_configuration': {'key': 'cameraCalibratorNodeConfiguration', 'type': 'str'}, 'detector_node_configuration': {'key': 'detectorNodeConfiguration', 'type': 'str'}, + 'tracker_node_configuration': {'key': 'trackerNodeConfiguration', 'type': 'str'}, 'enable_face_mask_classifier': {'key': 'enableFaceMaskClassifier', 'type': 'str'}, 'zones': {'key': 'zones', 'type': '[SpatialAnalysisPersonZoneCrossingZoneEvents]'}, } @@ -2917,12 +4611,35 @@ def __init__( *, zones: List["SpatialAnalysisPersonZoneCrossingZoneEvents"], debug: Optional[str] = None, + calibration_configuration: Optional[str] = None, camera_configuration: Optional[str] = None, + camera_calibrator_node_configuration: Optional[str] = None, detector_node_configuration: Optional[str] = None, + tracker_node_configuration: Optional[str] = None, enable_face_mask_classifier: Optional[str] = None, **kwargs ): - super(SpatialAnalysisPersonZoneCrossingOperation, self).__init__(debug=debug, camera_configuration=camera_configuration, detector_node_configuration=detector_node_configuration, enable_face_mask_classifier=enable_face_mask_classifier, **kwargs) + """ + :keyword debug: If set to 'true', enables debugging mode for this operation. + :paramtype debug: str + :keyword calibration_configuration: Advanced calibration configuration. + :paramtype calibration_configuration: str + :keyword camera_configuration: Advanced camera configuration. + :paramtype camera_configuration: str + :keyword camera_calibrator_node_configuration: Advanced camera calibrator configuration. 
+ :paramtype camera_calibrator_node_configuration: str + :keyword detector_node_configuration: Advanced detector node configuration. + :paramtype detector_node_configuration: str + :keyword tracker_node_configuration: Advanced tracker node configuration. + :paramtype tracker_node_configuration: str + :keyword enable_face_mask_classifier: If set to 'true', enables face mask detection for this + operation. + :paramtype enable_face_mask_classifier: str + :keyword zones: Required. The list of zones with optional events. + :paramtype zones: + list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonZoneCrossingZoneEvents] + """ + super(SpatialAnalysisPersonZoneCrossingOperation, self).__init__(debug=debug, calibration_configuration=calibration_configuration, camera_configuration=camera_configuration, camera_calibrator_node_configuration=camera_calibrator_node_configuration, detector_node_configuration=detector_node_configuration, tracker_node_configuration=tracker_node_configuration, enable_face_mask_classifier=enable_face_mask_classifier, **kwargs) self.type = '#Microsoft.VideoAnalyzer.SpatialAnalysisPersonZoneCrossingOperation' # type: str self.zones = zones @@ -2932,10 +4649,10 @@ class SpatialAnalysisPersonZoneCrossingZoneEvents(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param zone: Required. The named zone. - :type zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase - :param events: The event configuration. - :type events: + :ivar zone: Required. The named zone. + :vartype zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase + :ivar events: The event configuration. + :vartype events: list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonZoneCrossingEvent] """ @@ -2955,20 +4672,63 @@ def __init__( events: Optional[List["SpatialAnalysisPersonZoneCrossingEvent"]] = None, **kwargs ): + """ + :keyword zone: Required. The named zone. + :paramtype zone: ~azure.media.videoanalyzer.edge.models.NamedPolygonBase + :keyword events: The event configuration. + :paramtype events: + list[~azure.media.videoanalyzer.edge.models.SpatialAnalysisPersonZoneCrossingEvent] + """ super(SpatialAnalysisPersonZoneCrossingZoneEvents, self).__init__(**kwargs) self.zone = zone self.events = events +class SymmetricKeyCredentials(CredentialsBase): + """Symmetric key credential. + + All required parameters must be populated in order to send to Azure. + + :ivar type: Required. Type discriminator for the derived types.Constant filled by server. + :vartype type: str + :ivar key: Required. Symmetric key credential. + :vartype key: str + """ + + _validation = { + 'type': {'required': True}, + 'key': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'key': {'key': 'key', 'type': 'str'}, + } + + def __init__( + self, + *, + key: str, + **kwargs + ): + """ + :keyword key: Required. Symmetric key credential. + :paramtype key: str + """ + super(SymmetricKeyCredentials, self).__init__(**kwargs) + self.type = '#Microsoft.VideoAnalyzer.SymmetricKeyCredentials' # type: str + self.key = key + + class SystemData(msrest.serialization.Model): """Read-only system metadata associated with a resource. - :param created_at: Date and time when this resource was first created. Value is represented in + :ivar created_at: Date and time when this resource was first created. Value is represented in UTC according to the ISO8601 date format. 
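A short sketch of the `SymmetricKeyCredentials` model above in context: attaching a device's symmetric key to the IoT Hub connection of a remote device adapter. The key is a placeholder, and the `IotHubDeviceConnection` shape is assumed from this same API version.

```python
from azure.media.videoanalyzeredge import IotHubDeviceConnection, SymmetricKeyCredentials

connection = IotHubDeviceConnection(
    device_id="camera001",
    credentials=SymmetricKeyCredentials(key="<device-primary-key>"),
)
```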
+
+
class SystemData(msrest.serialization.Model):
 """Read-only system metadata associated with a resource.

- :param created_at: Date and time when this resource was first created. Value is represented in
+ :ivar created_at: Date and time when this resource was first created. Value is represented in
 UTC according to the ISO8601 date format.
- :type created_at: ~datetime.datetime
- :param last_modified_at: Date and time when this resource was last modified. Value is
+ :vartype created_at: ~datetime.datetime
+ :ivar last_modified_at: Date and time when this resource was last modified. Value is
 represented in UTC according to the ISO8601 date format.
- :type last_modified_at: ~datetime.datetime
+ :vartype last_modified_at: ~datetime.datetime
 """

 _attribute_map = {
@@ -2983,6 +4743,14 @@ def __init__(
 last_modified_at: Optional[datetime.datetime] = None,
 **kwargs
 ):
+ """
+ :keyword created_at: Date and time when this resource was first created. Value is represented
+ in UTC according to the ISO8601 date format.
+ :paramtype created_at: ~datetime.datetime
+ :keyword last_modified_at: Date and time when this resource was last modified. Value is
+ represented in UTC according to the ISO8601 date format.
+ :paramtype last_modified_at: ~datetime.datetime
+ """
 super(SystemData, self).__init__(**kwargs)
 self.created_at = created_at
 self.last_modified_at = last_modified_at


@@ -2993,19 +4761,19 @@ class TlsEndpoint(EndpointBase):

 All required parameters must be populated in order to send to Azure.

- :param type: Required. Type discriminator for the derived types.Constant filled by server.
- :type type: str
- :param credentials: Credentials to be presented to the endpoint.
- :type credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase
- :param url: Required. The endpoint URL for Video Analyzer to connect to.
- :type url: str
- :param trusted_certificates: List of trusted certificate authorities when authenticating a TLS
+ :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
+ :vartype type: str
+ :ivar credentials: Credentials to be presented to the endpoint.
+ :vartype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase
+ :ivar url: Required. The endpoint URL for Video Analyzer to connect to.
+ :vartype url: str
+ :ivar trusted_certificates: List of trusted certificate authorities when authenticating a TLS
 connection. A null list designates that Azure Video Analyzer's list of trusted authorities
 should be used.
- :type trusted_certificates: ~azure.media.videoanalyzer.edge.models.CertificateSource
- :param validation_options: Validation options to use when authenticating a TLS connection. By
+ :vartype trusted_certificates: ~azure.media.videoanalyzer.edge.models.CertificateSource
+ :ivar validation_options: Validation options to use when authenticating a TLS connection. By
 default, strict validation is used.
- :type validation_options: ~azure.media.videoanalyzer.edge.models.TlsValidationOptions
+ :vartype validation_options: ~azure.media.videoanalyzer.edge.models.TlsValidationOptions
 """

 _validation = {
@@ -3030,6 +4798,19 @@ def __init__(
 validation_options: Optional["TlsValidationOptions"] = None,
 **kwargs
 ):
+ """
+ :keyword credentials: Credentials to be presented to the endpoint.
+ :paramtype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase
+ :keyword url: Required. The endpoint URL for Video Analyzer to connect to.
+ :paramtype url: str
+ :keyword trusted_certificates: List of trusted certificate authorities when authenticating a
+ TLS connection. A null list designates that Azure Video Analyzer's list of trusted authorities
+ should be used.
+ :paramtype trusted_certificates: ~azure.media.videoanalyzer.edge.models.CertificateSource
+ :keyword validation_options: Validation options to use when authenticating a TLS connection. By
+ default, strict validation is used.
+ :paramtype validation_options: ~azure.media.videoanalyzer.edge.models.TlsValidationOptions
+ """
 super(TlsEndpoint, self).__init__(credentials=credentials, url=url, **kwargs)
 self.type = '#Microsoft.VideoAnalyzer.TlsEndpoint' # type: str
 self.trusted_certificates = trusted_certificates
@@ -3039,12 +4820,12 @@ def __init__(

class TlsValidationOptions(msrest.serialization.Model):
 """Options for controlling the validation of TLS endpoints.

- :param ignore_hostname: When set to 'true' causes the certificate subject name validation to be
+ :ivar ignore_hostname: When set to 'true', causes the certificate subject name validation to be
 skipped. Default is 'false'.
- :type ignore_hostname: str
- :param ignore_signature: When set to 'true' causes the certificate chain trust validation to be
+ :vartype ignore_hostname: str
+ :ivar ignore_signature: When set to 'true', causes the certificate chain trust validation to be
 skipped. Default is 'false'.
- :type ignore_signature: str
+ :vartype ignore_signature: str
 """

 _attribute_map = {
@@ -3059,6 +4840,14 @@ def __init__(
 ignore_signature: Optional[str] = None,
 **kwargs
 ):
+ """
+ :keyword ignore_hostname: When set to 'true', causes the certificate subject name validation to
+ be skipped. Default is 'false'.
+ :paramtype ignore_hostname: str
+ :keyword ignore_signature: When set to 'true', causes the certificate chain trust validation to
+ be skipped. Default is 'false'.
+ :paramtype ignore_signature: str
+ """
 super(TlsValidationOptions, self).__init__(**kwargs)
 self.ignore_hostname = ignore_hostname
 self.ignore_signature = ignore_signature


@@ -3069,12 +4858,12 @@ class UnsecuredEndpoint(EndpointBase):

 All required parameters must be populated in order to send to Azure.

- :param type: Required. Type discriminator for the derived types.Constant filled by server.
- :type type: str
- :param credentials: Credentials to be presented to the endpoint.
- :type credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase
- :param url: Required. The endpoint URL for Video Analyzer to connect to.
- :type url: str
+ :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
+ :vartype type: str
+ :ivar credentials: Credentials to be presented to the endpoint.
+ :vartype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase
+ :ivar url: Required. The endpoint URL for Video Analyzer to connect to.
+ :vartype url: str
 """

 _validation = {
@@ -3095,6 +4884,12 @@ def __init__(
 credentials: Optional["CredentialsBase"] = None,
 **kwargs
 ):
+ """
+ :keyword credentials: Credentials to be presented to the endpoint.
+ :paramtype credentials: ~azure.media.videoanalyzer.edge.models.CredentialsBase
+ :keyword url: Required. The endpoint URL for Video Analyzer to connect to.
+ :paramtype url: str
+ """
 super(UnsecuredEndpoint, self).__init__(credentials=credentials, url=url, **kwargs)
 self.type = '#Microsoft.VideoAnalyzer.UnsecuredEndpoint' # type: str
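The two endpoint flavors differ only in transport security; both carry the same credentials object. A minimal sketch grounded in the RTSP source of the sample below; the RTSPS URL and the relaxed hostname check are illustrative assumptions.

```python
from azure.media.videoanalyzeredge import (
    TlsEndpoint,
    TlsValidationOptions,
    UnsecuredEndpoint,
    UsernamePasswordCredentials,
)

creds = UsernamePasswordCredentials(username="${rtspUserName}", password="${rtspPassword}")

# Plain RTSP, as used against the camera simulator in the updated sample.
plain = UnsecuredEndpoint(url="rtsp://camerasimulator:8554", credentials=creds)

# TLS-protected RTSP; note that the validation flags are strings, not booleans.
secure = TlsEndpoint(
    url="rtsps://camera.example.local:322",  # illustrative URL
    credentials=creds,
    validation_options=TlsValidationOptions(ignore_hostname="true", ignore_signature="false"),
)
```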
@@ -3104,14 +4899,14 @@ class UsernamePasswordCredentials(CredentialsBase):

 All required parameters must be populated in order to send to Azure.

- :param type: Required. Type discriminator for the derived types.Constant filled by server.
- :type type: str
- :param username: Required. Username to be presented as part of the credentials.
- :type username: str
- :param password: Required. Password to be presented as part of the credentials. It is
+ :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
+ :vartype type: str
+ :ivar username: Required. Username to be presented as part of the credentials.
+ :vartype username: str
+ :ivar password: Required. Password to be presented as part of the credentials. It is
 recommended that this value is parameterized as a secret string in order to prevent this value
 from being returned as part of the resource on API requests.
- :type password: str
+ :vartype password: str
 """

 _validation = {
@@ -3133,6 +4928,14 @@ def __init__(
 password: str,
 **kwargs
 ):
+ """
+ :keyword username: Required. Username to be presented as part of the credentials.
+ :paramtype username: str
+ :keyword password: Required. Password to be presented as part of the credentials. It is
+ recommended that this value is parameterized as a secret string in order to prevent this value
+ from being returned as part of the resource on API requests.
+ :paramtype password: str
+ """
 super(UsernamePasswordCredentials, self).__init__(**kwargs)
 self.type = '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials' # type: str
 self.username = username
@@ -3142,26 +4945,30 @@ def __init__(

class VideoCreationProperties(msrest.serialization.Model):
 """Optional video properties to be used in case a new video resource needs to be created on the service. These will not take effect if the video already exists.

- :param title: Optional video title provided by the user. Value can be up to 256 characters
- long.
- :type title: str
- :param description: Optional video description provided by the user. Value can be up to 2048
+ :ivar title: Optional video title provided by the user. Value can be up to 256 characters long.
+ :vartype title: str
+ :ivar description: Optional video description provided by the user. Value can be up to 2048
 characters long.
- :type description: str
- :param segment_length: Video segment length indicates the length of individual video files
+ :vartype description: str
+ :ivar segment_length: Video segment length indicates the length of individual video files
 (segments) which are persisted to storage. Smaller segments provide lower archive playback
 latency but generate a larger volume of storage transactions. Larger segments reduce the amount
 of storage transactions while increasing the archive playback latency. Value must be specified
 in ISO8601 duration format (i.e. "PT30S" equals 30 seconds) and can vary between 30 seconds and
 5 minutes, in 30-second increments. Changing this value after the video is initially created
 can lead to errors when uploading media to the archive. Default value is 30 seconds.
- :type segment_length: str
+ :vartype segment_length: str
+ :ivar retention_period: Video retention period indicates how long the video is kept in storage,
+ and must be a multiple of 1 day. For example, if this is set to 30 days, then content older
+ than 30 days will be deleted.
+ :vartype retention_period: str
 """

 _attribute_map = {
 'title': {'key': 'title', 'type': 'str'},
 'description': {'key': 'description', 'type': 'str'},
 'segment_length': {'key': 'segmentLength', 'type': 'str'},
+ 'retention_period': {'key': 'retentionPeriod', 'type': 'str'},
 }

 def __init__(
@@ -3170,12 +4977,155 @@ def __init__(
 title: Optional[str] = None,
 description: Optional[str] = None,
 segment_length: Optional[str] = None,
+ retention_period: Optional[str] = None,
 **kwargs
 ):
+ """
+ :keyword title: Optional video title provided by the user. Value can be up to 256 characters
+ long.
+ :paramtype title: str
+ :keyword description: Optional video description provided by the user. Value can be up to 2048
+ characters long.
+ :paramtype description: str
+ :keyword segment_length: Video segment length indicates the length of individual video files
+ (segments) which are persisted to storage. Smaller segments provide lower archive playback
+ latency but generate a larger volume of storage transactions. Larger segments reduce the amount
+ of storage transactions while increasing the archive playback latency. Value must be specified
+ in ISO8601 duration format (i.e. "PT30S" equals 30 seconds) and can vary between 30 seconds and
+ 5 minutes, in 30-second increments. Changing this value after the video is initially created
+ can lead to errors when uploading media to the archive. Default value is 30 seconds.
+ :paramtype segment_length: str
+ :keyword retention_period: Video retention period indicates how long the video is kept in
+ storage, and must be a multiple of 1 day. For example, if this is set to 30 days, then content
+ older than 30 days will be deleted.
+ :paramtype retention_period: str
+ """
 super(VideoCreationProperties, self).__init__(**kwargs)
 self.title = title
 self.description = description
 self.segment_length = segment_length
+ self.retention_period = retention_period
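The new retention_period rounds out the video creation settings. A minimal sketch; "P30D" is an assumed ISO8601 value satisfying the multiple-of-1-day rule.

```python
from azure.media.videoanalyzeredge import VideoCreationProperties

props = VideoCreationProperties(
    title="Parking lot camera",        # up to 256 characters
    description="Overnight archive",   # up to 2048 characters
    segment_length="PT30S",            # 30-second segments, the default
    retention_period="P30D",           # assumed value: keep 30 days of content
)
```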
+
+
+class VideoEncoderConfiguration(msrest.serialization.Model):
+ """Class representing the video encoder configuration.
+
+ :ivar encoding: The video codec used by the Media Profile. Possible values include: "JPEG",
+ "H264", "MPEG4".
+ :vartype encoding: str or ~azure.media.videoanalyzer.edge.models.VideoEncoding
+ :ivar quality: Relative value representing the quality of the video.
+ :vartype quality: float
+ :ivar resolution: The Video Resolution.
+ :vartype resolution: ~azure.media.videoanalyzer.edge.models.VideoResolution
+ :ivar rate_control: The Video's rate control.
+ :vartype rate_control: ~azure.media.videoanalyzer.edge.models.RateControl
+ :ivar h264: The H264 Configuration.
+ :vartype h264: ~azure.media.videoanalyzer.edge.models.H264Configuration
+ :ivar mpeg4: The MPEG4 Configuration.
+ :vartype mpeg4: ~azure.media.videoanalyzer.edge.models.MPEG4Configuration
+ """
+
+ _attribute_map = {
+ 'encoding': {'key': 'encoding', 'type': 'str'},
+ 'quality': {'key': 'quality', 'type': 'float'},
+ 'resolution': {'key': 'resolution', 'type': 'VideoResolution'},
+ 'rate_control': {'key': 'rateControl', 'type': 'RateControl'},
+ 'h264': {'key': 'h264', 'type': 'H264Configuration'},
+ 'mpeg4': {'key': 'mpeg4', 'type': 'MPEG4Configuration'},
+ }
+
+ def __init__(
+ self,
+ *,
+ encoding: Optional[Union[str, "VideoEncoding"]] = None,
+ quality: Optional[float] = None,
+ resolution: Optional["VideoResolution"] = None,
+ rate_control: Optional["RateControl"] = None,
+ h264: Optional["H264Configuration"] = None,
+ mpeg4: Optional["MPEG4Configuration"] = None,
+ **kwargs
+ ):
+ """
+ :keyword encoding: The video codec used by the Media Profile. Possible values include: "JPEG",
+ "H264", "MPEG4".
+ :paramtype encoding: str or ~azure.media.videoanalyzer.edge.models.VideoEncoding
+ :keyword quality: Relative value representing the quality of the video.
+ :paramtype quality: float
+ :keyword resolution: The Video Resolution.
+ :paramtype resolution: ~azure.media.videoanalyzer.edge.models.VideoResolution
+ :keyword rate_control: The Video's rate control.
+ :paramtype rate_control: ~azure.media.videoanalyzer.edge.models.RateControl
+ :keyword h264: The H264 Configuration.
+ :paramtype h264: ~azure.media.videoanalyzer.edge.models.H264Configuration
+ :keyword mpeg4: The MPEG4 Configuration.
+ :paramtype mpeg4: ~azure.media.videoanalyzer.edge.models.MPEG4Configuration
+ """
+ super(VideoEncoderConfiguration, self).__init__(**kwargs)
+ self.encoding = encoding
+ self.quality = quality
+ self.resolution = resolution
+ self.rate_control = rate_control
+ self.h264 = h264
+ self.mpeg4 = mpeg4
+
+
+class VideoPublishingOptions(msrest.serialization.Model):
+ """Options for changing video publishing behavior on the video sink and output video.
+
+ :ivar enable_video_preview_image: When set to 'true', the video will publish preview images.
+ Default is 'false'.
+ :vartype enable_video_preview_image: str
+ """
+
+ _attribute_map = {
+ 'enable_video_preview_image': {'key': 'enableVideoPreviewImage', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ enable_video_preview_image: Optional[str] = None,
+ **kwargs
+ ):
+ """
+ :keyword enable_video_preview_image: When set to 'true', the video will publish preview images.
+ Default is 'false'.
+ :paramtype enable_video_preview_image: str
+ """
+ super(VideoPublishingOptions, self).__init__(**kwargs)
+ self.enable_video_preview_image = enable_video_preview_image
+
+
+class VideoResolution(msrest.serialization.Model):
+ """The Video resolution.
+
+ :ivar width: The number of columns of the Video image.
+ :vartype width: float
+ :ivar height: The number of lines of the Video image.
+ :vartype height: float
+ """
+
+ _attribute_map = {
+ 'width': {'key': 'width', 'type': 'float'},
+ 'height': {'key': 'height', 'type': 'float'},
+ }
+
+ def __init__(
+ self,
+ *,
+ width: Optional[float] = None,
+ height: Optional[float] = None,
+ **kwargs
+ ):
+ """
+ :keyword width: The number of columns of the Video image.
+ :paramtype width: float
+ :keyword height: The number of lines of the Video image.
+ :paramtype height: float
+ """
+ super(VideoResolution, self).__init__(**kwargs)
+ self.width = width
+ self.height = height
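These encoder models describe media profiles (for example, those reported by ONVIF device discovery) rather than pipeline nodes. A minimal sketch using only the fields documented above; the chosen values are illustrative.

```python
from azure.media.videoanalyzeredge import VideoEncoderConfiguration, VideoResolution

# A 1080p H.264 profile; width and height are declared as floats in the model,
# and quality is a relative value.
encoder = VideoEncoderConfiguration(
    encoding="H264",
    quality=5.0,
    resolution=VideoResolution(width=1920, height=1080),
)
```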


class VideoSink(SinkNodeBase):
@@ -3183,28 +5133,33 @@ class VideoSink(SinkNodeBase):

 All required parameters must be populated in order to send to Azure.

- :param type: Required. Type discriminator for the derived types.Constant filled by server.
- :type type: str
- :param name: Required. Node name. Must be unique within the topology.
- :type name: str
- :param inputs: Required. An array of upstream node references within the topology to be used as
+ :ivar type: Required. Type discriminator for the derived types. Constant filled by server.
+ :vartype type: str
+ :ivar name: Required. Node name. Must be unique within the topology.
+ :vartype name: str
+ :ivar inputs: Required. An array of upstream node references within the topology to be used as
 inputs for this node.
- :type inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
- :param video_name: Required. Name of a new or existing Video Analyzer video resource used for
+ :vartype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
+ :ivar video_name: Required. Name of a new or existing Video Analyzer video resource used for
 the media recording.
- :type video_name: str
- :param video_creation_properties: Optional video properties to be used in case a new video
+ :vartype video_name: str
+ :ivar video_creation_properties: Optional video properties to be used in case a new video
 resource needs to be created on the service.
- :type video_creation_properties: ~azure.media.videoanalyzer.edge.models.VideoCreationProperties
- :param local_media_cache_path: Required. Path to a local file system directory for caching of
+ :vartype video_creation_properties:
+ ~azure.media.videoanalyzer.edge.models.VideoCreationProperties
+ :ivar video_publishing_options: Optional video publishing options to be used for changing
+ publishing behavior of the output video.
+ :vartype video_publishing_options:
+ ~azure.media.videoanalyzer.edge.models.VideoPublishingOptions
+ :ivar local_media_cache_path: Required. Path to a local file system directory for caching of
 temporary media files. This will also be used to store content which cannot be immediately
 uploaded to Azure due to Internet connectivity issues.
- :type local_media_cache_path: str
- :param local_media_cache_maximum_size_mi_b: Required. Maximum amount of disk space that can be
+ :vartype local_media_cache_path: str
+ :ivar local_media_cache_maximum_size_mi_b: Required. Maximum amount of disk space that can be
 used for caching of temporary media files. Once this limit is reached, the oldest segments of
 the media archive will be continuously deleted in order to make space for new media, thus
 leading to gaps in the cloud recorded content.
- :type local_media_cache_maximum_size_mi_b: str
+ :vartype local_media_cache_maximum_size_mi_b: str
 """

 _validation = {
@@ -3222,6 +5177,7 @@ class VideoSink(SinkNodeBase):
 'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
 'video_name': {'key': 'videoName', 'type': 'str'},
 'video_creation_properties': {'key': 'videoCreationProperties', 'type': 'VideoCreationProperties'},
+ 'video_publishing_options': {'key': 'videoPublishingOptions', 'type': 'VideoPublishingOptions'},
 'local_media_cache_path': {'key': 'localMediaCachePath', 'type': 'str'},
 'local_media_cache_maximum_size_mi_b': {'key': 'localMediaCacheMaximumSizeMiB', 'type': 'str'},
 }
@@ -3235,11 +5191,40 @@ def __init__(
 local_media_cache_path: str,
 local_media_cache_maximum_size_mi_b: str,
 video_creation_properties: Optional["VideoCreationProperties"] = None,
+ video_publishing_options: Optional["VideoPublishingOptions"] = None,
 **kwargs
 ):
+ """
+ :keyword name: Required. Node name. Must be unique within the topology.
+ :paramtype name: str
+ :keyword inputs: Required. An array of upstream node references within the topology to be used
+ as inputs for this node.
+ :paramtype inputs: list[~azure.media.videoanalyzer.edge.models.NodeInput]
+ :keyword video_name: Required. Name of a new or existing Video Analyzer video resource used for
+ the media recording.
+ :paramtype video_name: str
+ :keyword video_creation_properties: Optional video properties to be used in case a new video
+ resource needs to be created on the service.
+ :paramtype video_creation_properties:
+ ~azure.media.videoanalyzer.edge.models.VideoCreationProperties
+ :keyword video_publishing_options: Optional video publishing options to be used for changing
+ publishing behavior of the output video.
+ :paramtype video_publishing_options:
+ ~azure.media.videoanalyzer.edge.models.VideoPublishingOptions
+ :keyword local_media_cache_path: Required. Path to a local file system directory for caching of
+ temporary media files. This will also be used to store content which cannot be immediately
+ uploaded to Azure due to Internet connectivity issues.
+ :paramtype local_media_cache_path: str
+ :keyword local_media_cache_maximum_size_mi_b: Required. Maximum amount of disk space that can
+ be used for caching of temporary media files.
+ Once this limit is reached, the oldest segments of the media archive will be continuously
+ deleted in order to make space for new media, thus leading to gaps in the cloud recorded
+ content.
+ :paramtype local_media_cache_maximum_size_mi_b: str
+ """
 super(VideoSink, self).__init__(name=name, inputs=inputs, **kwargs)
 self.type = '#Microsoft.VideoAnalyzer.VideoSink' # type: str
 self.video_name = video_name
 self.video_creation_properties = video_creation_properties
+ self.video_publishing_options = video_publishing_options
 self.local_media_cache_path = local_media_cache_path
 self.local_media_cache_maximum_size_mi_b = local_media_cache_maximum_size_mi_b
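Putting the VideoSink additions together: a minimal sketch mirroring the sink built in the updated sample below, extended with the new publishing and retention options (the "P30D" retention value is an assumption).

```python
from azure.media.videoanalyzeredge import (
    NodeInput,
    VideoCreationProperties,
    VideoPublishingOptions,
    VideoSink,
)

sink = VideoSink(
    name="videoSink",
    inputs=[NodeInput(node_name="rtspSource")],
    video_name="video",
    video_creation_properties=VideoCreationProperties(retention_period="P30D"),
    video_publishing_options=VideoPublishingOptions(enable_video_preview_image="true"),
    local_media_cache_path="/var/lib/videoanalyzer/tmp/",
    local_media_cache_maximum_size_mi_b="1024",
)
```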
diff --git a/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_version.py b/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_version.py
index 4dd1b2d847f8..9c0a682a433e 100644
--- a/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_version.py
+++ b/sdk/videoanalyzer/azure-media-videoanalyzer-edge/azure/media/videoanalyzeredge/_version.py
@@ -4,4 +4,4 @@
 # Licensed under the MIT License. See License.txt in the project root for license information.
 # --------------------------------------------------------------------------

-VERSION = "1.0.0b2"
+VERSION = "1.0.0b3"
diff --git a/sdk/videoanalyzer/azure-media-videoanalyzer-edge/samples/sample_lva.py b/sdk/videoanalyzer/azure-media-videoanalyzer-edge/samples/sample_lva.py
index 2ab1f294951a..71a5b419674b 100644
--- a/sdk/videoanalyzer/azure-media-videoanalyzer-edge/samples/sample_lva.py
+++ b/sdk/videoanalyzer/azure-media-videoanalyzer-edge/samples/sample_lva.py
@@ -6,12 +6,13 @@
 from azure.iot.hub.models import CloudToDeviceMethod, CloudToDeviceMethodResult
 from datetime import time
+import os

-device_id = "lva-sample-device"
-module_d = "mediaEdge"
-connection_string = "connectionString"
+device_id = os.getenv("iothub_deviceid")
+module_id = os.getenv("iothub_moduleid")
+connection_string = os.getenv("iothub_connectionstring")
 live_pipeline_name = "pipelineInstance1"
 pipeline_topology_name = "pipelineTopology1"
-url = "rtsp://sample-url-from-camera"
+url = "rtsp://camerasimulator:8554"
+

 def build_pipeline_topology():
 pipeline_topology_properties = PipelineTopologyProperties()
@@ -19,18 +20,18 @@ def build_pipeline_topology():
 user_name_param = ParameterDeclaration(name="rtspUserName",type="String",default="testusername")
 password_param = ParameterDeclaration(name="rtspPassword",type="SecretString",default="testpassword")
 url_param = ParameterDeclaration(name="rtspUrl",type="String",default="rtsp://www.sample.com")
- hub_param = ParameterDeclaration(name="hubSinkOutputName",type="String")
 source = RtspSource(name="rtspSource", endpoint=UnsecuredEndpoint(url="${rtspUrl}",credentials=UsernamePasswordCredentials(username="${rtspUserName}",password="${rtspPassword}")))
 node = NodeInput(node_name="rtspSource")
- sink = IotHubMessageSink("msgSink", node, "${hubSinkOutputName}")
- pipeline_topology_properties.parameters = [user_name_param, password_param, url_param, hub_param]
+ sink = VideoSink(name="videoSink", inputs=[node], video_name="video", local_media_cache_path="/var/lib/videoanalyzer/tmp/", local_media_cache_maximum_size_mi_b="1024")
+ pipeline_topology_properties.parameters = [user_name_param, password_param, url_param]
 pipeline_topology_properties.sources = [source]
 pipeline_topology_properties.sinks = [sink]
 pipeline_topology = PipelineTopology(name=pipeline_topology_name,properties=pipeline_topology_properties)
 return pipeline_topology

+
 def build_live_pipeline():
 url_param = ParameterDefinition(name="rtspUrl", value=url)
 pass_param = ParameterDefinition(name="rtspPassword", value="secret_password")
@@ -40,49 +41,93 @@ def build_live_pipeline():
 return live_pipeline

+
 def invoke_method_helper(method):
 direct_method = CloudToDeviceMethod(method_name=method.method_name, payload=method.serialize())
- registry_manager = IoTHubRegistryManager(connection_string)
+ registry_manager = IoTHubRegistryManager(connection_string=connection_string)

- payload = registry_manager.invoke_device_module_method(device_id, module_d, direct_method).payload
+ payload = registry_manager.invoke_device_module_method(device_id=device_id, module_id=module_id, direct_method_request=direct_method).payload

 if payload is not None and 'error' in payload:
 print(payload['error'])
 return None

 return payload

-def main():
- pipeline_topology = build_pipeline_topology()
- live_pipeline = build_live_pipeline()
+def create_remote_device_adapter(device_name, iot_device_name):
+ registry_manager = IoTHubRegistryManager(connection_string=connection_string)
 try:
- set_pipeline_top_response = invoke_method_helper(PipelineTopologySetRequest(pipeline_topology=pipeline_topology))
- print(set_pipeline_top_response)
-
- list_pipeline_top_response = invoke_method_helper(PipelineTopologyListRequest())
- if list_pipeline_top_response:
- list_pipeline_top_result = PipelineTopologyCollection.deserialize(list_pipeline_top_response)
+ iot_device = registry_manager.get_device(device_id=iot_device_name)
+ except Exception:
+ iot_device = registry_manager.create_device_with_certificate_authority(device_id=iot_device_name, status="enabled")
+
+ remote_device_properties = RemoteDeviceAdapterProperties(target=RemoteDeviceAdapterTarget(host="camerasimulator"), iot_hub_device_connection=IotHubDeviceConnection(device_id=iot_device_name,credentials=SymmetricKeyCredentials(key=iot_device.authentication.symmetric_key.primary_key)))
+ return RemoteDeviceAdapter(name=device_name, properties=remote_device_properties)
+
+
+def send_pipeline_requests(pipeline_topology, live_pipeline):
+ set_pipeline_top_response = invoke_method_helper(PipelineTopologySetRequest(pipeline_topology=pipeline_topology))
+ if set_pipeline_top_response:
+ set_pipeline_top_result = PipelineTopology.deserialize(set_pipeline_top_response)
+
+ list_pipeline_top_response = invoke_method_helper(PipelineTopologyListRequest())
+ if list_pipeline_top_response:
+ list_pipeline_top_result = PipelineTopologyCollection.deserialize(list_pipeline_top_response)
+
+ get_pipeline_top_response = invoke_method_helper(PipelineTopologyGetRequest(name=pipeline_topology_name))
+ if get_pipeline_top_response:
+ get_pipeline_top_result = PipelineTopology.deserialize(get_pipeline_top_response)
- get_pipeline_top_response = invoke_method_helper(PipelineTopologyGetRequest(name=pipeline_topology_name))
- if get_pipeline_top_response:
- get_pipeline_top_result = PipelineTopology.deserialize(get_pipeline_top_response)
+ set_live_pipeline_response = invoke_method_helper(LivePipelineSetRequest(live_pipeline=live_pipeline))
- set_live_pipeline_response = invoke_method_helper(LivePipelineSetRequest(live_pipeline=live_pipeline))
+ activate_pipeline_response = invoke_method_helper(LivePipelineActivateRequest(name=live_pipeline_name))
- activate_pipeline_response = invoke_method_helper(LivePipelineActivateRequest(name=live_pipeline_name))
+ get_pipeline_response = invoke_method_helper(LivePipelineGetRequest(name=live_pipeline_name))
+ if get_pipeline_response:
+ get_pipeline_result = LivePipeline.deserialize(get_pipeline_response)
- get_pipeline_response = invoke_method_helper(LivePipelineGetRequest(name=live_pipeline_name))
- if get_pipeline_response:
- get_pipeline_result = LivePipeline.deserialize(get_pipeline_response)
+ deactivate_pipeline_response = invoke_method_helper(LivePipelineDeactivateRequest(name=live_pipeline_name))
- deactivate_pipeline_response = invoke_method_helper(LivePipelineDeactivateRequest(name=live_pipeline_name))
+ delete_pipeline_response = invoke_method_helper(LivePipelineDeleteRequest(name=live_pipeline_name))
- delete_pipeline_response = invoke_method_helper(LivePipelineDeleteRequest(name=live_pipeline_name))
+ delete_pipeline_response = invoke_method_helper(PipelineTopologyDeleteRequest(name=pipeline_topology_name))
- delete_pipeline_response = invoke_method_helper(PipelineTopologyDeleteRequest(name=pipeline_topology_name))

+def send_device_requests(remote_device_adapter):
+ remote_device_adapter_set_response = invoke_method_helper(
+ RemoteDeviceAdapterSetRequest(remote_device_adapter=remote_device_adapter))
+ if remote_device_adapter_set_response:
+ remote_device_adapter_set_result = RemoteDeviceAdapter.deserialize(remote_device_adapter_set_response)
+
+ remote_device_adapter_get_response = invoke_method_helper(
+ RemoteDeviceAdapterGetRequest(name=remote_device_adapter.name))
+ if remote_device_adapter_get_response:
+ remote_device_adapter_get_result = RemoteDeviceAdapter.deserialize(remote_device_adapter_get_response)
+
+ remote_device_adapter_list_response = invoke_method_helper(RemoteDeviceAdapterListRequest())
+ if remote_device_adapter_list_response:
+ remote_device_adapter_list_result = RemoteDeviceAdapterCollection.deserialize(
+ remote_device_adapter_list_response)
+
+ remote_device_adapter_delete_response = invoke_method_helper(
+ RemoteDeviceAdapterDeleteRequest(name=remote_device_adapter.name))
+
+ onvif_list_response = invoke_method_helper(OnvifDeviceDiscoverRequest())
+
+ onvif_get_response = invoke_method_helper(
+ OnvifDeviceGetRequest(endpoint=UnsecuredEndpoint(url="rtsp://camerasimulator:8554")))
+
+
+def main():
+ pipeline_topology = build_pipeline_topology()
+ live_pipeline = build_live_pipeline()
+ remote_device_adapter = create_remote_device_adapter(device_name="RemoteDeviceAdapter1", iot_device_name="iotdevicesample1")
+ try:
+ send_pipeline_requests(pipeline_topology=pipeline_topology, live_pipeline=live_pipeline)
+ send_device_requests(remote_device_adapter=remote_device_adapter)
 except Exception as ex:
 print(ex)

+
 if __name__ == "__main__":
 main()
\ No newline at end of file
diff --git a/sdk/videoanalyzer/azure-media-videoanalyzer-edge/swagger/autorest.md b/sdk/videoanalyzer/azure-media-videoanalyzer-edge/swagger/autorest.md
index a9238e7e0c9f..79a0c25697df 100644
--- a/sdk/videoanalyzer/azure-media-videoanalyzer-edge/swagger/autorest.md
+++ b/sdk/videoanalyzer/azure-media-videoanalyzer-edge/swagger/autorest.md
@@ -10,7 +10,7 @@ autorest --v3 --python

 ## Settings

```yaml
-require: https://github.com/Azure/azure-rest-api-specs/blob/55b3e2d075398ec62f9322829494ff6a4323e299/specification/videoanalyzer/data-plane/readme.md
+require: https://github.com/Azure/azure-rest-api-specs/blob/694fe69245024447f8d3647be1da88e9ad942058/specification/videoanalyzer/data-plane/readme.md
output-folder: ../azure/media/videoanalyzeredge/_generated
namespace: azure.media.videoanalyzer.edge
no-namespace-folders: true