diff --git a/azure-mgmt-media/azure/mgmt/media/models/__init__.py b/azure-mgmt-media/azure/mgmt/media/models/__init__.py index b288dbfab357..324f39a1d03b 100644 --- a/azure-mgmt-media/azure/mgmt/media/models/__init__.py +++ b/azure-mgmt-media/azure/mgmt/media/models/__init__.py @@ -104,7 +104,9 @@ from .transform_output_py3 import TransformOutput from .transform_py3 import Transform from .job_input_py3 import JobInput + from .clip_time_py3 import ClipTime from .job_input_clip_py3 import JobInputClip + from .absolute_clip_time_py3 import AbsoluteClipTime from .job_inputs_py3 import JobInputs from .job_input_asset_py3 import JobInputAsset from .job_input_http_py3 import JobInputHttp @@ -247,7 +249,9 @@ from .transform_output import TransformOutput from .transform import Transform from .job_input import JobInput + from .clip_time import ClipTime from .job_input_clip import JobInputClip + from .absolute_clip_time import AbsoluteClipTime from .job_inputs import JobInputs from .job_input_asset import JobInputAsset from .job_input_http import JobInputHttp @@ -447,7 +451,9 @@ 'TransformOutput', 'Transform', 'JobInput', + 'ClipTime', 'JobInputClip', + 'AbsoluteClipTime', 'JobInputs', 'JobInputAsset', 'JobInputHttp', diff --git a/azure-mgmt-media/azure/mgmt/media/models/absolute_clip_time.py b/azure-mgmt-media/azure/mgmt/media/models/absolute_clip_time.py new file mode 100644 index 000000000000..979adde664b3 --- /dev/null +++ b/azure-mgmt-media/azure/mgmt/media/models/absolute_clip_time.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .clip_time import ClipTime + + +class AbsoluteClipTime(ClipTime): + """Specifies the clip time as an absolute time position in the media file. + The absolute time can point to a different position depending on whether + the media file starts from a timestamp of zero or not. + + All required parameters must be populated in order to send to Azure. + + :param odatatype: Required. Constant filled by server. + :type odatatype: str + :param time: Required. The time position on the timeline of the input + media. It is usually specified as an ISO8601 period. e.g. PT30S for 30 + seconds. + :type time: timedelta + """ + + _validation = { + 'odatatype': {'required': True}, + 'time': {'required': True}, + } + + _attribute_map = { + 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, + 'time': {'key': 'time', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(AbsoluteClipTime, self).__init__(**kwargs) + self.time = kwargs.get('time', None) + self.odatatype = '#Microsoft.Media.AbsoluteClipTime' diff --git a/azure-mgmt-media/azure/mgmt/media/models/absolute_clip_time_py3.py b/azure-mgmt-media/azure/mgmt/media/models/absolute_clip_time_py3.py new file mode 100644 index 000000000000..fc17b1cffd77 --- /dev/null +++ b/azure-mgmt-media/azure/mgmt/media/models/absolute_clip_time_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .clip_time_py3 import ClipTime + + +class AbsoluteClipTime(ClipTime): + """Specifies the clip time as an absolute time position in the media file. + The absolute time can point to a different position depending on whether + the media file starts from a timestamp of zero or not. + + All required parameters must be populated in order to send to Azure. + + :param odatatype: Required. Constant filled by server. + :type odatatype: str + :param time: Required. The time position on the timeline of the input + media. It is usually specified as an ISO8601 period. e.g. PT30S for 30 + seconds. + :type time: timedelta + """ + + _validation = { + 'odatatype': {'required': True}, + 'time': {'required': True}, + } + + _attribute_map = { + 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, + 'time': {'key': 'time', 'type': 'duration'}, + } + + def __init__(self, *, time, **kwargs) -> None: + super(AbsoluteClipTime, self).__init__(**kwargs) + self.time = time + self.odatatype = '#Microsoft.Media.AbsoluteClipTime' diff --git a/azure-mgmt-media/azure/mgmt/media/models/clip_time.py b/azure-mgmt-media/azure/mgmt/media/models/clip_time.py new file mode 100644 index 000000000000..1d454984122f --- /dev/null +++ b/azure-mgmt-media/azure/mgmt/media/models/clip_time.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ClipTime(Model): + """Base class for specifying a clip time. 
Use sub classes of this class to + specify the time position in the media. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: AbsoluteClipTime + + All required parameters must be populated in order to send to Azure. + + :param odatatype: Required. Constant filled by server. + :type odatatype: str + """ + + _validation = { + 'odatatype': {'required': True}, + } + + _attribute_map = { + 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, + } + + _subtype_map = { + 'odatatype': {'#Microsoft.Media.AbsoluteClipTime': 'AbsoluteClipTime'} + } + + def __init__(self, **kwargs): + super(ClipTime, self).__init__(**kwargs) + self.odatatype = None diff --git a/azure-mgmt-media/azure/mgmt/media/models/clip_time_py3.py b/azure-mgmt-media/azure/mgmt/media/models/clip_time_py3.py new file mode 100644 index 000000000000..590b6a643c7d --- /dev/null +++ b/azure-mgmt-media/azure/mgmt/media/models/clip_time_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ClipTime(Model): + """Base class for specifying a clip time. Use sub classes of this class to + specify the time position in the media. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: AbsoluteClipTime + + All required parameters must be populated in order to send to Azure. + + :param odatatype: Required. Constant filled by server. 
+ :type odatatype: str + """ + + _validation = { + 'odatatype': {'required': True}, + } + + _attribute_map = { + 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, + } + + _subtype_map = { + 'odatatype': {'#Microsoft.Media.AbsoluteClipTime': 'AbsoluteClipTime'} + } + + def __init__(self, **kwargs) -> None: + super(ClipTime, self).__init__(**kwargs) + self.odatatype = None diff --git a/azure-mgmt-media/azure/mgmt/media/models/job_input_asset.py b/azure-mgmt-media/azure/mgmt/media/models/job_input_asset.py index 687100fc2c90..4637428a380b 100644 --- a/azure-mgmt-media/azure/mgmt/media/models/job_input_asset.py +++ b/azure-mgmt-media/azure/mgmt/media/models/job_input_asset.py @@ -22,6 +22,12 @@ class JobInputAsset(JobInputClip): :param files: List of files. Required for JobInputHttp. Maximum of 4000 characters each. :type files: list[str] + :param start: Defines a point on the timeline of the input media at which + processing will start. Defaults to the beginning of the input media. + :type start: ~azure.mgmt.media.models.ClipTime + :param end: Defines a point on the timeline of the input media at which + processing will end. Defaults to the end of the input media. + :type end: ~azure.mgmt.media.models.ClipTime :param label: A label that is assigned to a JobInputClip, that is used to satisfy a reference used in the Transform. 
For example, a Transform can be authored so as to take an image file with the label 'xyz' and apply it as @@ -41,6 +47,8 @@ class JobInputAsset(JobInputClip): _attribute_map = { 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, 'files': {'key': 'files', 'type': '[str]'}, + 'start': {'key': 'start', 'type': 'ClipTime'}, + 'end': {'key': 'end', 'type': 'ClipTime'}, 'label': {'key': 'label', 'type': 'str'}, 'asset_name': {'key': 'assetName', 'type': 'str'}, } diff --git a/azure-mgmt-media/azure/mgmt/media/models/job_input_asset_py3.py b/azure-mgmt-media/azure/mgmt/media/models/job_input_asset_py3.py index 263d4ac593c8..36298f70d120 100644 --- a/azure-mgmt-media/azure/mgmt/media/models/job_input_asset_py3.py +++ b/azure-mgmt-media/azure/mgmt/media/models/job_input_asset_py3.py @@ -22,6 +22,12 @@ class JobInputAsset(JobInputClip): :param files: List of files. Required for JobInputHttp. Maximum of 4000 characters each. :type files: list[str] + :param start: Defines a point on the timeline of the input media at which + processing will start. Defaults to the beginning of the input media. + :type start: ~azure.mgmt.media.models.ClipTime + :param end: Defines a point on the timeline of the input media at which + processing will end. Defaults to the end of the input media. + :type end: ~azure.mgmt.media.models.ClipTime :param label: A label that is assigned to a JobInputClip, that is used to satisfy a reference used in the Transform. 
For example, a Transform can be authored so as to take an image file with the label 'xyz' and apply it as @@ -41,11 +47,13 @@ class JobInputAsset(JobInputClip): _attribute_map = { 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, 'files': {'key': 'files', 'type': '[str]'}, + 'start': {'key': 'start', 'type': 'ClipTime'}, + 'end': {'key': 'end', 'type': 'ClipTime'}, 'label': {'key': 'label', 'type': 'str'}, 'asset_name': {'key': 'assetName', 'type': 'str'}, } - def __init__(self, *, asset_name: str, files=None, label: str=None, **kwargs) -> None: - super(JobInputAsset, self).__init__(files=files, label=label, **kwargs) + def __init__(self, *, asset_name: str, files=None, start=None, end=None, label: str=None, **kwargs) -> None: + super(JobInputAsset, self).__init__(files=files, start=start, end=end, label=label, **kwargs) self.asset_name = asset_name self.odatatype = '#Microsoft.Media.JobInputAsset' diff --git a/azure-mgmt-media/azure/mgmt/media/models/job_input_clip.py b/azure-mgmt-media/azure/mgmt/media/models/job_input_clip.py index 5187d14e6234..95c4f7e67701 100644 --- a/azure-mgmt-media/azure/mgmt/media/models/job_input_clip.py +++ b/azure-mgmt-media/azure/mgmt/media/models/job_input_clip.py @@ -25,6 +25,12 @@ class JobInputClip(JobInput): :param files: List of files. Required for JobInputHttp. Maximum of 4000 characters each. :type files: list[str] + :param start: Defines a point on the timeline of the input media at which + processing will start. Defaults to the beginning of the input media. + :type start: ~azure.mgmt.media.models.ClipTime + :param end: Defines a point on the timeline of the input media at which + processing will end. Defaults to the end of the input media. + :type end: ~azure.mgmt.media.models.ClipTime :param label: A label that is assigned to a JobInputClip, that is used to satisfy a reference used in the Transform. 
For example, a Transform can be authored so as to take an image file with the label 'xyz' and apply it as @@ -41,6 +47,8 @@ class JobInputClip(JobInput): _attribute_map = { 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, 'files': {'key': 'files', 'type': '[str]'}, + 'start': {'key': 'start', 'type': 'ClipTime'}, + 'end': {'key': 'end', 'type': 'ClipTime'}, 'label': {'key': 'label', 'type': 'str'}, } @@ -51,5 +59,7 @@ class JobInputClip(JobInput): def __init__(self, **kwargs): super(JobInputClip, self).__init__(**kwargs) self.files = kwargs.get('files', None) + self.start = kwargs.get('start', None) + self.end = kwargs.get('end', None) self.label = kwargs.get('label', None) self.odatatype = '#Microsoft.Media.JobInputClip' diff --git a/azure-mgmt-media/azure/mgmt/media/models/job_input_clip_py3.py b/azure-mgmt-media/azure/mgmt/media/models/job_input_clip_py3.py index b6bbdb3e4e1e..47f77b4c1547 100644 --- a/azure-mgmt-media/azure/mgmt/media/models/job_input_clip_py3.py +++ b/azure-mgmt-media/azure/mgmt/media/models/job_input_clip_py3.py @@ -25,6 +25,12 @@ class JobInputClip(JobInput): :param files: List of files. Required for JobInputHttp. Maximum of 4000 characters each. :type files: list[str] + :param start: Defines a point on the timeline of the input media at which + processing will start. Defaults to the beginning of the input media. + :type start: ~azure.mgmt.media.models.ClipTime + :param end: Defines a point on the timeline of the input media at which + processing will end. Defaults to the end of the input media. + :type end: ~azure.mgmt.media.models.ClipTime :param label: A label that is assigned to a JobInputClip, that is used to satisfy a reference used in the Transform. 
For example, a Transform can be authored so as to take an image file with the label 'xyz' and apply it as @@ -41,6 +47,8 @@ class JobInputClip(JobInput): _attribute_map = { 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, 'files': {'key': 'files', 'type': '[str]'}, + 'start': {'key': 'start', 'type': 'ClipTime'}, + 'end': {'key': 'end', 'type': 'ClipTime'}, 'label': {'key': 'label', 'type': 'str'}, } @@ -48,8 +56,10 @@ class JobInputClip(JobInput): 'odatatype': {'#Microsoft.Media.JobInputAsset': 'JobInputAsset', '#Microsoft.Media.JobInputHttp': 'JobInputHttp'} } - def __init__(self, *, files=None, label: str=None, **kwargs) -> None: + def __init__(self, *, files=None, start=None, end=None, label: str=None, **kwargs) -> None: super(JobInputClip, self).__init__(**kwargs) self.files = files + self.start = start + self.end = end self.label = label self.odatatype = '#Microsoft.Media.JobInputClip' diff --git a/azure-mgmt-media/azure/mgmt/media/models/job_input_http.py b/azure-mgmt-media/azure/mgmt/media/models/job_input_http.py index a12e24dec5a1..bbabe32f599b 100644 --- a/azure-mgmt-media/azure/mgmt/media/models/job_input_http.py +++ b/azure-mgmt-media/azure/mgmt/media/models/job_input_http.py @@ -22,6 +22,12 @@ class JobInputHttp(JobInputClip): :param files: List of files. Required for JobInputHttp. Maximum of 4000 characters each. :type files: list[str] + :param start: Defines a point on the timeline of the input media at which + processing will start. Defaults to the beginning of the input media. + :type start: ~azure.mgmt.media.models.ClipTime + :param end: Defines a point on the timeline of the input media at which + processing will end. Defaults to the end of the input media. + :type end: ~azure.mgmt.media.models.ClipTime :param label: A label that is assigned to a JobInputClip, that is used to satisfy a reference used in the Transform. 
For example, a Transform can be authored so as to take an image file with the label 'xyz' and apply it as @@ -43,6 +49,8 @@ class JobInputHttp(JobInputClip): _attribute_map = { 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, 'files': {'key': 'files', 'type': '[str]'}, + 'start': {'key': 'start', 'type': 'ClipTime'}, + 'end': {'key': 'end', 'type': 'ClipTime'}, 'label': {'key': 'label', 'type': 'str'}, 'base_uri': {'key': 'baseUri', 'type': 'str'}, } diff --git a/azure-mgmt-media/azure/mgmt/media/models/job_input_http_py3.py b/azure-mgmt-media/azure/mgmt/media/models/job_input_http_py3.py index 0cc04900ba5e..1c7df3ed389e 100644 --- a/azure-mgmt-media/azure/mgmt/media/models/job_input_http_py3.py +++ b/azure-mgmt-media/azure/mgmt/media/models/job_input_http_py3.py @@ -22,6 +22,12 @@ class JobInputHttp(JobInputClip): :param files: List of files. Required for JobInputHttp. Maximum of 4000 characters each. :type files: list[str] + :param start: Defines a point on the timeline of the input media at which + processing will start. Defaults to the beginning of the input media. + :type start: ~azure.mgmt.media.models.ClipTime + :param end: Defines a point on the timeline of the input media at which + processing will end. Defaults to the end of the input media. + :type end: ~azure.mgmt.media.models.ClipTime :param label: A label that is assigned to a JobInputClip, that is used to satisfy a reference used in the Transform. 
For example, a Transform can be authored so as to take an image file with the label 'xyz' and apply it as @@ -43,11 +49,13 @@ class JobInputHttp(JobInputClip): _attribute_map = { 'odatatype': {'key': '@odata\\.type', 'type': 'str'}, 'files': {'key': 'files', 'type': '[str]'}, + 'start': {'key': 'start', 'type': 'ClipTime'}, + 'end': {'key': 'end', 'type': 'ClipTime'}, 'label': {'key': 'label', 'type': 'str'}, 'base_uri': {'key': 'baseUri', 'type': 'str'}, } - def __init__(self, *, files=None, label: str=None, base_uri: str=None, **kwargs) -> None: - super(JobInputHttp, self).__init__(files=files, label=label, **kwargs) + def __init__(self, *, files=None, start=None, end=None, label: str=None, base_uri: str=None, **kwargs) -> None: + super(JobInputHttp, self).__init__(files=files, start=start, end=end, label=label, **kwargs) self.base_uri = base_uri self.odatatype = '#Microsoft.Media.JobInputHttp' diff --git a/azure-mgmt-media/azure/mgmt/media/models/video_analyzer_preset.py b/azure-mgmt-media/azure/mgmt/media/models/video_analyzer_preset.py index fb9a378028d7..dd017d19143a 100644 --- a/azure-mgmt-media/azure/mgmt/media/models/video_analyzer_preset.py +++ b/azure-mgmt-media/azure/mgmt/media/models/video_analyzer_preset.py @@ -37,11 +37,16 @@ class VideoAnalyzerPreset(AudioAnalyzerPreset): clearly discernable speech. If automatic detection fails to find the language, transcription would fallback to 'en-US'." :type audio_language: str - :param insights_to_extract: The type of insights to be extracted. If not - set then based on the content the type will selected. If the content is - audio only then only audio insights are extracted and if it is video only. - Possible values include: 'AudioInsightsOnly', 'VideoInsightsOnly', - 'AllInsights' + :param insights_to_extract: Defines the type of insights that you want the + service to generate. The allowed values are 'AudioInsightsOnly', + 'VideoInsightsOnly', and 'AllInsights'. The default is AllInsights. 
If you + set this to AllInsights and the input is audio only, then only audio + insights are generated. Similarly if the input is video only, then only + video insights are generated. It is recommended that you not use + AudioInsightsOnly if you expect some of your inputs to be video only; or + use VideoInsightsOnly if you expect some of your inputs to be audio only. + Your Jobs in such conditions would error out. Possible values include: + 'AudioInsightsOnly', 'VideoInsightsOnly', 'AllInsights' :type insights_to_extract: str or ~azure.mgmt.media.models.InsightsType """ diff --git a/azure-mgmt-media/azure/mgmt/media/models/video_analyzer_preset_py3.py b/azure-mgmt-media/azure/mgmt/media/models/video_analyzer_preset_py3.py index dfef85d00d06..26b496246d08 100644 --- a/azure-mgmt-media/azure/mgmt/media/models/video_analyzer_preset_py3.py +++ b/azure-mgmt-media/azure/mgmt/media/models/video_analyzer_preset_py3.py @@ -37,11 +37,16 @@ class VideoAnalyzerPreset(AudioAnalyzerPreset): clearly discernable speech. If automatic detection fails to find the language, transcription would fallback to 'en-US'." :type audio_language: str - :param insights_to_extract: The type of insights to be extracted. If not - set then based on the content the type will selected. If the content is - audio only then only audio insights are extracted and if it is video only. - Possible values include: 'AudioInsightsOnly', 'VideoInsightsOnly', - 'AllInsights' + :param insights_to_extract: Defines the type of insights that you want the + service to generate. The allowed values are 'AudioInsightsOnly', + 'VideoInsightsOnly', and 'AllInsights'. The default is AllInsights. If you + set this to AllInsights and the input is audio only, then only audio + insights are generated. Similarly if the input is video only, then only + video insights are generated. 
It is recommended that you not use + AudioInsightsOnly if you expect some of your inputs to be video only; or + use VideoInsightsOnly if you expect some of your inputs to be audio only. + Your Jobs in such conditions would error out. Possible values include: + 'AudioInsightsOnly', 'VideoInsightsOnly', 'AllInsights' :type insights_to_extract: str or ~azure.mgmt.media.models.InsightsType """