Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions azure-mgmt-media/MANIFEST.in
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
recursive-include tests *.py *.yaml
include *.rst
include azure/__init__.py
include azure/mgmt/__init__.py
Expand Down
19 changes: 0 additions & 19 deletions azure-mgmt-media/README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -14,25 +14,6 @@ For the older Azure Service Management (ASM) libraries, see
For a more complete set of Azure libraries, see the `azure <https://pypi.python.org/pypi/azure>`__ bundle package.


Compatibility
=============

**IMPORTANT**: If you have an earlier version of the azure package
(version < 1.0), you should uninstall it before installing this package.

You can check the version using pip:

.. code:: shell

pip freeze

If you see azure==0.11.0 (or any version below 1.0), uninstall it first:

.. code:: shell

pip uninstall azure


Usage
=====

Expand Down
5 changes: 5 additions & 0 deletions azure-mgmt-media/azure/mgmt/media/models/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,7 @@
from .codec_py3 import Codec
from .audio_py3 import Audio
from .aac_audio_py3 import AacAudio
from .face_detector_preset_py3 import FaceDetectorPreset
from .audio_analyzer_preset_py3 import AudioAnalyzerPreset
from .overlay_py3 import Overlay
from .audio_overlay_py3 import AudioOverlay
Expand Down Expand Up @@ -212,6 +213,7 @@
from .codec import Codec
from .audio import Audio
from .aac_audio import AacAudio
from .face_detector_preset import FaceDetectorPreset
from .audio_analyzer_preset import AudioAnalyzerPreset
from .overlay import Overlay
from .audio_overlay import AudioOverlay
Expand Down Expand Up @@ -321,6 +323,7 @@
ContentKeyPolicyRestrictionTokenType,
ContentKeyPolicyFairPlayRentalAndLeaseKeyType,
AacAudioProfile,
AnalysisResolution,
StretchMode,
DeinterlaceParity,
DeinterlaceMode,
Expand Down Expand Up @@ -410,6 +413,7 @@
'Codec',
'Audio',
'AacAudio',
'FaceDetectorPreset',
'AudioAnalyzerPreset',
'Overlay',
'AudioOverlay',
Expand Down Expand Up @@ -518,6 +522,7 @@
'ContentKeyPolicyRestrictionTokenType',
'ContentKeyPolicyFairPlayRentalAndLeaseKeyType',
'AacAudioProfile',
'AnalysisResolution',
'StretchMode',
'DeinterlaceParity',
'DeinterlaceMode',
Expand Down
22 changes: 14 additions & 8 deletions azure-mgmt-media/azure/mgmt/media/models/audio_analyzer_preset.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,15 +25,21 @@ class AudioAnalyzerPreset(Preset):
:param odatatype: Required. Constant filled by server.
:type odatatype: str
:param audio_language: The language for the audio payload in the input
using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). The list
of supported languages are, 'en-US', 'en-GB', 'es-ES', 'es-MX', 'fr-FR',
'it-IT', 'ja-JP', 'pt-BR', 'zh-CN', 'de-DE', 'ar-EG', 'ru-RU', 'hi-IN'. If
not specified, automatic language detection would be employed. This
using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). The list
of supported languages are English ('en-US' and 'en-GB'), Spanish ('es-ES'
and 'es-MX'), French ('fr-FR'), Italian ('it-IT'), Japanese ('ja-JP'),
Portuguese ('pt-BR'), Chinese ('zh-CN'), German ('de-DE'), Arabic
('ar-EG'), Russian ('ru-RU'), Hindi ('hi-IN'), and Korean ('ko-KR'). If
you know the language of your content, it is recommended that you specify
it. If the language isn't specified or set to null, automatic language
detection will choose the first language detected and process with the
selected language for the duration of the file. This language detection
feature currently supports English, Chinese, French, German, Italian,
Japanese, Spanish, Russian, and Portuguese. The automatic detection works
best with audio recordings with clearly discernable speech. If automatic
detection fails to find the language, transcription would fallback to
English.
Japanese, Spanish, Russian, and Portuguese. It does not currently support
dynamically switching between languages after the first language is
detected. The automatic detection works best with audio recordings with
clearly discernible speech. If automatic detection fails to find the
language, transcription would fall back to 'en-US'.
:type audio_language: str
"""

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,15 +25,21 @@ class AudioAnalyzerPreset(Preset):
:param odatatype: Required. Constant filled by server.
:type odatatype: str
:param audio_language: The language for the audio payload in the input
using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). The list
of supported languages are, 'en-US', 'en-GB', 'es-ES', 'es-MX', 'fr-FR',
'it-IT', 'ja-JP', 'pt-BR', 'zh-CN', 'de-DE', 'ar-EG', 'ru-RU', 'hi-IN'. If
not specified, automatic language detection would be employed. This
using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). The list
of supported languages are English ('en-US' and 'en-GB'), Spanish ('es-ES'
and 'es-MX'), French ('fr-FR'), Italian ('it-IT'), Japanese ('ja-JP'),
Portuguese ('pt-BR'), Chinese ('zh-CN'), German ('de-DE'), Arabic
('ar-EG'), Russian ('ru-RU'), Hindi ('hi-IN'), and Korean ('ko-KR'). If
you know the language of your content, it is recommended that you specify
it. If the language isn't specified or set to null, automatic language
detection will choose the first language detected and process with the
selected language for the duration of the file. This language detection
feature currently supports English, Chinese, French, German, Italian,
Japanese, Spanish, Russian, and Portuguese. The automatic detection works
best with audio recordings with clearly discernable speech. If automatic
detection fails to find the language, transcription would fallback to
English.
Japanese, Spanish, Russian, and Portuguese. It does not currently support
dynamically switching between languages after the first language is
detected. The automatic detection works best with audio recordings with
clearly discernible speech. If automatic detection fails to find the
language, transcription would fall back to 'en-US'.
:type audio_language: str
"""

Expand Down
11 changes: 6 additions & 5 deletions azure-mgmt-media/azure/mgmt/media/models/audio_overlay.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,11 +17,11 @@ class AudioOverlay(Overlay):

All required parameters must be populated in order to send to Azure.

:param input_label: The label of the job input which is to be used as an
overlay. The Input must specify exactly one file. You can specify an image
file in JPG or PNG formats, or an audio file (such as a WAV, MP3, WMA or
M4A file), or a video file. See https://aka.ms/mesformats for the complete
list of supported audio and video file formats.
:param input_label: Required. The label of the job input which is to be
used as an overlay. The Input must specify exactly one file. You can
specify an image file in JPG or PNG formats, or an audio file (such as a
WAV, MP3, WMA or M4A file), or a video file. See https://aka.ms/mesformats
for the complete list of supported audio and video file formats.
:type input_label: str
:param start: The start position, with reference to the input video, at
which the overlay starts. The value should be in ISO 8601 format. For
Expand Down Expand Up @@ -51,6 +51,7 @@ class AudioOverlay(Overlay):
"""

_validation = {
'input_label': {'required': True},
'odatatype': {'required': True},
}

Expand Down
13 changes: 7 additions & 6 deletions azure-mgmt-media/azure/mgmt/media/models/audio_overlay_py3.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,11 +17,11 @@ class AudioOverlay(Overlay):

All required parameters must be populated in order to send to Azure.

:param input_label: The label of the job input which is to be used as an
overlay. The Input must specify exactly one file. You can specify an image
file in JPG or PNG formats, or an audio file (such as a WAV, MP3, WMA or
M4A file), or a video file. See https://aka.ms/mesformats for the complete
list of supported audio and video file formats.
:param input_label: Required. The label of the job input which is to be
used as an overlay. The Input must specify exactly one file. You can
specify an image file in JPG or PNG formats, or an audio file (such as a
WAV, MP3, WMA or M4A file), or a video file. See https://aka.ms/mesformats
for the complete list of supported audio and video file formats.
:type input_label: str
:param start: The start position, with reference to the input video, at
which the overlay starts. The value should be in ISO 8601 format. For
Expand Down Expand Up @@ -51,6 +51,7 @@ class AudioOverlay(Overlay):
"""

_validation = {
'input_label': {'required': True},
'odatatype': {'required': True},
}

Expand All @@ -64,6 +65,6 @@ class AudioOverlay(Overlay):
'odatatype': {'key': '@odata\\.type', 'type': 'str'},
}

def __init__(self, *, input_label: str=None, start=None, end=None, fade_in_duration=None, fade_out_duration=None, audio_gain_level: float=None, **kwargs) -> None:
def __init__(self, *, input_label: str, start=None, end=None, fade_in_duration=None, fade_out_duration=None, audio_gain_level: float=None, **kwargs) -> None:
super(AudioOverlay, self).__init__(input_label=input_label, start=start, end=end, fade_in_duration=fade_in_duration, fade_out_duration=fade_out_duration, audio_gain_level=audio_gain_level, **kwargs)
self.odatatype = '#Microsoft.Media.AudioOverlay'
Original file line number Diff line number Diff line change
Expand Up @@ -106,6 +106,12 @@ class AacAudioProfile(str, Enum):
he_aac_v2 = "HeAacV2" #: Specifies that the output audio is to be encoded into HE-AAC v2 profile.


class AnalysisResolution(str, Enum):
    """Maximum resolution at which a video is analyzed (used by
    FaceDetectorPreset.resolution)."""

    source_resolution = "SourceResolution"  #: Analyze the video at its original resolution.
    standard_definition = "StandardDefinition"  #: Downscale higher-resolution input to standard definition (preserving aspect ratio) before analysis.


class StretchMode(str, Enum):

none = "None" #: Strictly respect the output resolution without considering the pixel aspect ratio or display aspect ratio of the input video.
Expand Down
10 changes: 8 additions & 2 deletions azure-mgmt-media/azure/mgmt/media/models/envelope_encryption.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,8 +22,14 @@ class EnvelopeEncryption(Model):
:param content_keys: Representing default content key for each encryption
scheme and separate content keys for specific tracks
:type content_keys: ~azure.mgmt.media.models.StreamingPolicyContentKeys
:param custom_key_acquisition_url_template: KeyAcquisitionUrlTemplate is
used to point to user specified service to delivery content keys
:param custom_key_acquisition_url_template: Template for the URL of the
custom service delivering keys to end user players. Not required when
using Azure Media Services for issuing keys. The template supports
replaceable tokens that the service will update at runtime with the value
specific to the request. The currently supported token values are
{AlternativeMediaId}, which is replaced with the value of
StreamingLocatorId.AlternativeMediaId, and {ContentKeyId}, which is
replaced with the value of identifier of the key being requested.
:type custom_key_acquisition_url_template: str
"""

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,8 +22,14 @@ class EnvelopeEncryption(Model):
:param content_keys: Representing default content key for each encryption
scheme and separate content keys for specific tracks
:type content_keys: ~azure.mgmt.media.models.StreamingPolicyContentKeys
:param custom_key_acquisition_url_template: KeyAcquisitionUrlTemplate is
used to point to user specified service to delivery content keys
:param custom_key_acquisition_url_template: Template for the URL of the
custom service delivering keys to end user players. Not required when
using Azure Media Services for issuing keys. The template supports
replaceable tokens that the service will update at runtime with the value
specific to the request. The currently supported token values are
{AlternativeMediaId}, which is replaced with the value of
StreamingLocatorId.AlternativeMediaId, and {ContentKeyId}, which is
replaced with the value of identifier of the key being requested.
:type custom_key_acquisition_url_template: str
"""

Expand Down
51 changes: 51 additions & 0 deletions azure-mgmt-media/azure/mgmt/media/models/face_detector_preset.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .preset import Preset


class FaceDetectorPreset(Preset):
    """Preset describing the settings used to analyze a video and detect all
    faces present in it.

    All required parameters must be populated in order to send to Azure.

    :param odatatype: Required. Constant filled by server.
    :type odatatype: str
    :param resolution: Specifies the maximum resolution at which your video is
     analyzed. The default behavior is "SourceResolution," which will keep the
     input video at its original resolution when analyzed. Using
     "StandardDefinition" will resize input videos to standard definition while
     preserving the appropriate aspect ratio. It will only resize if the video
     is of higher resolution. For example, a 1920x1080 input would be scaled to
     640x360 before processing. Switching to "StandardDefinition" will reduce
     the time it takes to process high resolution video. It may also reduce the
     cost of using this component (see
     https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics
     for details). However, faces that end up being too small in the resized
     video may not be detected. Possible values include: 'SourceResolution',
     'StandardDefinition'
    :type resolution: str or ~azure.mgmt.media.models.AnalysisResolution
    """

    # Discriminator is mandatory on every polymorphic Preset payload.
    _validation = {
        'odatatype': {'required': True},
    }

    # Maps Python attribute names to the wire-format JSON keys.
    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'resolution': {'key': 'resolution', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(FaceDetectorPreset, self).__init__(**kwargs)
        # Fixed discriminator identifying this preset type to the service.
        self.odatatype = '#Microsoft.Media.FaceDetectorPreset'
        self.resolution = kwargs.get('resolution')
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .preset_py3 import Preset


class FaceDetectorPreset(Preset):
    """Preset describing the settings used to analyze a video and detect all
    faces present in it.

    All required parameters must be populated in order to send to Azure.

    :param odatatype: Required. Constant filled by server.
    :type odatatype: str
    :param resolution: Specifies the maximum resolution at which your video is
     analyzed. The default behavior is "SourceResolution," which will keep the
     input video at its original resolution when analyzed. Using
     "StandardDefinition" will resize input videos to standard definition while
     preserving the appropriate aspect ratio. It will only resize if the video
     is of higher resolution. For example, a 1920x1080 input would be scaled to
     640x360 before processing. Switching to "StandardDefinition" will reduce
     the time it takes to process high resolution video. It may also reduce the
     cost of using this component (see
     https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics
     for details). However, faces that end up being too small in the resized
     video may not be detected. Possible values include: 'SourceResolution',
     'StandardDefinition'
    :type resolution: str or ~azure.mgmt.media.models.AnalysisResolution
    """

    # Discriminator is mandatory on every polymorphic Preset payload.
    _validation = {
        'odatatype': {'required': True},
    }

    # Maps Python attribute names to the wire-format JSON keys.
    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'resolution': {'key': 'resolution', 'type': 'str'},
    }

    def __init__(self, *, resolution=None, **kwargs) -> None:
        super(FaceDetectorPreset, self).__init__(**kwargs)
        # Fixed discriminator identifying this preset type to the service.
        self.odatatype = '#Microsoft.Media.FaceDetectorPreset'
        self.resolution = resolution
7 changes: 4 additions & 3 deletions azure-mgmt-media/azure/mgmt/media/models/format.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,9 +20,9 @@ class Format(Model):

All required parameters must be populated in order to send to Azure.

:param filename_pattern: The pattern of the file names for the generated
output files. The following macros are supported in the file name:
{Basename} - The base name of the input video {Extension} - The
:param filename_pattern: Required. The pattern of the file names for the
generated output files. The following macros are supported in the file
name: {Basename} - The base name of the input video {Extension} - The
appropriate extension for this format. {Label} - The label assigned to the
codec/layer. {Index} - A unique index for thumbnails. Only applicable to
thumbnails. {Bitrate} - The audio/video bitrate. Not applicable to
Expand All @@ -34,6 +34,7 @@ class Format(Model):
"""

_validation = {
'filename_pattern': {'required': True},
'odatatype': {'required': True},
}

Expand Down
9 changes: 5 additions & 4 deletions azure-mgmt-media/azure/mgmt/media/models/format_py3.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,9 +20,9 @@ class Format(Model):

All required parameters must be populated in order to send to Azure.

:param filename_pattern: The pattern of the file names for the generated
output files. The following macros are supported in the file name:
{Basename} - The base name of the input video {Extension} - The
:param filename_pattern: Required. The pattern of the file names for the
generated output files. The following macros are supported in the file
name: {Basename} - The base name of the input video {Extension} - The
appropriate extension for this format. {Label} - The label assigned to the
codec/layer. {Index} - A unique index for thumbnails. Only applicable to
thumbnails. {Bitrate} - The audio/video bitrate. Not applicable to
Expand All @@ -34,6 +34,7 @@ class Format(Model):
"""

_validation = {
'filename_pattern': {'required': True},
'odatatype': {'required': True},
}

Expand All @@ -46,7 +47,7 @@ class Format(Model):
'odatatype': {'#Microsoft.Media.ImageFormat': 'ImageFormat', '#Microsoft.Media.MultiBitrateFormat': 'MultiBitrateFormat'}
}

def __init__(self, *, filename_pattern: str=None, **kwargs) -> None:
def __init__(self, *, filename_pattern: str, **kwargs) -> None:
super(Format, self).__init__(**kwargs)
self.filename_pattern = filename_pattern
self.odatatype = None
Loading