Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
"description": "This Swagger was generated by the API Framework.",
"version": "2018-07-01"
},
"host": "management.azure.com",
"host": "management.azure.com",
"consumes": [
"application/json"
],
Expand Down Expand Up @@ -125,6 +125,38 @@
"type": "object",
"description": "Describes Advanced Audio Codec (AAC) audio encoding settings."
},
"FaceDetectorPreset": {
"x-ms-discriminator-value": "#Microsoft.Media.FaceDetectorPreset",
"allOf": [
{
"$ref": "#/definitions/Preset"
}
],
"properties": {
"resolution": {
"type": "string",
"enum": [
"SourceResolution",
"StandardDefinition"
],
"x-ms-enum": {
"name": "AnalysisResolution",
"values": [
{
"value": "SourceResolution"
},
{
"value": "StandardDefinition"
}
],
"modelAsString": true
},
"description": "Specifies the maximum resolution at which your video is analyzed. The default behavior is \"SourceResolution,\" which will keep the input video at its original resolution when analyzed. Using \"StandardDefinition\" will resize input videos to standard definition while preserving the appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to \"StandardDefinition\" will reduce the time it takes to process high resolution video. It may also reduce the cost of using this component (see https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). However, faces that end up being too small in the resized video may not be detected."
}
},
"type": "object",
"description": "Describes all the settings to be used when analyzing a video in order to detect all the faces present."
},
"AudioAnalyzerPreset": {
"x-ms-discriminator-value": "#Microsoft.Media.AudioAnalyzerPreset",
"allOf": [
Expand All @@ -135,7 +167,7 @@
"properties": {
"audioLanguage": {
"type": "string",
"description": "The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). The list of supported languages are, 'en-US', 'en-GB', 'es-ES', 'es-MX', 'fr-FR', 'it-IT', 'ja-JP', 'pt-BR', 'zh-CN', 'de-DE', 'ar-EG', 'ru-RU', 'hi-IN'. If not specified, automatic language detection would be employed. This feature currently supports English, Chinese, French, German, Italian, Japanese, Spanish, Russian, and Portuguese. The automatic detection works best with audio recordings with clearly discernable speech. If automatic detection fails to find the language, transcription would fallback to English."
"description": "The language for the audio payload in the input using the BCP-47 format of 'language tag-region' (e.g: 'en-US'). The list of supported languages are English ('en-US' and 'en-GB'), Spanish ('es-ES' and 'es-MX'), French ('fr-FR'), Italian ('it-IT'), Japanese ('ja-JP'), Portuguese ('pt-BR'), Chinese ('zh-CN'), German ('de-DE'), Arabic ('ar-EG'), Russian ('ru-RU'), Hindi ('hi-IN'), and Korean ('ko-KR'). If you know the language of your content, it is recommended that you specify it. If the language isn't specified or set to null, automatic language detection will choose the first language detected and process with the selected language for the duration of the file. This language detection feature currently supports English, Chinese, French, German, Italian, Japanese, Spanish, Russian, and Portuguese. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernible speech. If automatic detection fails to find the language, transcription would fallback to 'en-US'."
}
},
"type": "object",
Expand Down Expand Up @@ -180,7 +212,8 @@
},
"type": "object",
"required": [
"@odata.type"
"@odata.type",
"inputLabel"
],
"description": "Base type for all overlays - image, audio or video."
},
Expand Down Expand Up @@ -272,6 +305,9 @@
}
},
"type": "object",
"required": [
"start"
],
"description": "Describes the basic properties for generating thumbnails from the input video"
},
"Format": {
Expand All @@ -288,7 +324,8 @@
},
"type": "object",
"required": [
"@odata.type"
"@odata.type",
"filenamePattern"
],
"description": "Base class for output."
},
Expand Down Expand Up @@ -540,6 +577,9 @@
}
},
"type": "object",
"required": [
"bitrate"
],
"description": "Describes the settings to be used when encoding the input video into a desired output bitrate layer."
},
"H264Layer": {
Expand Down Expand Up @@ -718,19 +758,6 @@
"type": "object",
"description": "Describes the settings to produce a JPEG image from the input video."
},
"OutputFile": {
"properties": {
"labels": {
"type": "array",
"items": {
"type": "string"
},
"description": "The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels v1 and v2, and one audio layer with label a1, then an array like '[v1, a1]' tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1."
}
},
"type": "object",
"description": "Represents an output file produced."
},
"MultiBitrateFormat": {
"x-ms-discriminator-value": "#Microsoft.Media.MultiBitrateFormat",
"allOf": [
Expand All @@ -750,6 +777,22 @@
"type": "object",
"description": "Describes the properties for producing a collection of GOP aligned multi-bitrate files. The default behavior is to produce one output file for each video layer which is muxed together with all the audios. The exact output files produced can be controlled by specifying the outputFiles collection."
},
"OutputFile": {
"properties": {
"labels": {
"type": "array",
"items": {
"type": "string"
},
"description": "The list of labels that describe how the encoder should multiplex video and audio into an output file. For example, if the encoder is producing two video layers with labels v1 and v2, and one audio layer with label a1, then an array like '[v1, a1]' tells the encoder to produce an output file with the video track represented by v1 and the audio track represented by a1."
}
},
"type": "object",
"required": [
"labels"
],
"description": "Represents an output file produced."
},
"Mp4Format": {
"x-ms-discriminator-value": "#Microsoft.Media.Mp4Format",
"allOf": [
Expand Down Expand Up @@ -886,6 +929,10 @@
}
},
"type": "object",
"required": [
"codecs",
"formats"
],
"description": "Describes all the settings to be used when encoding the input video with the Standard Encoder."
},
"VideoAnalyzerPreset": {
Expand Down Expand Up @@ -1102,7 +1149,7 @@
"items": {
"type": "string"
},
"description": "List of files. Required for JobInputHttp."
"description": "List of files. Required for JobInputHttp. Maximum of 4000 characters each."
},
"label": {
"type": "string",
Expand Down Expand Up @@ -1160,7 +1207,7 @@
"properties": {
"baseUri": {
"type": "string",
"description": "Base URI for HTTPS job input. It will be concatenated with provided file names. If no base uri is given, then the provided file list is assumed to be fully qualified uris."
"description": "Base URI for HTTPS job input. It will be concatenated with provided file names. If no base uri is given, then the provided file list is assumed to be fully qualified uris. Maximum length of 4000 characters."
}
},
"type": "object",
Expand Down Expand Up @@ -1533,7 +1580,7 @@
"additionalProperties": {
"type": "string"
},
"description": "Customer provided correlation data that will be returned in Job and JobOutput state events."
"description": "Customer provided key, value pairs that will be returned in Job and JobOutput state events."
}
},
"type": "object",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -154,7 +154,7 @@
"properties": {
"customLicenseAcquisitionUrlTemplate": {
"type": "string",
"description": "The template for a customer service to deliver keys to end users. Not needed when using Azure Media Services for issuing keys."
"description": "Template for the URL of the custom service delivering licenses to end user players. Not required when using Azure Media Services for issuing licenses. The template supports replaceable tokens that the service will update at runtime with the value specific to the request. The currently supported token values are {AlternativeMediaId}, which is replaced with the value of StreamingLocatorId.AlternativeMediaId, and {ContentKeyId}, which is replaced with the value of identifier of the key being requested."
},
"playReadyCustomAttributes": {
"type": "string",
Expand All @@ -168,7 +168,7 @@
"properties": {
"customLicenseAcquisitionUrlTemplate": {
"type": "string",
"description": "The template for a customer service to deliver keys to end users. Not needed when using Azure Media Services for issuing keys."
"description": "Template for the URL of the custom service delivering licenses to end user players. Not required when using Azure Media Services for issuing licenses. The template supports replaceable tokens that the service will update at runtime with the value specific to the request. The currently supported token values are {AlternativeMediaId}, which is replaced with the value of StreamingLocatorId.AlternativeMediaId, and {ContentKeyId}, which is replaced with the value of identifier of the key being requested."
}
},
"type": "object",
Expand All @@ -178,7 +178,7 @@
"properties": {
"customLicenseAcquisitionUrlTemplate": {
"type": "string",
"description": "The template for a customer service to deliver keys to end users. Not needed when using Azure Media Services for issuing keys."
"description": "Template for the URL of the custom service delivering licenses to end user players. Not required when using Azure Media Services for issuing licenses. The template supports replaceable tokens that the service will update at runtime with the value specific to the request. The currently supported token values are {AlternativeMediaId}, which is replaced with the value of StreamingLocatorId.AlternativeMediaId, and {ContentKeyId}, which is replaced with the value of identifier of the key being requested."
},
"allowPersistentLicense": {
"type": "boolean",
Expand Down Expand Up @@ -280,7 +280,7 @@
},
"customKeyAcquisitionUrlTemplate": {
"type": "string",
"description": "KeyAcquisitionUrlTemplate is used to point to user specified service to delivery content keys"
"description": "Template for the URL of the custom service delivering keys to end user players. Not required when using Azure Media Services for issuing keys. The template supports replaceable tokens that the service will update at runtime with the value specific to the request. The currently supported token values are {AlternativeMediaId}, which is replaced with the value of StreamingLocatorId.AlternativeMediaId, and {ContentKeyId}, which is replaced with the value of identifier of the key being requested."
}
},
"type": "object",
Expand Down Expand Up @@ -575,6 +575,13 @@
"alternativeMediaId": {
"type": "string",
"description": "Alternative Media ID of this Streaming Locator"
},
"filters": {
"type": "array",
"items": {
"type": "string"
},
"description": "A list of asset or account filters which apply to this streaming locator"
}
},
"type": "object",
Expand Down